patch-2.4.22 linux-2.4.22/include/asm-mips64/semaphore-helper.h

diff -urN linux-2.4.21/include/asm-mips64/semaphore-helper.h linux-2.4.22/include/asm-mips64/semaphore-helper.h
@@ -1,14 +1,21 @@
 /*
  * SMP- and interrupt-safe semaphores helper functions.
  *
- * (C) Copyright 1996 Linus Torvalds
- * (C) Copyright 1999 Andrea Arcangeli
- * (C) Copyright 1999, 2001 Ralf Baechle
- * (C) Copyright 1999, 2001 Silicon Graphics, Inc.
+ * Copyright (C) 1996 Linus Torvalds
+ * Copyright (C) 1999 Andrea Arcangeli
+ * Copyright (C) 1999, 2001, 2002 Ralf Baechle
+ * Copyright (C) 1999, 2001 Silicon Graphics, Inc.
+ * Copyright (C) 2000 MIPS Technologies, Inc.
  */
 #ifndef _ASM_SEMAPHORE_HELPER_H
 #define _ASM_SEMAPHORE_HELPER_H
 
+#include <linux/config.h>
+#include <linux/errno.h>
+
+#define sem_read(a) ((a)->counter)
+#define sem_inc(a) (((a)->counter)++)
+#define sem_dec(a) (((a)->counter)--)
 /*
  * These two _must_ execute atomically wrt each other.
  */
@@ -17,6 +24,8 @@
 	atomic_inc(&sem->waking);
 }
 
+#ifdef CONFIG_CPU_HAS_LLSC
+
 static inline int waking_non_zero(struct semaphore *sem)
 {
 	int ret, tmp;
@@ -34,6 +43,30 @@
 	return ret;
 }
 
+#else /* !CONFIG_CPU_HAS_LLSC */
+
+/*
+ * It doesn't make sense, IMHO, to endlessly turn interrupts off and on again.
+ * Do it once and that's it. ll/sc *has* its advantages. HK
+ */
+
+static inline int waking_non_zero(struct semaphore *sem)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	local_irq_save(flags);
+	if (sem_read(&sem->waking) > 0) {
+		sem_dec(&sem->waking);
+		ret = 1;
+	}
+	local_irq_restore(flags);
+	return ret;
+}
+#endif /* !CONFIG_CPU_HAS_LLSC */
+
+#ifdef CONFIG_CPU_HAS_LLDSCD
+
 /*
  * waking_non_zero_interruptible:
  *	1	got the lock
@@ -46,6 +79,11 @@
  *
  * This is accomplished by doing a 64-bit lld/scd on the 2 32-bit words.
  *
+ * This is crazy.  Normally it's strictly forbidden to use 64-bit operations
+ * in the 32-bit MIPS kernel.  In this case it's however ok because if an
+ * interrupt has destroyed the upper half of the registers, scd will fail.
+ * Note also that this will not work for MIPS32 CPUs!
+ *
  * Pseudocode:
  *
  * If(sem->waking > 0) {
@@ -64,60 +102,27 @@
 {
 	long ret, tmp;
 
-#ifdef __MIPSEB__
-
-        __asm__ __volatile__(
+	__asm__ __volatile__(
 	".set\tpush\t\t\t# waking_non_zero_interruptible\n\t"
-	".set\tnoat\n\t"
+	".set\tmips3\n\t"
+	".set\tnoat\n"
 	"0:\tlld\t%1, %2\n\t"
 	"li\t%0, 0\n\t"
 	"sll\t$1, %1, 0\n\t"
 	"blez\t$1, 1f\n\t"
 	"daddiu\t%1, %1, -1\n\t"
 	"li\t%0, 1\n\t"
-	"b\t2f\n\t"
+	"b\t2f\n"
 	"1:\tbeqz\t%3, 2f\n\t"
 	"li\t%0, %4\n\t"
 	"dli\t$1, 0x0000000100000000\n\t"
-	"daddu\t%1, %1, $1\n\t"
+	"daddu\t%1, %1, $1\n"
 	"2:\tscd\t%1, %2\n\t"
 	"beqz\t%1, 0b\n\t"
 	".set\tpop"
 	: "=&r" (ret), "=&r" (tmp), "=m" (*sem)
 	: "r" (signal_pending(tsk)), "i" (-EINTR));
 
-#elif defined(__MIPSEL__)
-
-	__asm__ __volatile__(
-	".set\tpush\t\t\t# waking_non_zero_interruptible\n\t"
-	".set\t	noat\n"
-	"0:\tlld\t%1, %2\n\t"
-	"li\t%0, 0\n\t"
-	"blez\t%1, 1f\n\t"
-	"dli\t$1, 0x0000000100000000\n\t"
-	"dsubu\t%1, %1, $1\n\t"
-	"li\t%0, 1\n\t"
-	"b\t2f\n"
-	"1:\tbeqz\t%3, 2f\n\t"
-	"li\t%0, %4\n\t"
-	/*
-	 * It would be nice to assume that sem->count
-	 * is != -1, but we will guard against that case
-	 */
-	"daddiu\t$1, %1, 1\n\t"
-	"dsll32\t$1, $1, 0\n\t"
-	"dsrl32\t$1, $1, 0\n\t"
-	"dsrl32\t%1, %1, 0\n\t"
-	"dsll32\t%1, %1, 0\n\t"
-	"or\t%1, %1, $1\n"
-	"2:\tscd\t%1, %2\n\t"
-	"beqz\t	%1, 0b\n\t"
-	".set\tpop"
-	: "=&r" (ret), "=&r" (tmp), "=m" (*sem)
-	: "r" (signal_pending(tsk)), "i" (-EINTR));
-
-#endif
-
 	return ret;
 }
 
@@ -135,4 +140,43 @@
 	return 0;
 }
 
+#else /* !CONFIG_CPU_HAS_LLDSCD */
+
+static inline int waking_non_zero_interruptible(struct semaphore *sem,
+						struct task_struct *tsk)
+{
+	int ret = 0;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	if (sem_read(&sem->waking) > 0) {
+		sem_dec(&sem->waking);
+		ret = 1;
+	} else if (signal_pending(tsk)) {
+		sem_inc(&sem->count);
+		ret = -EINTR;
+	}
+	local_irq_restore(flags);
+	return ret;
+}
+
+static inline int waking_non_zero_trylock(struct semaphore *sem)
+{
+	int ret = 1;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	if (sem_read(&sem->waking) <= 0)
+		sem_inc(&sem->count);
+	else {
+		sem_dec(&sem->waking);
+		ret = 0;
+	}
+	local_irq_restore(flags);
+
+	return ret;
+}
+
+#endif /* !CONFIG_CPU_HAS_LLDSCD */
+
 #endif /* _ASM_SEMAPHORE_HELPER_H */
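
A note on the !CONFIG_CPU_HAS_LLSC / !CONFIG_CPU_HAS_LLDSCD fallbacks added
above: sem_read(), sem_inc() and sem_dec() touch atomic_t.counter directly,
which is only safe because every use sits inside a single
local_irq_save()/local_irq_restore() pair. That is the point of the
"do it once and that's it" comment. The sketch below shows the same
check-and-consume shape in user space; the fake_sem type,
fake_waking_non_zero() and the pthread mutex (standing in for disabling
interrupts) are made up for illustration and are not kernel code.

#include <pthread.h>
#include <stdio.h>

/* Illustration only: plain ints play the role of sem->count and
 * sem->waking, and a mutex plays the role of local_irq_save() /
 * local_irq_restore().  None of these names exist in the kernel. */
struct fake_sem {
	int count;
	int waking;
	pthread_mutex_t lock;
};

/* Same shape as the !CONFIG_CPU_HAS_LLSC waking_non_zero(): consume one
 * pending wakeup, if any, inside a single enter/exit of the critical
 * section. */
static int fake_waking_non_zero(struct fake_sem *sem)
{
	int ret = 0;

	pthread_mutex_lock(&sem->lock);
	if (sem->waking > 0) {
		sem->waking--;
		ret = 1;
	}
	pthread_mutex_unlock(&sem->lock);
	return ret;
}

int main(void)
{
	struct fake_sem s = {
		.count = 0,
		.waking = 1,
		.lock = PTHREAD_MUTEX_INITIALIZER,
	};
	int first = fake_waking_non_zero(&s);
	int second = fake_waking_non_zero(&s);

	/* Prints "1 0": the single pending wakeup is consumed exactly once. */
	printf("%d %d\n", first, second);
	return 0;
}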

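The "64-bit lld/scd on the 2 32-bit words" trick in the retained asm (the
former __MIPSEB__ variant) works because count and waking are adjacent
32-bit fields, so one doubleword covers both. This sketch reads the
big-endian layout off that asm, taking count as the upper 32 bits and
waking as the lower 32 bits, which is why "daddiu %1, %1, -1" consumes a
wakeup while adding 0x0000000100000000 gives a count back without touching
the other field. The packing, field order and variable names below are
assumptions for illustration, and nothing here is atomic.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Assumed big-endian view of the doubleword loaded by lld:
	 * count in the upper 32 bits, waking in the lower 32 bits. */
	int32_t count = 0, waking = 1;
	uint64_t dword = ((uint64_t)(uint32_t)count << 32) | (uint32_t)waking;
	int got_it;

	if ((int32_t)(uint32_t)dword > 0) {
		/* "daddiu %1, %1, -1": waking > 0, take the wakeup.  The low
		 * word cannot borrow into count on this path. */
		dword -= 1;
		got_it = 1;
	} else {
		/* "dli $1, 0x0000000100000000; daddu %1, %1, $1": no wakeup
		 * pending, give the count back instead. */
		dword += 0x0000000100000000ULL;
		got_it = 0;
	}

	count  = (int32_t)(dword >> 32);
	waking = (int32_t)(uint32_t)dword;

	/* Prints "got_it=1 count=0 waking=0" for the initial values above. */
	printf("got_it=%d count=%d waking=%d\n", got_it, count, waking);
	return 0;
}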