patch-2.2.11 linux/include/asm-i386/semaphore-helper.h
- Lines: 71
- Date: Mon Aug 9 12:04:41 1999
- Orig file: v2.2.10/linux/include/asm-i386/semaphore-helper.h
- Orig date: Wed Feb 17 09:34:13 1999
diff -u --recursive --new-file v2.2.10/linux/include/asm-i386/semaphore-helper.h linux/include/asm-i386/semaphore-helper.h
@@ -13,14 +13,19 @@
*
* This is trivially done with load_locked/store_cond,
* but on the x86 we need an external synchronizer.
+ *
+ * NOTE: we can't look at the semaphore count here, since it can be
+ * unreliable. Even if the count is less than 1, the semaphore could
+ * still be owned by another process: not only does up() increase the
+ * semaphore count, the interruptible/trylock paths also increment it
+ * when they fail.
*/
static inline void wake_one_more(struct semaphore * sem)
{
	unsigned long flags;
	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	if (atomic_read(&sem->count) <= 0)
-		sem->waking++;
+	sem->waking++;
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
}
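
For reference, here is wake_one_more() as it reads with the hunk above applied: a minimal reconstruction assembled from the hunk's own context lines, assuming the semaphore_wake_lock spinlock and the plain int waking field of the stock 2.2 x86 semaphore code.

static inline void wake_one_more(struct semaphore * sem)
{
	unsigned long flags;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	/* Record one pending wakeup unconditionally.  The count is
	 * deliberately not consulted, because a failed interruptible or
	 * trylock down() may have re-incremented it even though the
	 * semaphore is still owned by someone else. */
	sem->waking++;
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
}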
@@ -44,9 +49,11 @@
* 0 go to sleep
* -EINTR interrupted
*
- * We must undo the sem->count down_interruptible() increment while we are
- * protected by the spinlock in order to make atomic this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
+ * If we give up, we must undo the count decrease we previously did in down().
+ * Subtle: up() can continue to happen and increase the semaphore count
+ * even during our critical section protected by the spinlock. So we
+ * must remember to undo the sem->waking increment that wake_one_more()
+ * will perform some time soon, if the semaphore count becomes > 0.
*/
static inline int waking_non_zero_interruptible(struct semaphore *sem,
struct task_struct *tsk)
@@ -59,7 +66,8 @@
		sem->waking--;
		ret = 1;
	} else if (signal_pending(tsk)) {
-		atomic_inc(&sem->count);
+		if (atomic_inc_and_test_greater_zero(&sem->count))
+			sem->waking--;
		ret = -EINTR;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
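
Putting the two interruptible hunks together, this is a sketch of waking_non_zero_interruptible() as it would read with the patch applied; the lines not shown in the hunk (the sem->waking > 0 test, the local declarations and the return) are assumed to match the stock 2.2 file.

static inline int waking_non_zero_interruptible(struct semaphore *sem,
						struct task_struct *tsk)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->waking > 0) {
		/* A wakeup was granted to us: consume it and take the lock. */
		sem->waking--;
		ret = 1;
	} else if (signal_pending(tsk)) {
		/* Give up: put back the count we took in down().  If the
		 * count ends up above zero, an up() has already accounted
		 * for us, so also consume the sem->waking credit that its
		 * wake_one_more() grants (or will soon grant). */
		if (atomic_inc_and_test_greater_zero(&sem->count))
			sem->waking--;
		ret = -EINTR;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}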
@@ -71,9 +79,7 @@
* 1 failed to lock
* 0 got the lock
*
- * We must undo the sem->count down_trylock() increment while we are
- * protected by the spinlock in order to make atomic this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
+ * Implementation details are the same as in the interruptible case.
*/
static inline int waking_non_zero_trylock(struct semaphore *sem)
{
@@ -82,8 +88,10 @@
	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->waking <= 0)
-		atomic_inc(&sem->count);
-	else {
+	{
+		if (atomic_inc_and_test_greater_zero(&sem->count))
+			sem->waking--;
+	} else {
		sem->waking--;
		ret = 0;
	}
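
The atomic_inc_and_test_greater_zero() helper used above is not defined in this file; it is presumably introduced elsewhere in the patch. Judging only from the call sites here, its contract is: atomically increment the counter and report whether the new value is greater than zero. Below is a hypothetical, user-space C model of that contract, using C11 atomics purely for illustration; it is not the kernel's x86 implementation.

#include <stdatomic.h>
#include <stdbool.h>

/* Model of the contract implied by the call sites above: add one to *v
 * atomically and report whether the result ended up above zero. */
static bool inc_and_test_greater_zero(atomic_int *v)
{
	/* atomic_fetch_add returns the old value, so old + 1 is the new one. */
	return atomic_fetch_add(v, 1) + 1 > 0;
}

In the trylock path this means: when no wakeup is pending we give back the count taken in down_trylock(), and if that puts the count above zero an up() has already counted us, so we also consume the sem->waking credit it grants via wake_one_more().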