patch-2.3.43 linux/include/asm-sparc64/softirq.h
Next file: linux/include/linux/acpi.h
Previous file: linux/include/asm-sparc64/posix_types.h
Back to the patch index
Back to the overall index
- Lines: 119
- Date: Wed Feb 9 20:08:09 2000
- Orig file: v2.3.42/linux/include/asm-sparc64/softirq.h
- Orig date: Mon Jul 19 13:12:47 1999
diff -u --recursive --new-file v2.3.42/linux/include/asm-sparc64/softirq.h linux/include/asm-sparc64/softirq.h
@@ -19,117 +19,6 @@
 #define local_bh_disable() (local_bh_count++)
 #define local_bh_enable() (local_bh_count--)
-/* The locking mechanism for base handlers, to prevent re-entrancy,
- * is entirely private to an implementation, it should not be
- * referenced at all outside of this file.
- */
-
-#define get_active_bhs() (bh_mask & bh_active)
-#define clear_active_bhs(mask) \
- __asm__ __volatile__( \
-"1: ldx [%1], %%g7\n" \
-" andn %%g7, %0, %%g5\n" \
-" casx [%1], %%g7, %%g5\n" \
-" cmp %%g7, %%g5\n" \
-" bne,pn %%xcc, 1b\n" \
-" nop" \
- : /* no outputs */ \
- : "HIr" (mask), "r" (&bh_active) \
- : "g5", "g7", "cc", "memory")
-
-extern inline void init_bh(int nr, void (*routine)(void))
-{
- bh_base[nr] = routine;
- atomic_set(&bh_mask_count[nr], 0);
- bh_mask |= 1 << nr;
-}
-
-extern inline void remove_bh(int nr)
-{
- bh_mask &= ~(1 << nr);
- membar("#StoreStore");
- bh_base[nr] = NULL;
-}
-
-extern inline void mark_bh(int nr)
-{
- set_bit(nr, &bh_active);
-}
-
-#ifndef __SMP__
-
-extern inline void start_bh_atomic(void)
-{
- local_bh_count++;
- barrier();
-}
-
-extern inline void end_bh_atomic(void)
-{
- barrier();
- local_bh_count--;
-}
-
-/* These are for the irq's testing the lock */
-#define softirq_trylock(cpu) (local_bh_count ? 0 : (local_bh_count=1))
-#define softirq_endlock(cpu) (local_bh_count = 0)
-#define synchronize_bh() barrier()
-
-#else /* (__SMP__) */
-
-extern atomic_t global_bh_lock;
-extern spinlock_t global_bh_count;
-
-extern void synchronize_bh(void);
-
-static inline void start_bh_atomic(void)
-{
- atomic_inc(&global_bh_lock);
- synchronize_bh();
-}
-
-static inline void end_bh_atomic(void)
-{
- atomic_dec(&global_bh_lock);
-}
-
-/* These are for the IRQs testing the lock */
-static inline int softirq_trylock(int cpu)
-{
- if (spin_trylock(&global_bh_count)) {
- if (atomic_read(&global_bh_lock) == 0 &&
- cpu_data[cpu].bh_count == 0) {
- ++(cpu_data[cpu].bh_count);
- return 1;
- }
- spin_unlock(&global_bh_count);
- }
- return 0;
-}
-
-static inline void softirq_endlock(int cpu)
-{
- (cpu_data[cpu].bh_count)--;
- spin_unlock(&global_bh_count);
-}
-
-#endif /* (__SMP__) */
-
-/*
- * These use a mask count to correctly handle
- * nested disable/enable calls
- */
-extern inline void disable_bh(int nr)
-{
- bh_mask &= ~(1 << nr);
- atomic_inc(&bh_mask_count[nr]);
- synchronize_bh();
-}
-
-extern inline void enable_bh(int nr)
-{
- if (atomic_dec_and_test(&bh_mask_count[nr]))
- bh_mask |= 1 << nr;
-}
+#define in_softirq() (local_bh_count != 0)
 #endif /* !(__SPARC64_SOFTIRQ_H) */
FUNET's LINUX-ADM group, linux-adm@nic.funet.fi
TCL-scripts by Sam Shen (who was at: slshen@lbl.gov)