patch-2.1.81 linux/include/asm-i386/softirq.h

diff -u --recursive --new-file v2.1.80/linux/include/asm-i386/softirq.h linux/include/asm-i386/softirq.h
@@ -4,6 +4,9 @@
 #include <asm/atomic.h>
 #include <asm/hardirq.h>
 
+extern unsigned int local_bh_count[NR_CPUS];
+#define in_bh()	(local_bh_count[smp_processor_id()] != 0)
+
 #define get_active_bhs()	(bh_mask & bh_active)
 #define clear_active_bhs(x)	atomic_clear_mask((x),&bh_active)
 
@@ -25,23 +28,6 @@
 	set_bit(nr, &bh_active);
 }
 
-/*
- * These use a mask count to correctly handle
- * nested disable/enable calls
- */
-extern inline void disable_bh(int nr)
-{
-	bh_mask &= ~(1 << nr);
-	bh_mask_count[nr]++;
-	synchronize_irq();
-}
-
-extern inline void enable_bh(int nr)
-{
-	if (!--bh_mask_count[nr])
-		bh_mask |= 1 << nr;
-}
-
 #ifdef __SMP__
 
 /*
@@ -49,52 +35,83 @@
  * is entirely private to an implementation, it should not be
  * referenced at all outside of this file.
  */
-extern atomic_t __intel_bh_counter;
+extern atomic_t global_bh_lock;
+extern atomic_t global_bh_count;
 
-extern inline void start_bh_atomic(void)
+extern void synchronize_bh(void);
+
+static inline void start_bh_atomic(void)
 {
-	atomic_inc(&__intel_bh_counter);
-	synchronize_irq();
+	atomic_inc(&global_bh_lock);
+	synchronize_bh();
 }
 
-extern inline void end_bh_atomic(void)
+static inline void end_bh_atomic(void)
 {
-	atomic_dec(&__intel_bh_counter);
+	atomic_dec(&global_bh_lock);
 }
 
 /* These are for the irq's testing the lock */
-static inline int softirq_trylock(void)
+static inline int softirq_trylock(int cpu)
 {
-	atomic_inc(&__intel_bh_counter);
-	if (atomic_read(&__intel_bh_counter) != 1) {
-		atomic_dec(&__intel_bh_counter);
+	unsigned long flags;
+
+	__save_flags(flags);
+	__cli();
+	atomic_inc(&global_bh_count);
+	if (atomic_read(&global_bh_count) != 1 || atomic_read(&global_bh_lock) != 0) {
+		atomic_dec(&global_bh_count);
+		__restore_flags(flags);
 		return 0;
 	}
+	++local_bh_count[cpu];
 	return 1;
 }
 
-#define softirq_endlock()	atomic_dec(&__intel_bh_counter)
+static inline void softirq_endlock(int cpu)
+{
+	__cli();
+	atomic_dec(&global_bh_count);
+	local_bh_count[cpu]--;
+	__sti();
+}
 
 #else
 
-extern int __intel_bh_counter;
-
 extern inline void start_bh_atomic(void)
 {
-	__intel_bh_counter++;
+	local_bh_count[smp_processor_id()]++;
 	barrier();
 }
 
 extern inline void end_bh_atomic(void)
 {
 	barrier();
-	__intel_bh_counter--;
+	local_bh_count[smp_processor_id()]--;
 }
 
 /* These are for the irq's testing the lock */
-#define softirq_trylock()	(__intel_bh_counter ? 0 : (__intel_bh_counter=1))
-#define softirq_endlock()	(__intel_bh_counter = 0)
+#define softirq_trylock()	(in_bh ? 0 : (local_bh_count[smp_processor_id()]=1))
+#define softirq_endlock()	(local_bh_count[smp_processor_id()] = 0)
+#define synchronize_bh()	do { } while (0)
 
 #endif	/* SMP */
+
+/*
+ * These use a mask count to correctly handle
+ * nested disable/enable calls
+ */
+extern inline void disable_bh(int nr)
+{
+	bh_mask &= ~(1 << nr);
+	bh_mask_count[nr]++;
+	synchronize_bh();
+}
+
+extern inline void enable_bh(int nr)
+{
+	if (!--bh_mask_count[nr])
+		bh_mask |= 1 << nr;
+}
 
 #endif	/* __ASM_SOFTIRQ_H */

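The interesting part of the patch is the new SMP bottom-half locking: softirq_trylock(cpu) lets at most one CPU into the bottom-half handlers at a time, backing off both when another CPU already holds global_bh_count and when a start_bh_atomic()/end_bh_atomic() section has raised global_bh_lock. The sketch below is a user-space model of that hand-off built on C11 atomics; the __cli()/__restore_flags() interrupt masking and the synchronize_bh() spin from the real header have no user-space equivalent and are omitted, and the harness in main() is purely illustrative.

#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 4

/* User-space model of the new SMP bottom-half lock (illustrative only). */
static atomic_int global_bh_lock = 0;          /* start_bh_atomic() holders   */
static atomic_int global_bh_count = 0;         /* CPUs currently in a bh      */
static unsigned int local_bh_count[NR_CPUS];   /* per-CPU "in bottom half"    */

/* Try to become the one CPU allowed to run bottom halves right now. */
static int softirq_trylock(int cpu)
{
	atomic_fetch_add(&global_bh_count, 1);
	if (atomic_load(&global_bh_count) != 1 ||
	    atomic_load(&global_bh_lock) != 0) {
		atomic_fetch_sub(&global_bh_count, 1);
		return 0;               /* someone else got there first */
	}
	++local_bh_count[cpu];
	return 1;
}

static void softirq_endlock(int cpu)
{
	atomic_fetch_sub(&global_bh_count, 1);
	local_bh_count[cpu]--;
}

int main(void)
{
	int cpu = 0;

	if (softirq_trylock(cpu)) {
		printf("cpu %d runs the pending bottom halves\n", cpu);
		softirq_endlock(cpu);
	}
	/* A second attempt while the lock is already held must fail. */
	if (softirq_trylock(cpu)) {
		if (!softirq_trylock(cpu))
			printf("nested trylock correctly refused\n");
		softirq_endlock(cpu);
	}
	return 0;
}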
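disable_bh()/enable_bh(), which the patch moves below the SMP block so they can call the new synchronize_bh(), rely on a per-bh nesting count: every disable_bh() clears the bit in bh_mask and bumps bh_mask_count[nr], and only the enable_bh() that brings the count back to zero sets the bit again. A minimal user-space model of that nesting behaviour follows; the demonstration harness is illustrative and the synchronize_bh() call is omitted.

#include <stdio.h>

#define NR_BH 32

static unsigned long bh_mask = ~0UL;    /* all bottom halves enabled */
static int bh_mask_count[NR_BH];

/* Each call clears the bit and bumps the nesting count. */
static void disable_bh(int nr)
{
	bh_mask &= ~(1UL << nr);
	bh_mask_count[nr]++;
}

/* Only the call that balances the last disable re-enables the bit. */
static void enable_bh(int nr)
{
	if (!--bh_mask_count[nr])
		bh_mask |= 1UL << nr;
}

int main(void)
{
	disable_bh(2);
	disable_bh(2);                  /* nested disable */
	enable_bh(2);                   /* still disabled ... */
	printf("after one enable:  bit 2 %s\n",
	       (bh_mask & (1UL << 2)) ? "set" : "clear");
	enable_bh(2);                   /* ... until the counts balance */
	printf("after two enables: bit 2 %s\n",
	       (bh_mask & (1UL << 2)) ? "set" : "clear");
	return 0;
}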