patch-2.2.14 linux/arch/alpha/kernel/irq.c


diff -u --recursive --new-file v2.2.13/linux/arch/alpha/kernel/irq.c linux/arch/alpha/kernel/irq.c
@@ -35,12 +35,16 @@
 #define vulp	volatile unsigned long *
 #define vuip	volatile unsigned int *
 
-unsigned int local_irq_count[NR_CPUS];
-unsigned int local_bh_count[NR_CPUS];
-unsigned long hardirq_no[NR_CPUS];
+/* Only uniprocessor needs this IRQ/BH locking depth; on SMP it
+ * lives in the per-cpu structure for cache reasons.
+ */
+#ifndef __SMP__
+unsigned long local_bh_count;
+unsigned long local_irq_count;
+#endif
 
-#if NR_IRQS > 64
-#  error Unable to handle more than 64 irq levels.
+#if NR_IRQS > 128
+#  error Unable to handle more than 128 irq levels.
 #endif
 
 #ifdef CONFIG_ALPHA_GENERIC
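
The counters do not disappear on SMP; they move into each CPU's
cpu_data record so that the hot fields sit in per-CPU cache lines.
A plausible sketch of the companion definitions, inferred from the
bare local_irq_count and cpu_data[i].irq_count uses later in this
patch (the real ones live in the headers changed alongside this file):

	/* Sketch only: names inferred from usage in this file. */
	#ifdef __SMP__
	#define local_irq_count  (cpu_data[smp_processor_id()].irq_count)
	#define local_bh_count   (cpu_data[smp_processor_id()].bh_count)
	#else
	extern unsigned long local_irq_count;
	extern unsigned long local_bh_count;
	#endif
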
@@ -57,7 +61,7 @@
 /*
  * Shadow-copy of masked interrupts.
  */
-unsigned long alpha_irq_mask = ~0UL;
+unsigned long _alpha_irq_masks[2] = {~0UL, ~0UL };
 
 /*
  * The ack_irq routine used by 80% of the systems.
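
Raising the IRQ limit to 128 means the shadow mask no longer fits in
one 64-bit word, so it becomes a two-element array. The code below
keeps using the old alpha_irq_mask name for the low word; presumably
the companion irq.h change maps it with something like:

	/* Assumed alias; the exact definition is in irq.h, not shown here. */
	#define alpha_irq_mask	(_alpha_irq_masks[0])
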
@@ -107,6 +111,8 @@
 # define IACK_SC	TSUNAMI_IACK_SC
 #elif defined(CONFIG_ALPHA_POLARIS)
 # define IACK_SC	POLARIS_IACK_SC
+#elif defined(CONFIG_ALPHA_IRONGATE)
+# define IACK_SC	IRONGATE_IACK_SC
 #else
   /* This is bogus but necessary to get it to compile on all platforms. */
 # define IACK_SC	1L
@@ -182,13 +188,15 @@
 static inline void
 mask_irq(unsigned long irq)
 {
-	alpha_mv.update_irq_hw(irq, alpha_irq_mask |= 1UL << irq, 0);
+	set_bit(irq, _alpha_irq_masks);
+	alpha_mv.update_irq_hw(irq, alpha_irq_mask, 0);
 }
 
 static inline void
 unmask_irq(unsigned long irq)
 {
-	alpha_mv.update_irq_hw(irq, alpha_irq_mask &= ~(1UL << irq), 1);
+	clear_bit(irq, _alpha_irq_masks);
+	alpha_mv.update_irq_hw(irq, alpha_irq_mask, 1);
 }
 
 void
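
set_bit() and clear_bit() treat _alpha_irq_masks as a single long
bitmap: bit irq % 64 of word irq / 64. The old open-coded
1UL << irq shift could only ever address the first word. A minimal
userspace illustration of the indexing (plain C; the kernel
primitives do the same arithmetic but are also SMP-atomic):

	#include <stdio.h>

	int main(void)
	{
		unsigned long masks[2] = { ~0UL, ~0UL };  /* all IRQs masked */
		unsigned long irq = 71;                   /* > 63: word 1 */

		/* what clear_bit(irq, masks) does, minus the atomicity */
		masks[irq / 64] &= ~(1UL << (irq % 64));
		printf("word %lu = %#018lx\n", irq / 64, masks[irq / 64]);
		return 0;
	}
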
@@ -205,7 +213,7 @@
 disable_irq(unsigned int irq_nr)
 {
 	/* This works non-SMP, and SMP until we write code to distribute
-	   interrupts to more that cpu 0.  */
+	   interrupts to more than CPU 0.  */
 	disable_irq_nosync(irq_nr);
 }
 
@@ -384,6 +392,8 @@
 
 static void show(char * str, void *where);
 
+#define SYNC_OTHER_CPUS(x)	udelay((x)+1)
+
 static inline void
 wait_on_irq(int cpu, void *where)
 {
@@ -397,8 +407,7 @@
 		 * already executing in one..
 		 */
 		if (!atomic_read(&global_irq_count)) {
-			if (local_bh_count[cpu] ||
-			    !atomic_read(&global_bh_count))
+			if (local_bh_count || !atomic_read(&global_bh_count))
 				break;
 		}
 
@@ -412,18 +421,14 @@
 				count = MAXCOUNT;
 			}
 			__sti();
-#if 0
-			SYNC_OTHER_CORES(cpu);
-#else
-			udelay(cpu+1);
-#endif
+			SYNC_OTHER_CPUS(cpu);
 			__cli();
 
 			if (atomic_read(&global_irq_count))
 				continue;
-			if (global_irq_lock.lock)
+			if (spin_is_locked(&global_irq_lock))
 				continue;
-			if (!local_bh_count[cpu] &&
+			if (!local_bh_count &&
 			    atomic_read(&global_bh_count))
 				continue;
 			if (spin_trylock(&global_irq_lock))
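
Two things happen here: the open-coded udelay(cpu+1) gains a name,
SYNC_OTHER_CPUS(), and raw peeks at global_irq_lock.lock go through
spin_is_locked(). The per-CPU stagger matters: if every CPU backed
off for the same interval they would all retry the lock in lockstep.
Condensed shape of the resulting retry loop (sketch; the bh check is
omitted):

	for (;;) {
		__sti();                      /* let pending irqs in */
		SYNC_OTHER_CPUS(cpu);         /* udelay(cpu + 1) stagger */
		__cli();
		if (atomic_read(&global_irq_count))
			continue;             /* a CPU is still in an irq */
		if (spin_is_locked(&global_irq_lock))
			continue;             /* don't hammer a held lock */
		if (spin_trylock(&global_irq_lock))
			break;                /* got it */
	}
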
@@ -437,13 +442,8 @@
 {
 	if (!spin_trylock(&global_irq_lock)) {
 		/* do we already hold the lock? */
-		if (cpu == global_irq_holder) {
-#if 0
-			printk("get_irqlock: already held at %08lx\n",
-			       previous_irqholder);
-#endif
+		if (cpu == global_irq_holder)
 			return;
-		}
 		/* Uhhuh.. Somebody else got it. Wait.. */
 		spin_lock(&global_irq_lock);
 	}
@@ -469,25 +469,19 @@
 void
 __global_cli(void)
 {
-	int cpu;
-	void *where = __builtin_return_address(0);
-
 	/*
 	 * Maximize ipl.  If ipl was previously 0 and if this thread
 	 * is not in an irq, then take global_irq_lock.
 	 */
-	if ((swpipl(7) == 0) && !local_irq_count[cpu = smp_processor_id()])
-		get_irqlock(cpu, where);
+	if ((swpipl(7) == 0) && !local_irq_count)
+		get_irqlock(smp_processor_id(), __builtin_return_address(0));
 }
 
 void
 __global_sti(void)
 {
-        int cpu = smp_processor_id();
-
-        if (!local_irq_count[cpu]) {
-		release_irqlock(cpu);
-	}
+        if (!local_irq_count)
+		release_irqlock(smp_processor_id());
 	__sti();
 }
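
__global_cli() and __global_sti() are what the generic cli()/sti()
macros expand to on SMP Alpha, which is why synchronize_irq() below
can shrink to a cli(); sti() pair. Assumed mapping, along the lines
of the asm/system.h of this era:

	#ifdef __SMP__
	#define cli()	__global_cli()
	#define sti()	__global_sti()
	#else
	#define cli()	__cli()		/* plain local ipl change */
	#define sti()	__sti()
	#endif
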
 
@@ -512,7 +506,7 @@
         retval = 2 + local_enabled;
 
         /* Check for global flags if we're not in an interrupt.  */
-        if (!local_irq_count[cpu]) {
+        if (!local_irq_count) {
                 if (local_enabled)
                         retval = 1;
                 if (global_irq_holder == cpu)
@@ -566,11 +560,10 @@
 
 	hardirq_enter(cpu, irq);
 	barrier();
-	while (global_irq_lock.lock) {
+	while (spin_is_locked(&global_irq_lock)) {
 		if (cpu == global_irq_holder) {
-			int globl_locked = global_irq_lock.lock;
+			int globl_locked = spin_is_locked(&global_irq_lock);
 			int globl_icount = atomic_read(&global_irq_count);
-			int local_count = local_irq_count[cpu];
 
 			/* It is very important that we load the state
 			   variables before we do the first call to
@@ -578,9 +571,9 @@
 			   them...  */
 
 			printk("CPU[%d]: where [%p] glocked[%d] gicnt[%d]"
-			       " licnt[%d]\n",
+			       " licnt[%ld]\n",
 			       cpu, previous_irqholder, globl_locked,
-			       globl_icount, local_count);
+			       globl_icount, local_irq_count);
 #ifdef VERBOSE_IRQLOCK_DEBUGGING
 			printk("Performing backtrace on all CPUs,"
 			       " write this down!\n");
@@ -609,19 +602,17 @@
 #endif
         int cpu = smp_processor_id();
 
-	int global_count = atomic_read(&global_irq_count);
-        int local_count0 = local_irq_count[0];
-        int local_count1 = local_irq_count[1];
-        long hardirq_no0 = hardirq_no[0];
-        long hardirq_no1 = hardirq_no[1];
-
         printk("\n%s, CPU %d: %p\n", str, cpu, where);
-        printk("irq:  %d [%d(0x%016lx) %d(0x%016lx)]\n", global_count,
-               local_count0, hardirq_no0, local_count1, hardirq_no1);
+	printk("irq:  %d [%ld %ld]\n",
+	       atomic_read(&global_irq_count),
+	       cpu_data[0].irq_count,
+	       cpu_data[1].irq_count);
+
+	printk("bh:   %d [%ld %ld]\n",
+	       atomic_read(&global_bh_count),
+	       cpu_data[0].bh_count,
+	       cpu_data[1].bh_count);
 
-        printk("bh:   %d [%d %d]\n",
-	       atomic_read(&global_bh_count), local_bh_count[0],
-	       local_bh_count[1]);
 #if 0
         stack = (unsigned long *) &str;
         for (i = 40; i ; i--) {
@@ -644,6 +635,7 @@
                         count = ~0;
                 }
                 /* nothing .. wait for the other bh's to go away */
+		barrier();
         } while (atomic_read(&global_bh_count) != 0);
 }
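
The added barrier() is a compiler fence, not a hardware operation:
it stops GCC from caching memory values in registers across the loop
body, so every pass of the do/while really re-reads the shared
counters. Sketch of the classic definition:

	#define barrier() __asm__ __volatile__("" : : : "memory")
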
 
@@ -658,12 +650,8 @@
 void
 synchronize_bh(void)
 {
-	if (atomic_read(&global_bh_count)) {
-		int cpu = smp_processor_id();
-                if (!local_irq_count[cpu] && !local_bh_count[cpu]) {
+	if (atomic_read(&global_bh_count) && !in_interrupt())
 			wait_on_bh();
-		}
-        }
 }
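
in_interrupt() bundles exactly the test the removed lines spelled
out: is this CPU inside a hard irq or a bottom half? Sketch of the
definition of this era (the real one lives in asm/hardirq.h):

	#define in_interrupt()	(local_irq_count + local_bh_count != 0)
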
 
 /*
@@ -680,6 +668,7 @@
 void
 synchronize_irq(void)
 {
+#ifdef JOES_ORIGINAL_VERSION
 	int cpu = smp_processor_id();
 	int local_count;
 	int global_count;
@@ -688,7 +677,7 @@
 
 	mb();
 	do {
-		local_count = local_irq_count[cpu];
+		local_count = local_irq_count;
 		global_count = atomic_read(&global_irq_count);
 		if (DEBUG_SYNCHRONIZE_IRQ && (--countdown == 0)) {
 			printk("%d:%d/%d\n", cpu, local_count, global_count);
@@ -696,12 +685,19 @@
 			break;
 		}
 	} while (global_count != local_count);
+#else
+	if (atomic_read(&global_irq_count)) {
+		/* Stupid approach */
+		cli();
+		sti();
+	}
+#endif
 }
 
 #else /* !__SMP__ */
 
-#define irq_enter(cpu, irq)	(++local_irq_count[cpu])
-#define irq_exit(cpu, irq)	(--local_irq_count[cpu])
+#define irq_enter(cpu, irq)	(++local_irq_count)
+#define irq_exit(cpu, irq)	(--local_irq_count)
 
 #endif /* __SMP__ */
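
The replacement synchronize_irq() leans on the global cli semantics
above: on SMP, cli() does not return until global_irq_count has
drained and this CPU holds global_irq_lock, so any handler running
when we checked has finished by the time sti() drops the lock again.
Annotated restatement (hypothetical function name):

	void synchronize_irq_sketch(void)
	{
		if (atomic_read(&global_irq_count)) {
			cli();	/* spins in wait_on_irq() until no irq is live */
			sti();	/* merely releases the global lock */
		}
	}
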
 
@@ -816,7 +812,8 @@
 	unsigned long delay;
 	unsigned int i;
 
-	for (i = ACTUAL_NR_IRQS - 1; i > 0; i--) {
+	/* Handle only the first 64 IRQs here. */
+	for (i = (ACTUAL_NR_IRQS - 1) & 63; i > 0; i--) {
 		if (!(PROBE_MASK & (1UL << i))) {
 			continue;
 		}
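
PROBE_MASK is still a single 64-bit word, so autoprobing is clamped
to the low mask word: the loop now starts at (ACTUAL_NR_IRQS - 1) & 63
rather than ACTUAL_NR_IRQS - 1. Worked values:

	/*  ACTUAL_NR_IRQS = 128  ->  (128 - 1) & 63 = 63   (top of word 0)
	 *  ACTUAL_NR_IRQS =  48  ->  ( 48 - 1) & 63 = 47   (unchanged)    */
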
@@ -849,6 +846,7 @@
 {
 	int i;
 	
+	/* Handle only the first 64 IRQs here. */
         irqs &= alpha_irq_mask;
 	if (!irqs)
 		return 0;
@@ -933,6 +931,19 @@
 		       (expected?"expected.":"NOT expected!!!"));
 	if (expected)
 		return;
+
+	/* Just in case we get some incomplete arguments... */
+	if (!la_ptr) {
+	    if (!regs)
+		printk(KERN_CRIT "%s machine check: vector=0x%lx\n",
+		       machine, vector);
+	    else
+		printk(KERN_CRIT "%s machine check: vector=0x%lx"
+		       " pc=0x%lx ra=0x%lx args=0x%lx/0x%lx/0x%lx\n",
+		       machine, vector, regs->pc, regs->r26,
+		       regs->r16, regs->r17, regs->r18);
+	    return;
+	}
 
 	mchk_header = (struct el_common *)la_ptr;
 
