patch-2.2.14 linux/arch/s390/kernel/irq.c

diff -u --recursive --new-file v2.2.13/linux/arch/s390/kernel/irq.c linux/arch/s390/kernel/irq.c
@@ -0,0 +1,428 @@
+/*
+ *  arch/s390/kernel/irq.c
+ *
+ *  S390 version
+ *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Ingo Adlung (adlung@de.ibm.com)
+ *
+ *  Derived from "arch/i386/kernel/irq.c"
+ *    Copyright (C) 1992, 1999 Linus Torvalds, Ingo Molnar
+ *
+ *  S/390 I/O interrupt processing and I/O request processing are
+ *  implemented in linux/arch/s390/do_io.c
+ */
+
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/kernel_stat.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/timex.h>
+#include <linux/malloc.h>
+#include <linux/string.h>
+#include <linux/random.h>
+#include <linux/smp.h>
+#include <linux/tasks.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/bitops.h>
+#include <asm/smp.h>
+#include <asm/pgtable.h>
+#include <asm/delay.h>
+#include <asm/lowcore.h>
+
+#include "irq.h"
+
+
+void s390_init_IRQ(void);
+void s390_free_irq(unsigned int irq, void *dev_id);
+int  s390_request_irq( unsigned int   irq,
+                       void           (*handler)(int, void *, struct pt_regs *),
+                       unsigned long  irqflags,
+                       const char    *devname,
+                       void          *dev_id);
+
+atomic_t nmi_counter;
+spinlock_t s390_bh_lock = SPIN_LOCK_UNLOCKED;
+/*
+ * Dummy controller type for unused interrupts
+ */
+int  do_none(unsigned int irq, int cpu, struct pt_regs * regs) { return 0;}
+int  enable_none(unsigned int irq) { return(-ENODEV); }
+int  disable_none(unsigned int irq) { return(-ENODEV); }
+
+struct hw_interrupt_type no_irq_type = {
+	"none",
+	do_none,
+	enable_none,
+	disable_none
+};
+
+irq_desc_t irq_desc[NR_IRQS] = {
+	[0 ... (NR_IRQS-1)] = { 0, &no_irq_type, },
+};
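+
+#if 0	/* illustrative sketch -- not part of the original patch */
+/*
+ * A real interrupt controller would replace the dummy entry installed
+ * above.  The layout mirrors no_irq_type; every "sample_*" name here
+ * is hypothetical.
+ */
+extern int do_sample(unsigned int irq, int cpu, struct pt_regs *regs);
+extern int enable_sample(unsigned int irq);
+extern int disable_sample(unsigned int irq);
+
+static struct hw_interrupt_type sample_irq_type = {
+	"sample",
+	do_sample,
+	enable_sample,
+	disable_sample
+};
+
+/* installed with:  irq_desc[irq].handler = &sample_irq_type;  */
+#endif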
+
+#if 0
+/*
+ * The following vectors are part of the Linux architecture, there
+ * is no hardware IRQ pin equivalent for them, they are triggered
+ * through the ICC by us (IPIs), via smp_message_pass():
+ */
+BUILD_SMP_INTERRUPT(reschedule_interrupt)
+BUILD_SMP_INTERRUPT(invalidate_interrupt)
+BUILD_SMP_INTERRUPT(stop_cpu_interrupt)
+BUILD_SMP_INTERRUPT(mtrr_interrupt)
+BUILD_SMP_INTERRUPT(spurious_interrupt)
+#endif
+
+#if 0
+static void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }
+#endif
+
+int get_irq_list(char *buf)
+{
+	int i, j;
+	struct irqaction * action;
+	char *p = buf;
+
+	p += sprintf(p, "           ");
+	for (j=0; j<smp_num_cpus; j++)
+		p += sprintf(p, "CPU%d       ",j);
+	*p++ = '\n';
+
+	for (i = 0 ; i < NR_IRQS ; i++) {
+		action = irq_desc[i].action;
+		if (!action)
+			continue;
+		p += sprintf(p, "%3d: ",i);
+#ifndef __SMP__
+		p += sprintf(p, "%10u ", kstat_irqs(i));
+#else
+		for (j=0; j<smp_num_cpus; j++)
+			p += sprintf(p, "%10u ",
+				     kstat.irqs[cpu_logical_map(j)][i]);
+#endif
+		p += sprintf(p, " %14s", irq_desc[i].handler->typename);
+		p += sprintf(p, "  %s", action->name);
+
+		for (action=action->next; action; action = action->next) {
+			p += sprintf(p, ", %s", action->name);
+		}
+		*p++ = '\n';
+	}
+	p += sprintf(p, "NMI: %10u\n", atomic_read(&nmi_counter));
+#ifdef __SMP__
+	p += sprintf(p, "IPI: %10u\n", atomic_read(&ipi_count));
+#endif
+	return p - buf;
+}
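+
+/*
+ * Illustrative sketch (not part of the original patch): on a two-CPU
+ * machine the buffer built above comes out roughly as below; the
+ * interrupt number, the counts, the typename and the "dasd" device
+ * name are all made up.
+ *
+ *                CPU0       CPU1
+ *  14:       1297        842     <typename>  dasd
+ * NMI:          0
+ * IPI:         17
+ */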
+
+/*
+ * Global interrupt locks for SMP. Allow interrupts to come in on any
+ * CPU, yet make cli/sti act globally to protect critical regions..
+ */
+#ifdef __SMP__
+atomic_t global_irq_holder = ATOMIC_INIT(NO_PROC_ID);
+atomic_t global_irq_lock;
+atomic_t global_irq_count = ATOMIC_INIT(0);
+
+atomic_t global_bh_count;
+atomic_t global_bh_lock;
+
+/*
+ * "global_cli()" is a special case, in that it can hold the
+ * interrupts disabled for a longish time, and also because
+ * we may be doing TLB invalidates when holding the global
+ * IRQ lock for historical reasons. Thus we may need to check
+ * SMP invalidate events specially by hand here (but not in
+ * any normal spinlocks)
+ *
+ * Thankfully we don't need this as we can deliver flush tlbs with
+ * interrupts disabled DJB :-)
+ */
+#define check_smp_invalidate(cpu)
+
+static void show(char * str)
+{
+	int i;
+	unsigned long *stack;
+	int cpu = smp_processor_id();
+
+	printk("\n%s, CPU %d:\n", str, cpu);
+	printk("irq:  %d [%d]\n",
+	       atomic_read(&global_irq_count),atomic_read(&S390_lowcore.local_irq_count));
+	printk("bh:   %d [%d]\n",
+	       atomic_read(&global_bh_count),atomic_read(&S390_lowcore.local_bh_count));
+	stack = (unsigned long *) &str;
+	for (i = 40; i ; i--) {
+		unsigned long x = *++stack;
+		if (x > (unsigned long) &init_task_union && x < (unsigned long) &vsprintf) {
+			printk("<[%08lx]> ", x);
+		}
+	}
+}
+
+#define MAXCOUNT 100000000
+
+static inline void wait_on_bh(void)
+{
+	int count = MAXCOUNT;
+	do {
+		if (!--count) {
+			show("wait_on_bh");
+			count = ~0;
+		}
+		/* nothing .. wait for the other bh's to go away */
+	} while (atomic_read(&global_bh_count) != 0);
+}
+
+static inline void wait_on_irq(int cpu)
+{
+	int count = MAXCOUNT;
+
+	for (;;) {
+
+		/*
+		 * Wait until all interrupts are gone. Wait
+		 * for bottom half handlers unless we're
+		 * already executing in one..
+		 */
+		if (!atomic_read(&global_irq_count)) {
+			if (atomic_read(&safe_get_cpu_lowcore(cpu).local_bh_count)||
+			    !atomic_read(&global_bh_count))
+				break;
+		}
+
+		/* Duh, we have to loop. Release the lock to avoid deadlocks */
+		clear_bit(0,&global_irq_lock);
+
+		for (;;) {
+			if (!--count) {
+				show("wait_on_irq");
+				count = ~0;
+			}
+			__sti();
+			SYNC_OTHER_CORES(cpu);
+			__cli();
+			check_smp_invalidate(cpu);
+			if (atomic_read(&global_irq_count))
+				continue;
+			if (atomic_read(&global_irq_lock))
+				continue;
+			if (!(atomic_read(&safe_get_cpu_lowcore(cpu).local_bh_count))
+			    && atomic_read(&global_bh_count))
+				continue;
+			if (!test_and_set_bit(0,&global_irq_lock))
+				break;
+		}
+	}
+}
+
+/*
+ * This is called when we want to synchronize with
+ * bottom half handlers. We need to wait until
+ * no other CPU is executing any bottom half handler.
+ *
+ * Don't wait if we're already running in an interrupt
+ * context or are inside a bh handler.
+ */
+void synchronize_bh(void)
+{
+	if (atomic_read(&global_bh_count) && !in_interrupt())
+		wait_on_bh();
+}
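+
+#if 0	/* illustrative sketch -- not part of the original patch */
+/*
+ * Typical use: stop a bottom half, wait out any instance still running
+ * on another CPU, then free the shared state.  MY_BH and my_bh_data
+ * are hypothetical.
+ */
+extern void *my_bh_data;	/* state shared with the bottom half */
+
+static void sample_bh_teardown(void)
+{
+	remove_bh(MY_BH);	/* no new activations after this */
+	synchronize_bh();	/* wait for a handler on another CPU */
+	kfree(my_bh_data);	/* now safe to free what the bh touched */
+}
+#endif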
+
+/*
+ * This is called when we want to synchronize with
+ * interrupts. We may for example tell a device to
+ * stop sending interrupts: but to make sure there
+ * are no interrupts that are executing on another
+ * CPU we need to call this function.
+ */
+void synchronize_irq(void)
+{
+	if (atomic_read(&global_irq_count)) {
+		/* Stupid approach */
+		cli();
+		sti();
+	}
+}
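+
+#if 0	/* illustrative sketch -- not part of the original patch */
+/*
+ * The scenario described above: quiesce the device, then make sure no
+ * handler is still executing on another CPU.  The sample_* names are
+ * hypothetical.
+ */
+struct sample_dev { unsigned int irq; };	/* hypothetical device */
+extern void sample_stop_interrupts(struct sample_dev *);
+
+static void sample_shutdown(struct sample_dev *dev)
+{
+	sample_stop_interrupts(dev);	/* tell the device to be quiet */
+	synchronize_irq();		/* drain handlers on other CPUs */
+	free_irq(dev->irq, dev);	/* nothing can be executing now */
+}
+#endif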
+
+static inline void get_irqlock(int cpu)
+{
+	if (test_and_set_bit(0,&global_irq_lock)) {
+		/* do we already hold the lock? */
+		if ( cpu == atomic_read(&global_irq_holder))
+			return;
+		/* Uhhuh.. Somebody else got it. Wait.. */
+		do {
+			do {
+				check_smp_invalidate(cpu);
+			} while (test_bit(0,&global_irq_lock));
+		} while (test_and_set_bit(0,&global_irq_lock));
+	}
+	/*
+	 * We also need to make sure that nobody else is running
+	 * in an interrupt context.
+	 */
+	wait_on_irq(cpu);
+
+	/*
+	 * Ok, finally..
+	 */
+	atomic_set(&global_irq_holder,cpu);
+}
+
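+/*
+ * The code below treats bit 25 of the __save_flags() value as the
+ * interrupt-enable flag; the "EFLAGS" name is a carry-over from the
+ * i386 code this file is derived from.
+ */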
+#define EFLAGS_I_SHIFT 25
+
+/*
+ * A global "cli()" while in an interrupt context
+ * turns into just a local cli(). Interrupts
+ * should use spinlocks for the (very unlikely)
+ * case that they ever want to protect against
+ * each other.
+ *
+ * If we already have local interrupts disabled,
+ * this will not turn a local disable into a
+ * global one (problems with spinlocks: this makes
+ * save_flags+cli+sti usable inside a spinlock).
+ */
+void __global_cli(void)
+{
+	unsigned int flags;
+
+	__save_flags(flags);
+	if (flags & (1 << EFLAGS_I_SHIFT)) {
+		int cpu = smp_processor_id();
+		__cli();
+		if (!atomic_read(&S390_lowcore.local_irq_count))
+			get_irqlock(cpu);
+	}
+}
+
+void __global_sti(void)
+{
+
+	if (!atomic_read(&S390_lowcore.local_irq_count))
+		release_irqlock(smp_processor_id());
+	__sti();
+}
+
+/*
+ * SMP flags value to restore to:
+ * 0 - global cli
+ * 1 - global sti
+ * 2 - local cli
+ * 3 - local sti
+ */
+unsigned long __global_save_flags(void)
+{
+	int retval;
+	int local_enabled;
+	unsigned long flags;
+
+	__save_flags(flags);
+	local_enabled = (flags >> EFLAGS_I_SHIFT) & 1;
+	/* default to local */
+	retval = 2 + local_enabled;
+
+	/* check for global flags if we're not in an interrupt */
+	if (!atomic_read(&S390_lowcore.local_irq_count))
+	{
+		if (local_enabled)
+			retval = 1;
+		if (atomic_read(&global_irq_holder)== smp_processor_id())
+			retval = 0;
+	}
+	return retval;
+}
+
+void __global_restore_flags(unsigned long flags)
+{
+	switch (flags) {
+	case 0:
+		__global_cli();
+		break;
+	case 1:
+		__global_sti();
+		break;
+	case 2:
+		__cli();
+		break;
+	case 3:
+		__sti();
+		break;
+	default:
+		printk("global_restore_flags: %08lx (%08lx)\n",
+		       flags, (&flags)[-1]);
+	}
+}
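+
+#if 0	/* illustrative sketch -- not part of the original patch */
+/*
+ * Driver-side view: on SMP, save_flags()/cli()/restore_flags() resolve
+ * to the __global_* routines above, so "flags" carries one of the four
+ * values 0-3 listed before __global_save_flags().
+ */
+static void sample_critical_section(void)
+{
+	unsigned long flags;
+
+	save_flags(flags);	/* 0..3 as per the table above */
+	cli();			/* global cli unless in interrupt context */
+	/* ... touch data shared with handlers on any CPU ... */
+	restore_flags(flags);	/* return to exactly the saved state */
+}
+#endif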
+
+#endif
+
+/*
+ * Note : This function should be eliminated as it doesn't comply with the
+ *         S/390 irq scheme we have implemented ...
+ */
+int handle_IRQ_event(unsigned int irq, int cpu, struct pt_regs * regs)
+{
+	struct irqaction * action;
+	int                status;
+
+	status = 0;
+	action = irq_desc[irq].action;
+
+	if (action) {
+		status |= 1;
+
+		if (!(action->flags & SA_INTERRUPT))
+			__sti();
+
+		do {
+			status |= action->flags;
+			action->handler(irq, action->dev_id, regs);
+			action = action->next;
+		} while (action);
+		if (status & SA_SAMPLE_RANDOM)
+			add_interrupt_randomness(irq);
+		__cli();
+	}
+
+	return status;
+}
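+
+#if 0	/* illustrative sketch -- not part of the original patch */
+/*
+ * A handler registered without SA_INTERRUPT runs with interrupts
+ * re-enabled (the __sti() above); with SA_SAMPLE_RANDOM its arrival
+ * times feed add_interrupt_randomness().  sample_handler is
+ * hypothetical.
+ */
+static void sample_handler(int irq, void *dev_id, struct pt_regs *regs)
+{
+	/* acknowledge the device and service the request */
+}
+#endif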
+
+
+void enable_nop(int irq)
+{
+}
+
+__initfunc(void init_IRQ(void))
+{
+
+   s390_init_IRQ();
+
+}
+
+
+void free_irq(unsigned int irq, void *dev_id)
+{
+   s390_free_irq( irq, dev_id);
+}
+
+
+int request_irq( unsigned int   irq,
+                 void           (*handler)(int, void *, struct pt_regs *),
+                 unsigned long  irqflags,
+                 const char    *devname,
+                 void          *dev_id)
+{
+   return( s390_request_irq( irq, handler, irqflags, devname, dev_id ) );
+
+}
+
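+
+#if 0	/* illustrative sketch -- not part of the original patch */
+/*
+ * Registering a handler through the wrapper above; the return code
+ * comes straight from the s390-specific layer.  The sample_* names
+ * are hypothetical.
+ */
+struct sample_dev { unsigned int irq; };
+extern void sample_handler(int, void *, struct pt_regs *);
+
+static int sample_attach(struct sample_dev *dev)
+{
+	int rc = request_irq(dev->irq, sample_handler, SA_INTERRUPT,
+			     "sample", dev);
+	if (rc)
+		return rc;	/* e.g. -EINVAL or -EBUSY */
+	return 0;
+}
+#endif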
