genirq: Provide compat handling for chip->disable()/shutdown()
/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include "internals.h"

static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data)
{
        struct irq_desc *desc;
        unsigned long flags;

        desc = irq_to_desc(irq);
        if (!desc) {
                WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq);
                return;
        }

        /* Ensure we don't have leftover values from a previous use of this irq */
        raw_spin_lock_irqsave(&desc->lock, flags);
        desc->status = IRQ_DISABLED;
        desc->irq_data.chip = &no_irq_chip;
        desc->handle_irq = handle_bad_irq;
        desc->depth = 1;
        desc->irq_data.msi_desc = NULL;
        desc->irq_data.handler_data = NULL;
        if (!keep_chip_data)
                desc->irq_data.chip_data = NULL;
        desc->action = NULL;
        desc->irq_count = 0;
        desc->irqs_unhandled = 0;
#ifdef CONFIG_SMP
        cpumask_setall(desc->irq_data.affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
        cpumask_clear(desc->pending_mask);
#endif
#endif
        raw_spin_unlock_irqrestore(&desc->lock, flags);
}

/**
 *      dynamic_irq_init - initialize a dynamically allocated irq
 *      @irq:   irq number to initialize
 */
void dynamic_irq_init(unsigned int irq)
{
        dynamic_irq_init_x(irq, false);
}

/**
 *      dynamic_irq_init_keep_chip_data - initialize a dynamically allocated irq
 *      @irq:   irq number to initialize
 *
 *      does not set irq_to_desc(irq)->irq_data.chip_data to NULL
 */
void dynamic_irq_init_keep_chip_data(unsigned int irq)
{
        dynamic_irq_init_x(irq, true);
}

static void dynamic_irq_cleanup_x(unsigned int irq, bool keep_chip_data)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        if (!desc) {
                WARN(1, KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq);
                return;
        }

        raw_spin_lock_irqsave(&desc->lock, flags);
        if (desc->action) {
                raw_spin_unlock_irqrestore(&desc->lock, flags);
                WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n",
                        irq);
                return;
        }
        desc->irq_data.msi_desc = NULL;
        desc->irq_data.handler_data = NULL;
        if (!keep_chip_data)
                desc->irq_data.chip_data = NULL;
        desc->handle_irq = handle_bad_irq;
        desc->irq_data.chip = &no_irq_chip;
        desc->name = NULL;
        clear_kstat_irqs(desc);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
}

/**
 *      dynamic_irq_cleanup - cleanup a dynamically allocated irq
 *      @irq:   irq number to clean up
 */
void dynamic_irq_cleanup(unsigned int irq)
{
        dynamic_irq_cleanup_x(irq, false);
}

/**
 *      dynamic_irq_cleanup_keep_chip_data - cleanup a dynamically allocated irq
 *      @irq:   irq number to clean up
 *
 *      does not set irq_to_desc(irq)->irq_data.chip_data to NULL
 */
void dynamic_irq_cleanup_keep_chip_data(unsigned int irq)
{
        dynamic_irq_cleanup_x(irq, true);
}


/**
 *      set_irq_chip - set the irq chip for an irq
 *      @irq:   irq number
 *      @chip:  pointer to irq chip description structure
 */
int set_irq_chip(unsigned int irq, struct irq_chip *chip)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        if (!desc) {
                WARN(1, KERN_ERR "Trying to install chip for IRQ%d\n", irq);
                return -EINVAL;
        }

        if (!chip)
                chip = &no_irq_chip;

        raw_spin_lock_irqsave(&desc->lock, flags);
        irq_chip_set_defaults(chip);
        desc->irq_data.chip = chip;
        raw_spin_unlock_irqrestore(&desc->lock, flags);

        return 0;
}
EXPORT_SYMBOL(set_irq_chip);

/**
 *      set_irq_type - set the irq trigger type for an irq
 *      @irq:   irq number
 *      @type:  IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int set_irq_type(unsigned int irq, unsigned int type)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
        int ret = -ENXIO;

        if (!desc) {
                printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
                return -ENODEV;
        }

        type &= IRQ_TYPE_SENSE_MASK;
        if (type == IRQ_TYPE_NONE)
                return 0;

        raw_spin_lock_irqsave(&desc->lock, flags);
        ret = __irq_set_trigger(desc, irq, type);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return ret;
}
EXPORT_SYMBOL(set_irq_type);
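
/*
 * Illustrative sketch: how a driver or board file would typically configure
 * the trigger type of a line before requesting it.  The irq number and the
 * helper name are hypothetical, not part of this file.
 */
#if 0	/* example only */
#define EXAMPLE_IRQ	42	/* hypothetical line number */

static int example_config_irq(void)
{
        /* falling-edge trigger; the IRQ_TYPE_* constants come from
         * include/linux/irq.h */
        return set_irq_type(EXAMPLE_IRQ, IRQ_TYPE_EDGE_FALLING);
}
#endif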

/**
 *      set_irq_data - set irq handler data for an irq
 *      @irq:   Interrupt number
 *      @data:  Pointer to interrupt specific data
 *
 *      Set the hardware irq controller data for an irq
 */
int set_irq_data(unsigned int irq, void *data)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        if (!desc) {
                printk(KERN_ERR
                       "Trying to install controller data for IRQ%d\n", irq);
                return -EINVAL;
        }

        raw_spin_lock_irqsave(&desc->lock, flags);
        desc->irq_data.handler_data = data;
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return 0;
}
EXPORT_SYMBOL(set_irq_data);

/**
 *      set_irq_msi - set MSI descriptor data for an irq
 *      @irq:   Interrupt number
 *      @entry: Pointer to MSI descriptor data
 *
 *      Set the MSI descriptor entry for an irq
 */
int set_irq_msi(unsigned int irq, struct msi_desc *entry)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        if (!desc) {
                printk(KERN_ERR
                       "Trying to install msi data for IRQ%d\n", irq);
                return -EINVAL;
        }

        raw_spin_lock_irqsave(&desc->lock, flags);
        desc->irq_data.msi_desc = entry;
        if (entry)
                entry->irq = irq;
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return 0;
}

/**
 *      set_irq_chip_data - set irq chip data for an irq
 *      @irq:   Interrupt number
 *      @data:  Pointer to chip specific data
 *
 *      Set the hardware irq chip data for an irq
 */
int set_irq_chip_data(unsigned int irq, void *data)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        if (!desc) {
                printk(KERN_ERR
                       "Trying to install chip data for IRQ%d\n", irq);
                return -EINVAL;
        }

        if (!desc->irq_data.chip) {
                printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq);
                return -EINVAL;
        }

        raw_spin_lock_irqsave(&desc->lock, flags);
        desc->irq_data.chip_data = data;
        raw_spin_unlock_irqrestore(&desc->lock, flags);

        return 0;
}
EXPORT_SYMBOL(set_irq_chip_data);
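
/*
 * Illustrative sketch: a common pattern for set_irq_chip_data().  A driver
 * stores a pointer to its per-controller state so its irq_chip callbacks
 * can reach it through irq_data->chip_data.  The structure and callback
 * below are hypothetical, not part of this file.
 */
#if 0	/* example only */
struct example_gpio_bank {
        void __iomem    *regs;          /* hypothetical register window */
        unsigned int    irq_base;       /* first irq handled by this bank */
};

static void example_bank_mask(struct irq_data *data)
{
        struct example_gpio_bank *bank = data->chip_data;

        /* data->irq - bank->irq_base selects the bit to set in the mask
         * register behind bank->regs; the actual MMIO write is elided. */
        (void)bank;
}

/* at setup time, before requesting the irqs: */
/*      set_irq_chip_data(irq, bank);   */
#endif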

/**
 *      set_irq_nested_thread - Set/Reset the IRQ_NESTED_THREAD flag of an irq
 *
 *      @irq:   Interrupt number
 *      @nest:  0 to clear / 1 to set the IRQ_NESTED_THREAD flag
 *
 *      The IRQ_NESTED_THREAD flag indicates that on
 *      request_threaded_irq() no separate interrupt thread should be
 *      created for the irq, as the handlers are called nested in the
 *      context of a demultiplexing interrupt handler thread.
 */
void set_irq_nested_thread(unsigned int irq, int nest)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        if (!desc)
                return;

        raw_spin_lock_irqsave(&desc->lock, flags);
        if (nest)
                desc->status |= IRQ_NESTED_THREAD;
        else
                desc->status &= ~IRQ_NESTED_THREAD;
        raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(set_irq_nested_thread);

/*
 * default enable function
 */
static void default_enable(struct irq_data *data)
{
        struct irq_desc *desc = irq_data_to_desc(data);

        desc->irq_data.chip->irq_unmask(&desc->irq_data);
        desc->status &= ~IRQ_MASKED;
}

/*
 * default disable function
 */
static void default_disable(struct irq_data *data)
{
}

/*
 * default startup function
 */
static unsigned int default_startup(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        desc->irq_data.chip->irq_enable(&desc->irq_data);
        return 0;
}

/*
 * default shutdown function
 */
static void default_shutdown(struct irq_data *data)
{
        struct irq_desc *desc = irq_data_to_desc(data);

        desc->irq_data.chip->irq_mask(&desc->irq_data);
        desc->status |= IRQ_MASKED;
}

/* Temporary migration helpers */
static void compat_irq_mask(struct irq_data *data)
{
        data->chip->mask(data->irq);
}

static void compat_irq_unmask(struct irq_data *data)
{
        data->chip->unmask(data->irq);
}

static void compat_irq_ack(struct irq_data *data)
{
        data->chip->ack(data->irq);
}

static void compat_irq_mask_ack(struct irq_data *data)
{
        data->chip->mask_ack(data->irq);
}

static void compat_irq_eoi(struct irq_data *data)
{
        data->chip->eoi(data->irq);
}

static void compat_irq_enable(struct irq_data *data)
{
        data->chip->enable(data->irq);
}

static void compat_irq_disable(struct irq_data *data)
{
        data->chip->disable(data->irq);
}

static void compat_irq_shutdown(struct irq_data *data)
{
        data->chip->shutdown(data->irq);
}

static void compat_bus_lock(struct irq_data *data)
{
        data->chip->bus_lock(data->irq);
}

static void compat_bus_sync_unlock(struct irq_data *data)
{
        data->chip->bus_sync_unlock(data->irq);
}

/*
 * Fixup enable/disable function pointers
 */
void irq_chip_set_defaults(struct irq_chip *chip)
{
        /*
         * Compat fixup functions need to be before we set the
         * defaults for enable/disable/startup/shutdown
         */
        if (chip->enable)
                chip->irq_enable = compat_irq_enable;
        if (chip->disable)
                chip->irq_disable = compat_irq_disable;
        if (chip->shutdown)
                chip->irq_shutdown = compat_irq_shutdown;

        /*
         * The real defaults
         */
        if (!chip->irq_enable)
                chip->irq_enable = default_enable;
        if (!chip->irq_disable)
                chip->irq_disable = default_disable;
        if (!chip->startup)
                chip->startup = default_startup;
        /*
         * If the user provided their own irq_disable, use it as the
         * shutdown fallback. If chip->irq_disable is still set to
         * default_disable, we must use default_shutdown instead,
         * otherwise the irq line is not disabled on free_irq():
         */
        if (!chip->irq_shutdown)
                chip->irq_shutdown = chip->irq_disable != default_disable ?
                        chip->irq_disable : default_shutdown;
        if (!chip->end)
                chip->end = dummy_irq_chip.end;

        /*
         * Now fix up the remaining compat handlers
         */
        if (chip->bus_lock)
                chip->irq_bus_lock = compat_bus_lock;
        if (chip->bus_sync_unlock)
                chip->irq_bus_sync_unlock = compat_bus_sync_unlock;
        if (chip->mask)
                chip->irq_mask = compat_irq_mask;
        if (chip->unmask)
                chip->irq_unmask = compat_irq_unmask;
        if (chip->ack)
                chip->irq_ack = compat_irq_ack;
        if (chip->mask_ack)
                chip->irq_mask_ack = compat_irq_mask_ack;
        if (chip->eoi)
                chip->irq_eoi = compat_irq_eoi;
}
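
/*
 * Illustrative sketch: a legacy irq_chip that still provides the old
 * unsigned-int based callbacks, including ->disable() and ->shutdown().
 * When set_irq_chip() runs irq_chip_set_defaults() on it, the compat
 * wrappers above get installed, so the core can keep calling the new irq_*
 * entry points.  All names and callback bodies are hypothetical
 * placeholders, not part of this file.
 */
#if 0	/* example only */
static void legacy_mask(unsigned int irq)       { /* set hw mask bit */ }
static void legacy_unmask(unsigned int irq)     { /* clear hw mask bit */ }
static void legacy_ack(unsigned int irq)        { /* ack the latched edge */ }
static void legacy_disable(unsigned int irq)    { /* old style disable */ }
static void legacy_shutdown(unsigned int irq)   { /* power down the line */ }

static struct irq_chip legacy_chip = {
        .name           = "legacy-example",
        .mask           = legacy_mask,          /* -> irq_mask     = compat_irq_mask */
        .unmask         = legacy_unmask,        /* -> irq_unmask   = compat_irq_unmask */
        .ack            = legacy_ack,           /* -> irq_ack      = compat_irq_ack */
        .disable        = legacy_disable,       /* -> irq_disable  = compat_irq_disable */
        .shutdown       = legacy_shutdown,      /* -> irq_shutdown = compat_irq_shutdown */
};
#endif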

static inline void mask_ack_irq(struct irq_desc *desc)
{
        if (desc->irq_data.chip->irq_mask_ack)
                desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
        else {
                desc->irq_data.chip->irq_mask(&desc->irq_data);
                if (desc->irq_data.chip->irq_ack)
                        desc->irq_data.chip->irq_ack(&desc->irq_data);
        }
        desc->status |= IRQ_MASKED;
}

static inline void mask_irq(struct irq_desc *desc)
{
        if (desc->irq_data.chip->irq_mask) {
                desc->irq_data.chip->irq_mask(&desc->irq_data);
                desc->status |= IRQ_MASKED;
        }
}

static inline void unmask_irq(struct irq_desc *desc)
{
        if (desc->irq_data.chip->irq_unmask) {
                desc->irq_data.chip->irq_unmask(&desc->irq_data);
                desc->status &= ~IRQ_MASKED;
        }
}

/*
 *      handle_nested_irq - Handle a nested irq from an irq thread
 *      @irq:   the interrupt number
 *
 *      Handle interrupts which are nested into a threaded interrupt
 *      handler. The handler function is called inside the calling
 *      thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action;
        irqreturn_t action_ret;

        might_sleep();

        raw_spin_lock_irq(&desc->lock);

        kstat_incr_irqs_this_cpu(irq, desc);

        action = desc->action;
        if (unlikely(!action || (desc->status & IRQ_DISABLED)))
                goto out_unlock;

        desc->status |= IRQ_INPROGRESS;
        raw_spin_unlock_irq(&desc->lock);

        action_ret = action->thread_fn(action->irq, action->dev_id);
        if (!noirqdebug)
                note_interrupt(irq, desc, action_ret);

        raw_spin_lock_irq(&desc->lock);
        desc->status &= ~IRQ_INPROGRESS;

out_unlock:
        raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
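
/*
 * Illustrative sketch: a threaded handler for a demultiplexing interrupt
 * (e.g. an I2C irq expander) handing the decoded child interrupts to
 * handle_nested_irq().  The child irqs would have been marked with
 * set_irq_nested_thread(irq, 1).  The structure, names and the pending
 * read are hypothetical, not part of this file.
 */
#if 0	/* example only */
struct example_expander {
        unsigned int irq_base;
        unsigned int nr_irqs;
};

static unsigned long example_read_pending(struct example_expander *chip)
{
        /* read and clear the expander's interrupt status register */
        return 0;
}

static irqreturn_t example_demux_thread_fn(int irq, void *dev_id)
{
        struct example_expander *chip = dev_id;
        unsigned long pending = example_read_pending(chip);
        unsigned int child;

        for_each_set_bit(child, &pending, chip->nr_irqs)
                handle_nested_irq(chip->irq_base + child);

        return IRQ_HANDLED;
}
#endif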

/**
 *      handle_simple_irq - Simple and software-decoded IRQs.
 *      @irq:   the interrupt number
 *      @desc:  the interrupt description structure for this irq
 *
 *      Simple interrupts are either sent from a demultiplexing interrupt
 *      handler or come from hardware, where no interrupt hardware control
 *      is necessary.
 *
 *      Note: The caller is expected to handle the ack, clear, mask and
 *      unmask issues if necessary.
 */
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
        struct irqaction *action;
        irqreturn_t action_ret;

        raw_spin_lock(&desc->lock);

        if (unlikely(desc->status & IRQ_INPROGRESS))
                goto out_unlock;
        desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
        kstat_incr_irqs_this_cpu(irq, desc);

        action = desc->action;
        if (unlikely(!action || (desc->status & IRQ_DISABLED)))
                goto out_unlock;

        desc->status |= IRQ_INPROGRESS;
        raw_spin_unlock(&desc->lock);

        action_ret = handle_IRQ_event(irq, action);
        if (!noirqdebug)
                note_interrupt(irq, desc, action_ret);

        raw_spin_lock(&desc->lock);
        desc->status &= ~IRQ_INPROGRESS;
out_unlock:
        raw_spin_unlock(&desc->lock);
}

/**
 *      handle_level_irq - Level type irq handler
 *      @irq:   the interrupt number
 *      @desc:  the interrupt description structure for this irq
 *
 *      Level type interrupts are active as long as the hardware line has
 *      the active level. This may require masking the interrupt and
 *      unmasking it after the associated handler has acknowledged the
 *      device, so that the interrupt line is back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
        struct irqaction *action;
        irqreturn_t action_ret;

        raw_spin_lock(&desc->lock);
        mask_ack_irq(desc);

        if (unlikely(desc->status & IRQ_INPROGRESS))
                goto out_unlock;
        desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
        kstat_incr_irqs_this_cpu(irq, desc);

        /*
         * If it's disabled or no action is available,
         * keep it masked and get out of here
         */
        action = desc->action;
        if (unlikely(!action || (desc->status & IRQ_DISABLED)))
                goto out_unlock;

        desc->status |= IRQ_INPROGRESS;
        raw_spin_unlock(&desc->lock);

        action_ret = handle_IRQ_event(irq, action);
        if (!noirqdebug)
                note_interrupt(irq, desc, action_ret);

        raw_spin_lock(&desc->lock);
        desc->status &= ~IRQ_INPROGRESS;

        if (!(desc->status & (IRQ_DISABLED | IRQ_ONESHOT)))
                unmask_irq(desc);
out_unlock:
        raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);

/**
 *      handle_fasteoi_irq - irq handler for transparent controllers
 *      @irq:   the interrupt number
 *      @desc:  the interrupt description structure for this irq
 *
 *      Only a single callback will be issued to the chip: an ->eoi()
 *      call when the interrupt has been serviced. This enables support
 *      for modern forms of interrupt handlers, which handle the flow
 *      details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
        struct irqaction *action;
        irqreturn_t action_ret;

        raw_spin_lock(&desc->lock);

        if (unlikely(desc->status & IRQ_INPROGRESS))
                goto out;

        desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
        kstat_incr_irqs_this_cpu(irq, desc);

        /*
         * If it's disabled or no action is available,
         * then mask it and get out of here:
         */
        action = desc->action;
        if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
                desc->status |= IRQ_PENDING;
                mask_irq(desc);
                goto out;
        }

        desc->status |= IRQ_INPROGRESS;
        desc->status &= ~IRQ_PENDING;
        raw_spin_unlock(&desc->lock);

        action_ret = handle_IRQ_event(irq, action);
        if (!noirqdebug)
                note_interrupt(irq, desc, action_ret);

        raw_spin_lock(&desc->lock);
        desc->status &= ~IRQ_INPROGRESS;
out:
        desc->irq_data.chip->irq_eoi(&desc->irq_data);

        raw_spin_unlock(&desc->lock);
}

/**
 *      handle_edge_irq - edge type IRQ handler
 *      @irq:   the interrupt number
 *      @desc:  the interrupt description structure for this irq
 *
 *      Interrupts occur on the falling and/or rising edge of a hardware
 *      signal. The occurrence is latched into the irq controller hardware
 *      and must be acked in order to be reenabled. After the ack, another
 *      interrupt can happen on the same source even before the first one
 *      is handled by the associated event handler. If this happens, it
 *      might be necessary to disable (mask) the interrupt, depending on
 *      the controller hardware. This requires reenabling the interrupt
 *      inside the loop which handles the interrupts that arrived while
 *      the handler was running. If all pending interrupts are handled,
 *      the loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
        raw_spin_lock(&desc->lock);

        desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);

        /*
         * If we're currently running this IRQ, or it's disabled,
         * we shouldn't process the IRQ. Mark it pending, handle
         * the necessary masking and get out
         */
        if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
                    !desc->action)) {
                desc->status |= (IRQ_PENDING | IRQ_MASKED);
                mask_ack_irq(desc);
                goto out_unlock;
        }
        kstat_incr_irqs_this_cpu(irq, desc);

        /* Start handling the irq */
        desc->irq_data.chip->irq_ack(&desc->irq_data);

        /* Mark the IRQ as currently in progress. */
        desc->status |= IRQ_INPROGRESS;

        do {
                struct irqaction *action = desc->action;
                irqreturn_t action_ret;

                if (unlikely(!action)) {
                        mask_irq(desc);
                        goto out_unlock;
                }

                /*
                 * If another irq arrived while we were handling
                 * one, we could have masked the irq.
                 * Reenable it, if it was not disabled in the meantime.
                 */
                if (unlikely((desc->status &
                               (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
                              (IRQ_PENDING | IRQ_MASKED))) {
                        unmask_irq(desc);
                }

                desc->status &= ~IRQ_PENDING;
                raw_spin_unlock(&desc->lock);
                action_ret = handle_IRQ_event(irq, action);
                if (!noirqdebug)
                        note_interrupt(irq, desc, action_ret);
                raw_spin_lock(&desc->lock);

        } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);

        desc->status &= ~IRQ_INPROGRESS;
out_unlock:
        raw_spin_unlock(&desc->lock);
}

/**
 *      handle_percpu_irq - Per CPU local irq handler
 *      @irq:   the interrupt number
 *      @desc:  the interrupt description structure for this irq
 *
 *      Per CPU interrupts on SMP machines without locking requirements
 */
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
        irqreturn_t action_ret;

        kstat_incr_irqs_this_cpu(irq, desc);

        if (desc->irq_data.chip->irq_ack)
                desc->irq_data.chip->irq_ack(&desc->irq_data);

        action_ret = handle_IRQ_event(irq, desc->action);
        if (!noirqdebug)
                note_interrupt(irq, desc, action_ret);

        if (desc->irq_data.chip->irq_eoi)
                desc->irq_data.chip->irq_eoi(&desc->irq_data);
}

void
__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
                  const char *name)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        if (!desc) {
                printk(KERN_ERR
                       "Trying to install type control for IRQ%d\n", irq);
                return;
        }

        if (!handle)
                handle = handle_bad_irq;
        else if (desc->irq_data.chip == &no_irq_chip) {
                printk(KERN_WARNING "Trying to install %sinterrupt handler "
                       "for IRQ%d\n", is_chained ? "chained " : "", irq);
                /*
                 * Some ARM implementations install a handler for really dumb
                 * interrupt hardware without setting an irq_chip. This worked
                 * with the ARM no_irq_chip, but the check in setup_irq would
                 * prevent us from setting up the interrupt at all. Switch it
                 * to dummy_irq_chip for easy transition.
                 */
                desc->irq_data.chip = &dummy_irq_chip;
        }

        chip_bus_lock(desc);
        raw_spin_lock_irqsave(&desc->lock, flags);

        /* Uninstall? */
        if (handle == handle_bad_irq) {
                if (desc->irq_data.chip != &no_irq_chip)
                        mask_ack_irq(desc);
                desc->status |= IRQ_DISABLED;
                desc->depth = 1;
        }
        desc->handle_irq = handle;
        desc->name = name;

        if (handle != handle_bad_irq && is_chained) {
                desc->status &= ~IRQ_DISABLED;
                desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
                desc->depth = 0;
                desc->irq_data.chip->startup(irq);
        }
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL_GPL(__set_irq_handler);

void
set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
                         irq_flow_handler_t handle)
{
        set_irq_chip(irq, chip);
        __set_irq_handler(irq, handle, 0, NULL);
}

void
set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
                              irq_flow_handler_t handle, const char *name)
{
        set_irq_chip(irq, chip);
        __set_irq_handler(irq, handle, 0, name);
}
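
/*
 * Illustrative sketch: typical interrupt controller setup code wiring each
 * line to a chip and a flow handler.  The irq range, the level/edge split
 * and the referenced legacy_chip (from the earlier sketch) are
 * hypothetical, not part of this file.
 */
#if 0	/* example only */
#define EXAMPLE_IRQ_BASE        64
#define EXAMPLE_NR_IRQS         16

static void __init example_init_irqs(void)
{
        unsigned int irq;

        for (irq = EXAMPLE_IRQ_BASE;
             irq < EXAMPLE_IRQ_BASE + EXAMPLE_NR_IRQS; irq++) {
                if (irq < EXAMPLE_IRQ_BASE + 8)
                        set_irq_chip_and_handler(irq, &legacy_chip,
                                                 handle_level_irq);
                else
                        set_irq_chip_and_handler_name(irq, &legacy_chip,
                                                      handle_edge_irq, "edge");
        }
}
#endif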

void set_irq_noprobe(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        if (!desc) {
                printk(KERN_ERR "Trying to mark IRQ%d non-probeable\n", irq);
                return;
        }

        raw_spin_lock_irqsave(&desc->lock, flags);
        desc->status |= IRQ_NOPROBE;
        raw_spin_unlock_irqrestore(&desc->lock, flags);
}

void set_irq_probe(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        if (!desc) {
                printk(KERN_ERR "Trying to mark IRQ%d probeable\n", irq);
                return;
        }

        raw_spin_lock_irqsave(&desc->lock, flags);
        desc->status &= ~IRQ_NOPROBE;
        raw_spin_unlock_irqrestore(&desc->lock, flags);
}