Pull acpi_device_handle_cleanup into release branch
[pandora-kernel.git] / drivers / s390 / cio / device_fsm.c
1 /*
2  * drivers/s390/cio/device_fsm.c
3  * finite state machine for device handling
4  *
5  *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
6  *                       IBM Corporation
7  *    Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
8  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
9  */
10
11 #include <linux/module.h>
12 #include <linux/init.h>
13 #include <linux/jiffies.h>
14 #include <linux/string.h>
15
16 #include <asm/ccwdev.h>
17 #include <asm/cio.h>
18
19 #include "cio.h"
20 #include "cio_debug.h"
21 #include "css.h"
22 #include "device.h"
23 #include "chsc.h"
24 #include "ioasm.h"
25
26 int
27 device_is_online(struct subchannel *sch)
28 {
29         struct ccw_device *cdev;
30
31         if (!sch->dev.driver_data)
32                 return 0;
33         cdev = sch->dev.driver_data;
34         return (cdev->private->state == DEV_STATE_ONLINE);
35 }
36
37 int
38 device_is_disconnected(struct subchannel *sch)
39 {
40         struct ccw_device *cdev;
41
42         if (!sch->dev.driver_data)
43                 return 0;
44         cdev = sch->dev.driver_data;
45         return (cdev->private->state == DEV_STATE_DISCONNECTED ||
46                 cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
47 }
48
49 void
50 device_set_disconnected(struct subchannel *sch)
51 {
52         struct ccw_device *cdev;
53
54         if (!sch->dev.driver_data)
55                 return;
56         cdev = sch->dev.driver_data;
57         ccw_device_set_timeout(cdev, 0);
58         cdev->private->flags.fake_irb = 0;
59         cdev->private->state = DEV_STATE_DISCONNECTED;
60 }
61
62 void
63 device_set_waiting(struct subchannel *sch)
64 {
65         struct ccw_device *cdev;
66
67         if (!sch->dev.driver_data)
68                 return;
69         cdev = sch->dev.driver_data;
70         ccw_device_set_timeout(cdev, 10*HZ);
71         cdev->private->state = DEV_STATE_WAIT4IO;
72 }
73
74 /*
75  * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
76  */
77 static void
78 ccw_device_timeout(unsigned long data)
79 {
80         struct ccw_device *cdev;
81
82         cdev = (struct ccw_device *) data;
83         spin_lock_irq(cdev->ccwlock);
84         dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
85         spin_unlock_irq(cdev->ccwlock);
86 }
87
/*
 * Arm or disarm the per-device timer.
 *
 * @expires == 0 cancels a pending timer; any other value (re)arms the
 * timer to fire ccw_device_timeout() after @expires jiffies.
 */
void
ccw_device_set_timeout(struct ccw_device *cdev, int expires)
{
	if (expires == 0) {
		del_timer(&cdev->private->timer);
		return;
	}
	if (timer_pending(&cdev->private->timer)) {
		/* Timer already armed: just push the expiry out. */
		if (mod_timer(&cdev->private->timer, jiffies + expires))
			return;
		/*
		 * NOTE(review): if mod_timer() returns 0 here (timer expired
		 * between the timer_pending() check and the call) we fall
		 * through and add_timer() an already-modified timer —
		 * presumably the caller-side locking prevents this race;
		 * verify against the timer API semantics.
		 */
	}
	cdev->private->timer.function = ccw_device_timeout;
	cdev->private->timer.data = (unsigned long) cdev;
	cdev->private->timer.expires = jiffies + expires;
	add_timer(&cdev->private->timer);
}
107
108 /* Kill any pending timers after machine check. */
109 void
110 device_kill_pending_timer(struct subchannel *sch)
111 {
112         struct ccw_device *cdev;
113
114         if (!sch->dev.driver_data)
115                 return;
116         cdev = sch->dev.driver_data;
117         ccw_device_set_timeout(cdev, 0);
118 }
119
/*
 * Cancel running i/o. This is called repeatedly since halt/clear are
 * asynchronous operations. We do one try with cio_cancel, two tries
 * with cio_halt, 255 tries with cio_clear. If everythings fails panic.
 * Returns 0 if device now idle, -ENODEV for device not operational and
 * -EBUSY if an interrupt is expected (either from halt/clear or from a
 * status pending).
 */
int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
        struct subchannel *sch;
        int ret;

        sch = to_subchannel(cdev->dev.parent);
        /* Refresh the subchannel information block first. */
        ret = stsch(sch->schid, &sch->schib);
        if (ret || !sch->schib.pmcw.dnv)
                return -ENODEV;
        if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0)
                /* Not operational or no activity -> done. */
                return 0;
        /* Stage 1: cancel io. */
        if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) &&
            !(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
                ret = cio_cancel(sch);
                if (ret != -EINVAL)
                        return ret;
                /* cancel io unsuccessful. From now on it is asynchronous. */
                cdev->private->iretry = 3;      /* 3 halt retries. */
        }
        /* Skip the halt stage once a clear is already pending. */
        if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
                /* Stage 2: halt io. */
                if (cdev->private->iretry) {
                        cdev->private->iretry--;
                        ret = cio_halt(sch);
                        /* 0 means halt started - an interrupt will follow. */
                        return (ret == 0) ? -EBUSY : ret;
                }
                /* halt io unsuccessful. */
                cdev->private->iretry = 255;    /* 255 clear retries. */
        }
        /* Stage 3: clear io. */
        if (cdev->private->iretry) {
                cdev->private->iretry--;
                ret = cio_clear (sch);
                return (ret == 0) ? -EBUSY : ret;
        }
        /* All retries exhausted - the subchannel will not stop. */
        panic("Can't stop i/o on subchannel.\n");
}
168
/*
 * A formerly disconnected device became operational again. Decide
 * whether it is still the same physical device.
 *
 * Returns 1 if the sensed identification matches the recorded one
 * (same device; notification to the driver is requested via the
 * donotify flag), 0 if it is a different device (an unregister/
 * re-register cycle is scheduled on the ccw_device_work queue).
 */
static int
ccw_device_handle_oper(struct ccw_device *cdev)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        cdev->private->flags.recog_done = 1;
        /*
         * Check if cu type and device type still match. If
         * not, it is certainly another device and we have to
         * de- and re-register. Also check here for non-matching devno.
         */
        if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
            cdev->id.cu_model != cdev->private->senseid.cu_model ||
            cdev->id.dev_type != cdev->private->senseid.dev_type ||
            cdev->id.dev_model != cdev->private->senseid.dev_model ||
            cdev->private->devno != sch->schib.pmcw.dev) {
                PREPARE_WORK(&cdev->private->kick_work,
                             ccw_device_do_unreg_rereg, (void *)cdev);
                queue_work(ccw_device_work, &cdev->private->kick_work);
                return 0;
        }
        cdev->private->flags.donotify = 1;
        return 1;
}
194
195 /*
196  * The machine won't give us any notification by machine check if a chpid has
197  * been varied online on the SE so we have to find out by magic (i. e. driving
198  * the channel subsystem to device selection and updating our path masks).
199  */
200 static inline void
201 __recover_lost_chpids(struct subchannel *sch, int old_lpm)
202 {
203         int mask, i;
204
205         for (i = 0; i<8; i++) {
206                 mask = 0x80 >> i;
207                 if (!(sch->lpm & mask))
208                         continue;
209                 if (old_lpm & mask)
210                         continue;
211                 chpid_is_actually_online(sch->schib.pmcw.chpid[i]);
212         }
213 }
214
/*
 * Stop device recognition.
 *
 * Called when sense id finished (or failed) with the resulting @state
 * (DEV_STATE_OFFLINE, DEV_STATE_BOXED or DEV_STATE_NOT_OPER). Updates
 * the path mask, handles devices coming back from the disconnected
 * state and finally records the new state and wakes up waiters.
 */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
        struct subchannel *sch;
        int notify, old_lpm, same_dev;

        sch = to_subchannel(cdev->dev.parent);

        ccw_device_set_timeout(cdev, 0);
        cio_disable_subchannel(sch);
        /*
         * Now that we tried recognition, we have performed device selection
         * through ssch() and the path information is up to date.
         */
        old_lpm = sch->lpm;
        stsch(sch->schid, &sch->schib);
        sch->lpm = sch->schib.pmcw.pim &
                sch->schib.pmcw.pam &
                sch->schib.pmcw.pom &
                sch->opm;
        /* Check since device may again have become not operational. */
        if (!sch->schib.pmcw.dnv)
                state = DEV_STATE_NOT_OPER;
        if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
                /* Force reprobe on all chpids. */
                old_lpm = 0;
        if (sch->lpm != old_lpm)
                __recover_lost_chpids(sch, old_lpm);
        if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
                if (state == DEV_STATE_NOT_OPER) {
                        /* Still gone - stay disconnected. */
                        cdev->private->flags.recog_done = 1;
                        cdev->private->state = DEV_STATE_DISCONNECTED;
                        return;
                }
                /* Boxed devices don't need extra treatment. */
        }
        notify = 0;
        same_dev = 0; /* Keep the compiler quiet... */
        switch (state) {
        case DEV_STATE_NOT_OPER:
                CIO_DEBUG(KERN_WARNING, 2,
                          "SenseID : unknown device %04x on subchannel "
                          "0.%x.%04x\n", cdev->private->devno,
                          sch->schid.ssid, sch->schid.sch_no);
                break;
        case DEV_STATE_OFFLINE:
                if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
                        /* Device reappeared - is it still the same one? */
                        same_dev = ccw_device_handle_oper(cdev);
                        notify = 1;
                }
                /* fill out sense information */
                cdev->id = (struct ccw_device_id) {
                        .cu_type   = cdev->private->senseid.cu_type,
                        .cu_model  = cdev->private->senseid.cu_model,
                        .dev_type  = cdev->private->senseid.dev_type,
                        .dev_model = cdev->private->senseid.dev_model,
                };
                if (notify) {
                        cdev->private->state = DEV_STATE_OFFLINE;
                        if (same_dev) {
                                /* Get device online again. */
                                ccw_device_online(cdev);
                                wake_up(&cdev->private->wait_q);
                        }
                        return;
                }
                /* Issue device info message. */
                CIO_DEBUG(KERN_INFO, 2, "SenseID : device 0.%x.%04x reports: "
                          "CU  Type/Mod = %04X/%02X, Dev Type/Mod = "
                          "%04X/%02X\n",
                          cdev->private->ssid, cdev->private->devno,
                          cdev->id.cu_type, cdev->id.cu_model,
                          cdev->id.dev_type, cdev->id.dev_model);
                break;
        case DEV_STATE_BOXED:
                CIO_DEBUG(KERN_WARNING, 2,
                          "SenseID : boxed device %04x on subchannel "
                          "0.%x.%04x\n", cdev->private->devno,
                          sch->schid.ssid, sch->schid.sch_no);
                break;
        }
        cdev->private->state = state;
        io_subchannel_recog_done(cdev);
        if (state != DEV_STATE_NOT_OPER)
                wake_up(&cdev->private->wait_q);
}
304
305 /*
306  * Function called from device_id.c after sense id has completed.
307  */
308 void
309 ccw_device_sense_id_done(struct ccw_device *cdev, int err)
310 {
311         switch (err) {
312         case 0:
313                 ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
314                 break;
315         case -ETIME:            /* Sense id stopped by timeout. */
316                 ccw_device_recog_done(cdev, DEV_STATE_BOXED);
317                 break;
318         default:
319                 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
320                 break;
321         }
322 }
323
/*
 * Work queue function: a formerly disconnected device is operational
 * again. Ask the subchannel driver whether it wants the device back;
 * if not, unregister and re-register it, otherwise re-enable channel
 * measurements and wake up waiters.
 */
static void
ccw_device_oper_notify(void *data)
{
        struct ccw_device *cdev;
        struct subchannel *sch;
        int ret;

        cdev = (struct ccw_device *)data;
        sch = to_subchannel(cdev->dev.parent);
        /* Default to "not wanted" if the driver has no notify callback. */
        ret = (sch->driver && sch->driver->notify) ?
                sch->driver->notify(&sch->dev, CIO_OPER) : 0;
        if (!ret)
                /* Driver doesn't want device back. */
                ccw_device_do_unreg_rereg((void *)cdev);
        else {
                /* Reenable channel measurements, if needed. */
                cmf_reenable(cdev);
                wake_up(&cdev->private->wait_q);
        }
}
344
/*
 * Finished with online/offline processing.
 *
 * Records the final @state, disables the subchannel unless the device
 * went online, schedules an operational notification if one was
 * requested (donotify flag) and drops the device reference taken by
 * ccw_device_online() for any non-online outcome.
 */
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);

        if (state != DEV_STATE_ONLINE)
                cio_disable_subchannel(sch);

        /* Reset device status. */
        memset(&cdev->private->irb, 0, sizeof(struct irb));

        cdev->private->state = state;


        if (state == DEV_STATE_BOXED)
                CIO_DEBUG(KERN_WARNING, 2,
                          "Boxed device %04x on subchannel %04x\n",
                          cdev->private->devno, sch->schid.sch_no);

        if (cdev->private->flags.donotify) {
                /* Deferred to the notify workqueue; clear flag first. */
                cdev->private->flags.donotify = 0;
                PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify,
                             (void *)cdev);
                queue_work(ccw_device_notify_work, &cdev->private->kick_work);
        }
        wake_up(&cdev->private->wait_q);

        /* Balance the get_device() done in ccw_device_online(). */
        if (css_init_done && state != DEV_STATE_ONLINE)
                put_device (&cdev->dev);
}
380
381 /*
382  * Function called from device_pgid.c after sense path ground has completed.
383  */
384 void
385 ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
386 {
387         struct subchannel *sch;
388
389         sch = to_subchannel(cdev->dev.parent);
390         switch (err) {
391         case 0:
392                 /* Start Path Group verification. */
393                 sch->vpm = 0;   /* Start with no path groups set. */
394                 cdev->private->state = DEV_STATE_VERIFY;
395                 ccw_device_verify_start(cdev);
396                 break;
397         case -ETIME:            /* Sense path group id stopped by timeout. */
398         case -EUSERS:           /* device is reserved for someone else. */
399                 ccw_device_done(cdev, DEV_STATE_BOXED);
400                 break;
401         case -EOPNOTSUPP: /* path grouping not supported, just set online. */
402                 cdev->private->options.pgroup = 0;
403                 ccw_device_done(cdev, DEV_STATE_ONLINE);
404                 break;
405         default:
406                 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
407                 break;
408         }
409 }
410
/*
 * Start device recognition.
 *
 * Only valid in the not-oper or boxed states. Enables the subchannel
 * for i/o, arms a 60 second timeout and kicks off sense id.
 * Returns 0 on success, -EINVAL for a wrong state or the error from
 * cio_enable_subchannel().
 */
int
ccw_device_recognition(struct ccw_device *cdev)
{
        struct subchannel *sch;
        int ret;

        if ((cdev->private->state != DEV_STATE_NOT_OPER) &&
            (cdev->private->state != DEV_STATE_BOXED))
                return -EINVAL;
        sch = to_subchannel(cdev->dev.parent);
        ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
        if (ret != 0)
                /* Couldn't enable the subchannel for i/o. Sick device. */
                return ret;

        /* After 60s the device recognition is considered to have failed. */
        ccw_device_set_timeout(cdev, 60*HZ);

        /*
         * We used to start here with a sense pgid to find out whether a device
         * is locked by someone else. Unfortunately, the sense pgid command
         * code has other meanings on devices predating the path grouping
         * algorithm, so we start with sense id and box the device after an
         * timeout (or if sense pgid during path verification detects the device
         * is locked, as may happen on newer devices).
         */
        cdev->private->flags.recog_done = 0;
        cdev->private->state = DEV_STATE_SENSE_ID;
        ccw_device_sense_id_start(cdev);
        return 0;
}
445
446 /*
447  * Handle timeout in device recognition.
448  */
449 static void
450 ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
451 {
452         int ret;
453
454         ret = ccw_device_cancel_halt_clear(cdev);
455         switch (ret) {
456         case 0:
457                 ccw_device_recog_done(cdev, DEV_STATE_BOXED);
458                 break;
459         case -ENODEV:
460                 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
461                 break;
462         default:
463                 ccw_device_set_timeout(cdev, 3*HZ);
464         }
465 }
466
/*
 * Work queue function: all paths to the device are gone. Ask the
 * subchannel driver whether it wants to keep the device; if not,
 * schedule unregistration of the subchannel, otherwise move the
 * device to the disconnected state.
 */
static void
ccw_device_nopath_notify(void *data)
{
        struct ccw_device *cdev;
        struct subchannel *sch;
        int ret;

        cdev = (struct ccw_device *)data;
        sch = to_subchannel(cdev->dev.parent);
        /* Extra sanity. */
        if (sch->lpm)
                return;
        ret = (sch->driver && sch->driver->notify) ?
                sch->driver->notify(&sch->dev, CIO_NO_PATH) : 0;
        if (!ret) {
                if (get_device(&sch->dev)) {
                        /* Driver doesn't want to keep device. */
                        cio_disable_subchannel(sch);
                        if (get_device(&cdev->dev)) {
                                /*
                                 * Both references are consumed by the
                                 * unregister work function.
                                 */
                                PREPARE_WORK(&cdev->private->kick_work,
                                             ccw_device_call_sch_unregister,
                                             (void *)cdev);
                                queue_work(ccw_device_work,
                                           &cdev->private->kick_work);
                        } else
                                /* cdev ref failed: drop the sch ref again. */
                                put_device(&sch->dev);
                }
        } else {
                cio_disable_subchannel(sch);
                ccw_device_set_timeout(cdev, 0);
                cdev->private->flags.fake_irb = 0;
                cdev->private->state = DEV_STATE_DISCONNECTED;
                wake_up(&cdev->private->wait_q);
        }
}
503
/*
 * Called from device_pgid.c after path verification has completed.
 * On success the device goes online and a deferred (fake) irb is
 * delivered to the driver if i/o was started during verification.
 */
void
ccw_device_verify_done(struct ccw_device *cdev, int err)
{
        cdev->private->flags.doverify = 0;
        switch (err) {
        case -EOPNOTSUPP: /* path grouping not supported, just set online. */
                cdev->private->options.pgroup = 0;
                /* fallthrough */
        case 0:
                ccw_device_done(cdev, DEV_STATE_ONLINE);
                /* Deliver fake irb to device driver, if needed. */
                if (cdev->private->flags.fake_irb) {
                        memset(&cdev->private->irb, 0, sizeof(struct irb));
                        cdev->private->irb.scsw = (struct scsw) {
                                .cc = 1,
                                .fctl = SCSW_FCTL_START_FUNC,
                                .actl = SCSW_ACTL_START_PEND,
                                .stctl = SCSW_STCTL_STATUS_PEND,
                        };
                        cdev->private->flags.fake_irb = 0;
                        if (cdev->handler)
                                cdev->handler(cdev, cdev->private->intparm,
                                              &cdev->private->irb);
                        memset(&cdev->private->irb, 0, sizeof(struct irb));
                }
                break;
        case -ETIME:
                ccw_device_done(cdev, DEV_STATE_BOXED);
                break;
        default:
                /* No usable path - notify and mark not operational. */
                PREPARE_WORK(&cdev->private->kick_work,
                             ccw_device_nopath_notify, (void *)cdev);
                queue_work(ccw_device_notify_work, &cdev->private->kick_work);
                ccw_device_done(cdev, DEV_STATE_NOT_OPER);
                break;
        }
}
540
/*
 * Get device online.
 *
 * Only valid in the offline or boxed states. Takes a device reference
 * (dropped again in ccw_device_done() if the device does not reach the
 * online state), enables the subchannel and either goes online directly
 * or starts the sense pgid / path grouping sequence.
 * Returns 0 on success, -EINVAL for a wrong state, -ENODEV or the
 * error from cio_enable_subchannel().
 */
int
ccw_device_online(struct ccw_device *cdev)
{
        struct subchannel *sch;
        int ret;

        if ((cdev->private->state != DEV_STATE_OFFLINE) &&
            (cdev->private->state != DEV_STATE_BOXED))
                return -EINVAL;
        sch = to_subchannel(cdev->dev.parent);
        if (css_init_done && !get_device(&cdev->dev))
                return -ENODEV;
        ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
        if (ret != 0) {
                /* Couldn't enable the subchannel for i/o. Sick device. */
                if (ret == -ENODEV)
                        dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
                return ret;
        }
        /* Do we want to do path grouping? */
        if (!cdev->private->options.pgroup) {
                /* No, set state online immediately. */
                ccw_device_done(cdev, DEV_STATE_ONLINE);
                return 0;
        }
        /* Do a SensePGID first. */
        cdev->private->state = DEV_STATE_SENSE_PGID;
        ccw_device_sense_pgid_start(cdev);
        return 0;
}
574
575 void
576 ccw_device_disband_done(struct ccw_device *cdev, int err)
577 {
578         switch (err) {
579         case 0:
580                 ccw_device_done(cdev, DEV_STATE_OFFLINE);
581                 break;
582         case -ETIME:
583                 ccw_device_done(cdev, DEV_STATE_BOXED);
584                 break;
585         default:
586                 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
587                 break;
588         }
589 }
590
/*
 * Shutdown device.
 *
 * Only valid for an online, idle device. Either goes offline directly
 * or starts the path group disband sequence first.
 * Returns 0 on success, -ENODEV if the device vanished, -EBUSY while
 * i/o is still in flight and -EINVAL for a wrong state.
 */
int
ccw_device_offline(struct ccw_device *cdev)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv)
                return -ENODEV;
        if (cdev->private->state != DEV_STATE_ONLINE) {
                /* Activity pending takes precedence over the state error. */
                if (sch->schib.scsw.actl != 0)
                        return -EBUSY;
                return -EINVAL;
        }
        if (sch->schib.scsw.actl != 0)
                return -EBUSY;
        /* Are we doing path grouping? */
        if (!cdev->private->options.pgroup) {
                /* No, set state offline immediately. */
                ccw_device_done(cdev, DEV_STATE_OFFLINE);
                return 0;
        }
        /* Start Set Path Group commands. */
        cdev->private->state = DEV_STATE_DISBAND_PGID;
        ccw_device_disband_start(cdev);
        return 0;
}
620
621 /*
622  * Handle timeout in device online/offline process.
623  */
624 static void
625 ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
626 {
627         int ret;
628
629         ret = ccw_device_cancel_halt_clear(cdev);
630         switch (ret) {
631         case 0:
632                 ccw_device_done(cdev, DEV_STATE_BOXED);
633                 break;
634         case -ENODEV:
635                 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
636                 break;
637         default:
638                 ccw_device_set_timeout(cdev, 3*HZ);
639         }
640 }
641
/*
 * Handle not oper event in device recognition.
 * Simply finishes recognition with the not-oper result.
 */
static void
ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
        ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
}
650
/*
 * Handle not operational event while offline.
 * Marks the device not operational, schedules unregistration of the
 * subchannel and wakes up waiters.
 */
static void
ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct subchannel *sch;

        cdev->private->state = DEV_STATE_NOT_OPER;
        sch = to_subchannel(cdev->dev.parent);
        if (get_device(&cdev->dev)) {
                /* Reference is consumed by the unregister work function. */
                PREPARE_WORK(&cdev->private->kick_work,
                             ccw_device_call_sch_unregister, (void *)cdev);
                queue_work(ccw_device_work, &cdev->private->kick_work);
        }
        wake_up(&cdev->private->wait_q);
}
668
669 /*
670  * Handle not operational event while online.
671  */
672 static void
673 ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
674 {
675         struct subchannel *sch;
676
677         sch = to_subchannel(cdev->dev.parent);
678         if (sch->driver->notify &&
679             sch->driver->notify(&sch->dev, sch->lpm ? CIO_GONE : CIO_NO_PATH)) {
680                         ccw_device_set_timeout(cdev, 0);
681                         cdev->private->flags.fake_irb = 0;
682                         cdev->private->state = DEV_STATE_DISCONNECTED;
683                         wake_up(&cdev->private->wait_q);
684                         return;
685         }
686         cdev->private->state = DEV_STATE_NOT_OPER;
687         cio_disable_subchannel(sch);
688         if (sch->schib.scsw.actl != 0) {
689                 // FIXME: not-oper indication to device driver ?
690                 ccw_device_call_handler(cdev);
691         }
692         if (get_device(&cdev->dev)) {
693                 PREPARE_WORK(&cdev->private->kick_work,
694                              ccw_device_call_sch_unregister, (void *)cdev);
695                 queue_work(ccw_device_work, &cdev->private->kick_work);
696         }
697         wake_up(&cdev->private->wait_q);
698 }
699
/*
 * Handle path verification event.
 *
 * Starts path verification immediately if the device is idle;
 * otherwise only sets the doverify flag so verification is retried
 * once the pending i/o has finished.
 */
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct subchannel *sch;

        /* Nothing to verify without path grouping. */
        if (!cdev->private->options.pgroup)
                return;
        if (cdev->private->state == DEV_STATE_W4SENSE) {
                /* Basic sense in flight - defer verification. */
                cdev->private->flags.doverify = 1;
                return;
        }
        sch = to_subchannel(cdev->dev.parent);
        /*
         * Since we might not just be coming from an interrupt from the
         * subchannel we have to update the schib.
         */
        stsch(sch->schid, &sch->schib);

        if (sch->schib.scsw.actl != 0 ||
            (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) {
                /*
                 * No final status yet or final status not yet delivered
                 * to the device driver. Can't do path verfication now,
                 * delay until final status was delivered.
                 */
                cdev->private->flags.doverify = 1;
                return;
        }
        /* Device is idle, we can do the path verification. */
        cdev->private->state = DEV_STATE_VERIFY;
        ccw_device_verify_start(cdev);
}
735
/*
 * Got an interrupt for a normal io (state online).
 *
 * Handles unsolicited interrupts (possibly starting a basic sense for
 * a unit check without sense data), accumulates status and calls the
 * device driver's interrupt handler.
 */
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct irb *irb;

        /* The hardware stores the irb in the lowcore. */
        irb = (struct irb *) __LC_IRB;
        /* Check for unsolicited interrupt. */
        if ((irb->scsw.stctl ==
                        (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS))
            && (!irb->scsw.cc)) {
                if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
                    !irb->esw.esw0.erw.cons) {
                        /* Unit check but no sense data. Need basic sense. */
                        if (ccw_device_do_sense(cdev, irb) != 0)
                                goto call_handler_unsol;
                        memcpy(&cdev->private->irb, irb, sizeof(struct irb));
                        cdev->private->state = DEV_STATE_W4SENSE;
                        cdev->private->intparm = 0;
                        return;
                }
call_handler_unsol:
                /* Unsolicited interrupts get intparm 0. */
                if (cdev->handler)
                        cdev->handler (cdev, 0, irb);
                return;
        }
        /* Accumulate status and find out if a basic sense is needed. */
        ccw_device_accumulate_irb(cdev, irb);
        if (cdev->private->flags.dosense) {
                if (ccw_device_do_sense(cdev, irb) == 0) {
                        cdev->private->state = DEV_STATE_W4SENSE;
                }
                return;
        }
        /* Call the handler. */
        if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
                /* Start delayed path verification. */
                ccw_device_online_verify(cdev, 0);
}
777
/*
 * Got an timeout in online state.
 *
 * Tries to cancel/halt/clear the running i/o; depending on the result
 * either waits for the kill to complete (TIMEOUT_KILL state), handles
 * a vanished device or reports -ETIMEDOUT to the driver.
 */
static void
ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
        int ret;

        ccw_device_set_timeout(cdev, 0);
        ret = ccw_device_cancel_halt_clear(cdev);
        if (ret == -EBUSY) {
                /* Halt/clear interrupt pending - retry in three seconds. */
                ccw_device_set_timeout(cdev, 3*HZ);
                cdev->private->state = DEV_STATE_TIMEOUT_KILL;
                return;
        }
        if (ret == -ENODEV) {
                struct subchannel *sch;

                sch = to_subchannel(cdev->dev.parent);
                if (!sch->lpm) {
                        /* No path left - notify on the workqueue. */
                        PREPARE_WORK(&cdev->private->kick_work,
                                     ccw_device_nopath_notify, (void *)cdev);
                        queue_work(ccw_device_notify_work,
                                   &cdev->private->kick_work);
                } else
                        dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
        } else if (cdev->handler)
                /* I/O stopped synchronously - report the timeout. */
                cdev->handler(cdev, cdev->private->intparm,
                              ERR_PTR(-ETIMEDOUT));
}
808
/*
 * Got an interrupt for a basic sense.
 *
 * Accumulates sense data, retries the basic sense if more data is
 * needed, and finally returns to the online state and calls the
 * driver's interrupt handler.
 */
void
ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct irb *irb;

        /* The hardware stores the irb in the lowcore. */
        irb = (struct irb *) __LC_IRB;
        /* Check for unsolicited interrupt. */
        if (irb->scsw.stctl ==
                        (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
                if (irb->scsw.cc == 1)
                        /* Basic sense hasn't started. Try again. */
                        ccw_device_do_sense(cdev, irb);
                else {
                        printk("Huh? %s(%s): unsolicited interrupt...\n",
                               __FUNCTION__, cdev->dev.bus_id);
                        if (cdev->handler)
                                cdev->handler (cdev, 0, irb);
                }
                return;
        }
        /*
         * Check if a halt or clear has been issued in the meanwhile. If yes,
         * only deliver the halt/clear interrupt to the device driver as if it
         * had killed the original request.
         */
        if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
                cdev->private->flags.dosense = 0;
                memset(&cdev->private->irb, 0, sizeof(struct irb));
                ccw_device_accumulate_irb(cdev, irb);
                goto call_handler;
        }
        /* Add basic sense info to irb. */
        ccw_device_accumulate_basic_sense(cdev, irb);
        if (cdev->private->flags.dosense) {
                /* Another basic sense is needed. */
                ccw_device_do_sense(cdev, irb);
                return;
        }
call_handler:
        cdev->private->state = DEV_STATE_ONLINE;
        /* Call the handler. */
        if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
                /* Start delayed path verification. */
                ccw_device_online_verify(cdev, 0);
}
857
858 static void
859 ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
860 {
861         struct irb *irb;
862
863         irb = (struct irb *) __LC_IRB;
864         /* Accumulate status. We don't do basic sense. */
865         ccw_device_accumulate_irb(cdev, irb);
866         /* Remember to clear irb to avoid residuals. */
867         memset(&cdev->private->irb, 0, sizeof(struct irb));
868         /* Try to start delayed device verification. */
869         ccw_device_online_verify(cdev, 0);
870         /* Note: Don't call handler for cio initiated clear! */
871 }
872
873 static void
874 ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
875 {
876         struct subchannel *sch;
877
878         sch = to_subchannel(cdev->dev.parent);
879         ccw_device_set_timeout(cdev, 0);
880         /* OK, i/o is dead now. Call interrupt handler. */
881         cdev->private->state = DEV_STATE_ONLINE;
882         if (cdev->handler)
883                 cdev->handler(cdev, cdev->private->intparm,
884                               ERR_PTR(-ETIMEDOUT));
885         if (!sch->lpm) {
886                 PREPARE_WORK(&cdev->private->kick_work,
887                              ccw_device_nopath_notify, (void *)cdev);
888                 queue_work(ccw_device_notify_work, &cdev->private->kick_work);
889         } else if (cdev->private->flags.doverify)
890                 /* Start delayed path verification. */
891                 ccw_device_online_verify(cdev, 0);
892 }
893
894 static void
895 ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
896 {
897         int ret;
898
899         ret = ccw_device_cancel_halt_clear(cdev);
900         if (ret == -EBUSY) {
901                 ccw_device_set_timeout(cdev, 3*HZ);
902                 return;
903         }
904         if (ret == -ENODEV) {
905                 struct subchannel *sch;
906
907                 sch = to_subchannel(cdev->dev.parent);
908                 if (!sch->lpm) {
909                         PREPARE_WORK(&cdev->private->kick_work,
910                                      ccw_device_nopath_notify, (void *)cdev);
911                         queue_work(ccw_device_notify_work,
912                                    &cdev->private->kick_work);
913                 } else
914                         dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
915                 return;
916         }
917         //FIXME: Can we get here?
918         cdev->private->state = DEV_STATE_ONLINE;
919         if (cdev->handler)
920                 cdev->handler(cdev, cdev->private->intparm,
921                               ERR_PTR(-ETIMEDOUT));
922 }
923
/*
 * Interrupt while in state WAIT4IO: status for the i/o we are waiting on
 * has arrived.  Starts basic sense if required, otherwise delivers the
 * accumulated status to the driver and handles path events.
 */
static void
ccw_device_wait4io_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct irb *irb;
        struct subchannel *sch;

        irb = (struct irb *) __LC_IRB;
        /*
         * Accumulate status and find out if a basic sense is needed.
         * This is fine since we have already adapted the lpm.
         */
        ccw_device_accumulate_irb(cdev, irb);
        if (cdev->private->flags.dosense) {
                if (ccw_device_do_sense(cdev, irb) == 0) {
                        /* Sense started; await its completion in W4SENSE. */
                        cdev->private->state = DEV_STATE_W4SENSE;
                }
                return;
        }

        /* Iff device is idle, reset timeout. */
        sch = to_subchannel(cdev->dev.parent);
        if (!stsch(sch->schid, &sch->schib))
                if (sch->schib.scsw.actl == 0)
                        ccw_device_set_timeout(cdev, 0);
        /* Call the handler. */
        ccw_device_call_handler(cdev);
        if (!sch->lpm) {
                /* No path left: notify the driver via the workqueue. */
                PREPARE_WORK(&cdev->private->kick_work,
                             ccw_device_nopath_notify, (void *)cdev);
                queue_work(ccw_device_notify_work, &cdev->private->kick_work);
        } else if (cdev->private->flags.doverify)
                /* A verify was requested meanwhile; start it now. */
                ccw_device_online_verify(cdev, 0);
}
957
958 static void
959 ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
960 {
961         int ret;
962         struct subchannel *sch;
963
964         sch = to_subchannel(cdev->dev.parent);
965         ccw_device_set_timeout(cdev, 0);
966         ret = ccw_device_cancel_halt_clear(cdev);
967         if (ret == -EBUSY) {
968                 ccw_device_set_timeout(cdev, 3*HZ);
969                 cdev->private->state = DEV_STATE_TIMEOUT_KILL;
970                 return;
971         }
972         if (ret == -ENODEV) {
973                 if (!sch->lpm) {
974                         PREPARE_WORK(&cdev->private->kick_work,
975                                      ccw_device_nopath_notify, (void *)cdev);
976                         queue_work(ccw_device_notify_work,
977                                    &cdev->private->kick_work);
978                 } else
979                         dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
980                 return;
981         }
982         if (cdev->handler)
983                 cdev->handler(cdev, cdev->private->intparm,
984                               ERR_PTR(-ETIMEDOUT));
985         if (!sch->lpm) {
986                 PREPARE_WORK(&cdev->private->kick_work,
987                              ccw_device_nopath_notify, (void *)cdev);
988                 queue_work(ccw_device_notify_work, &cdev->private->kick_work);
989         } else if (cdev->private->flags.doverify)
990                 /* Start delayed path verification. */
991                 ccw_device_online_verify(cdev, 0);
992 }
993
994 static void
995 ccw_device_wait4io_verify(struct ccw_device *cdev, enum dev_event dev_event)
996 {
997         /* When the I/O has terminated, we have to start verification. */
998         if (cdev->private->options.pgroup)
999                 cdev->private->flags.doverify = 1;
1000 }
1001
1002 static void
1003 ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
1004 {
1005         struct irb *irb;
1006
1007         switch (dev_event) {
1008         case DEV_EVENT_INTERRUPT:
1009                 irb = (struct irb *) __LC_IRB;
1010                 /* Check for unsolicited interrupt. */
1011                 if ((irb->scsw.stctl ==
1012                      (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
1013                     (!irb->scsw.cc))
1014                         /* FIXME: we should restart stlck here, but this
1015                          * is extremely unlikely ... */
1016                         goto out_wakeup;
1017
1018                 ccw_device_accumulate_irb(cdev, irb);
1019                 /* We don't care about basic sense etc. */
1020                 break;
1021         default: /* timeout */
1022                 break;
1023         }
1024 out_wakeup:
1025         wake_up(&cdev->private->wait_q);
1026 }
1027
1028 static void
1029 ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
1030 {
1031         struct subchannel *sch;
1032
1033         sch = to_subchannel(cdev->dev.parent);
1034         if (cio_enable_subchannel(sch, sch->schib.pmcw.isc) != 0)
1035                 /* Couldn't enable the subchannel for i/o. Sick device. */
1036                 return;
1037
1038         /* After 60s the device recognition is considered to have failed. */
1039         ccw_device_set_timeout(cdev, 60*HZ);
1040
1041         cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
1042         ccw_device_sense_id_start(cdev);
1043 }
1044
/*
 * device_trigger_reprobe - restart device recognition for a disconnected
 * device.
 *
 * Refreshes the subchannel data, recomputes the logical path mask and
 * restores pmcw bits before kicking off sense-id processing via
 * ccw_device_start_id().  Silently does nothing if the subchannel has no
 * attached device, the device is not disconnected, or stsch fails.
 */
void
device_trigger_reprobe(struct subchannel *sch)
{
        struct ccw_device *cdev;

        if (!sch->dev.driver_data)
                return;
        cdev = sch->dev.driver_data;
        /* Only act on devices that are currently disconnected. */
        if (cdev->private->state != DEV_STATE_DISCONNECTED)
                return;

        /* Update some values. */
        if (stsch(sch->schid, &sch->schib))
                return;

        /*
         * The pim, pam, pom values may not be accurate, but they are the best
         * we have before performing device selection :/
         */
        sch->lpm = sch->schib.pmcw.pim &
                sch->schib.pmcw.pam &
                sch->schib.pmcw.pom &
                sch->opm;
        /* Re-set some bits in the pmcw that were lost. */
        sch->schib.pmcw.isc = 3;
        sch->schib.pmcw.csense = 1;
        sch->schib.pmcw.ena = 0;
        /* More than one bit set in lpm: enable multipath mode. */
        if ((sch->lpm & (sch->lpm - 1)) != 0)
                sch->schib.pmcw.mp = 1;
        sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
        /* We should also update ssd info, but this has to wait. */
        ccw_device_start_id(cdev, 0);
}
1078
1079 static void
1080 ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
1081 {
1082         struct subchannel *sch;
1083
1084         sch = to_subchannel(cdev->dev.parent);
1085         /*
1086          * An interrupt in state offline means a previous disable was not
1087          * successful. Try again.
1088          */
1089         cio_disable_subchannel(sch);
1090 }
1091
/*
 * Retry the pending set_schib (channel-measurement reconfiguration), then
 * return to state ONLINE and re-dispatch the event that arrived while the
 * retry was pending.
 */
static void
ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
{
        retry_set_schib(cdev);
        cdev->private->state = DEV_STATE_ONLINE;
        dev_fsm_event(cdev, dev_event);
}
1099
/*
 * Retry copying the channel-measurement block, then return to state
 * ONLINE and re-dispatch the event that arrived while the copy was
 * pending.
 */
static void ccw_device_update_cmfblock(struct ccw_device *cdev,
                                       enum dev_event dev_event)
{
        cmf_retry_copy_block(cdev);
        cdev->private->state = DEV_STATE_ONLINE;
        dev_fsm_event(cdev, dev_event);
}
1107
1108 static void
1109 ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
1110 {
1111         ccw_device_set_timeout(cdev, 0);
1112         if (dev_event == DEV_EVENT_NOTOPER)
1113                 cdev->private->state = DEV_STATE_NOT_OPER;
1114         else
1115                 cdev->private->state = DEV_STATE_OFFLINE;
1116         wake_up(&cdev->private->wait_q);
1117 }
1118
1119 static void
1120 ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
1121 {
1122         int ret;
1123
1124         ret = ccw_device_cancel_halt_clear(cdev);
1125         switch (ret) {
1126         case 0:
1127                 cdev->private->state = DEV_STATE_OFFLINE;
1128                 wake_up(&cdev->private->wait_q);
1129                 break;
1130         case -ENODEV:
1131                 cdev->private->state = DEV_STATE_NOT_OPER;
1132                 wake_up(&cdev->private->wait_q);
1133                 break;
1134         default:
1135                 ccw_device_set_timeout(cdev, HZ/10);
1136         }
1137 }
1138
/*
 * No operation action. This is used e.g. to ignore a timeout event in
 * state offline.
 */
static void
ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
{
        /* Intentionally empty: the event is silently ignored. */
}
1147
/*
 * Bug operation action.  Installed for state/event combinations that must
 * never occur; if it is ever reached, log the offending jumptable slot
 * and crash via BUG().
 */
static void
ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
{
        printk(KERN_EMERG "dev_jumptable[%i][%i] == NULL\n",
               cdev->private->state, dev_event);
        BUG();
}
1158
/*
 * device statemachine
 *
 * dev_jumptable[state][event] is the action routine invoked when 'event'
 * is delivered to a device in 'state' (presumably dispatched through
 * dev_fsm_event() — see device.h).  Slots that must never fire are wired
 * to ccw_device_bug(); events to be ignored are wired to ccw_device_nop().
 */
fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
        [DEV_STATE_NOT_OPER] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_nop,
                [DEV_EVENT_INTERRUPT]   = ccw_device_bug,
                [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_SENSE_PGID] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_sense_pgid_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_onoff_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_SENSE_ID] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_recog_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_sense_id_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_recog_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_OFFLINE] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_offline_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_offline_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_VERIFY] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_verify_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_onoff_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_ONLINE] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_online_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_online_verify,
        },
        [DEV_STATE_W4SENSE] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_w4sense,
                [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
                [DEV_EVENT_VERIFY]      = ccw_device_online_verify,
        },
        [DEV_STATE_DISBAND_PGID] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_disband_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_onoff_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_BOXED] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_offline_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_stlck_done,
                [DEV_EVENT_TIMEOUT]     = ccw_device_stlck_done,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        /* states to wait for i/o completion before doing something */
        [DEV_STATE_CLEAR_VERIFY] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_clear_verify,
                [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_TIMEOUT_KILL] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_killing_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_killing_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_nop, //FIXME
        },
        [DEV_STATE_WAIT4IO] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_wait4io_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_wait4io_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_wait4io_verify,
        },
        [DEV_STATE_QUIESCE] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_quiesce_done,
                [DEV_EVENT_INTERRUPT]   = ccw_device_quiesce_done,
                [DEV_EVENT_TIMEOUT]     = ccw_device_quiesce_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        /* special states for devices gone not operational */
        [DEV_STATE_DISCONNECTED] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_nop,
                [DEV_EVENT_INTERRUPT]   = ccw_device_start_id,
                [DEV_EVENT_TIMEOUT]     = ccw_device_bug,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_DISCONNECTED_SENSE_ID] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_recog_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_sense_id_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_recog_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_CMFCHANGE] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_change_cmfstate,
                [DEV_EVENT_INTERRUPT]   = ccw_device_change_cmfstate,
                [DEV_EVENT_TIMEOUT]     = ccw_device_change_cmfstate,
                [DEV_EVENT_VERIFY]      = ccw_device_change_cmfstate,
        },
        [DEV_STATE_CMFUPDATE] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_update_cmfblock,
                [DEV_EVENT_INTERRUPT]   = ccw_device_update_cmfblock,
                [DEV_EVENT_TIMEOUT]     = ccw_device_update_cmfblock,
                [DEV_EVENT_VERIFY]      = ccw_device_update_cmfblock,
        },
};
1268
1269 /*
1270  * io_subchannel_irq is called for "real" interrupts or for status
1271  * pending conditions on msch.
1272  */
1273 void
1274 io_subchannel_irq (struct device *pdev)
1275 {
1276         struct ccw_device *cdev;
1277
1278         cdev = to_subchannel(pdev)->dev.driver_data;
1279
1280         CIO_TRACE_EVENT (3, "IRQ");
1281         CIO_TRACE_EVENT (3, pdev->bus_id);
1282         if (cdev)
1283                 dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
1284 }
1285
1286 EXPORT_SYMBOL_GPL(ccw_device_set_timeout);