/* [S390] cio: Dump ccw device information in case of timeout. */
/* pandora-kernel.git / drivers/s390/cio/device_fsm.c */
1 /*
2  * drivers/s390/cio/device_fsm.c
3  * finite state machine for device handling
4  *
5  *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
6  *                       IBM Corporation
7  *    Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
8  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
9  */
10
11 #include <linux/module.h>
12 #include <linux/init.h>
13 #include <linux/jiffies.h>
14 #include <linux/string.h>
15
16 #include <asm/ccwdev.h>
17 #include <asm/cio.h>
18 #include <asm/chpid.h>
19
20 #include "cio.h"
21 #include "cio_debug.h"
22 #include "css.h"
23 #include "device.h"
24 #include "chsc.h"
25 #include "ioasm.h"
26 #include "chp.h"
27
/* Set via the "ccw_timeout_log" kernel parameter; enables the device
 * state dump in ccw_device_timeout(). */
static int timeout_log_enabled;
29
30 int
31 device_is_online(struct subchannel *sch)
32 {
33         struct ccw_device *cdev;
34
35         if (!sch->dev.driver_data)
36                 return 0;
37         cdev = sch->dev.driver_data;
38         return (cdev->private->state == DEV_STATE_ONLINE);
39 }
40
41 int
42 device_is_disconnected(struct subchannel *sch)
43 {
44         struct ccw_device *cdev;
45
46         if (!sch->dev.driver_data)
47                 return 0;
48         cdev = sch->dev.driver_data;
49         return (cdev->private->state == DEV_STATE_DISCONNECTED ||
50                 cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
51 }
52
53 void
54 device_set_disconnected(struct subchannel *sch)
55 {
56         struct ccw_device *cdev;
57
58         if (!sch->dev.driver_data)
59                 return;
60         cdev = sch->dev.driver_data;
61         ccw_device_set_timeout(cdev, 0);
62         cdev->private->flags.fake_irb = 0;
63         cdev->private->state = DEV_STATE_DISCONNECTED;
64 }
65
66 void device_set_intretry(struct subchannel *sch)
67 {
68         struct ccw_device *cdev;
69
70         cdev = sch->dev.driver_data;
71         if (!cdev)
72                 return;
73         cdev->private->flags.intretry = 1;
74 }
75
76 int device_trigger_verify(struct subchannel *sch)
77 {
78         struct ccw_device *cdev;
79
80         cdev = sch->dev.driver_data;
81         if (!cdev || !cdev->online)
82                 return -EINVAL;
83         dev_fsm_event(cdev, DEV_EVENT_VERIFY);
84         return 0;
85 }
86
/*
 * Early-parameter handler for "ccw_timeout_log": enable logging of ccw
 * device state on timeout. Returns 1 so the option is consumed.
 */
static int __init ccw_timeout_log_setup(char *unused)
{
        timeout_log_enabled = 1;
        return 1;
}

__setup("ccw_timeout_log", ccw_timeout_log_setup);
94
/*
 * Dump diagnostic state for @cdev after an I/O timeout: the orb, the
 * last channel program, the schib and the private flags. Only invoked
 * when the "ccw_timeout_log" kernel parameter enabled it.
 */
static void ccw_timeout_log(struct ccw_device *cdev)
{
        struct schib schib;
        struct subchannel *sch;
        int cc;

        sch = to_subchannel(cdev->dev.parent);
        /* Fetch current subchannel status; cc is reported below. */
        cc = stsch(sch->schid, &schib);

        printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
               "device information:\n", get_clock());
        printk(KERN_WARNING "cio: orb:\n");
        print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
                       &sch->orb, sizeof(sch->orb), 0);
        printk(KERN_WARNING "cio: ccw device bus id: %s\n", cdev->dev.bus_id);
        printk(KERN_WARNING "cio: subchannel bus id: %s\n", sch->dev.bus_id);
        printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
               "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);

        /* Tell internally generated channel programs from driver ones. */
        if ((void *)(addr_t)sch->orb.cpa == &sch->sense_ccw ||
            (void *)(addr_t)sch->orb.cpa == cdev->private->iccws)
                printk(KERN_WARNING "cio: last channel program (intern):\n");
        else
                printk(KERN_WARNING "cio: last channel program:\n");

        print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
                       (void *)(addr_t)sch->orb.cpa, sizeof(struct ccw1), 0);
        printk(KERN_WARNING "cio: ccw device state: %d\n",
               cdev->private->state);
        printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
        printk(KERN_WARNING "cio: schib:\n");
        print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
                       &schib, sizeof(schib), 0);
        printk(KERN_WARNING "cio: ccw device flags:\n");
        print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
                       &cdev->private->flags, sizeof(cdev->private->flags), 0);
}
132
133 /*
134  * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
135  */
136 static void
137 ccw_device_timeout(unsigned long data)
138 {
139         struct ccw_device *cdev;
140
141         cdev = (struct ccw_device *) data;
142         spin_lock_irq(cdev->ccwlock);
143         if (timeout_log_enabled)
144                 ccw_timeout_log(cdev);
145         dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
146         spin_unlock_irq(cdev->ccwlock);
147 }
148
/*
 * (Re-)arm the per-device timeout timer.
 * @expires: timeout in jiffies relative to now; 0 cancels the timer.
 */
void
ccw_device_set_timeout(struct ccw_device *cdev, int expires)
{
        if (expires == 0) {
                del_timer(&cdev->private->timer);
                return;
        }
        if (timer_pending(&cdev->private->timer)) {
                /* mod_timer() returns non-zero if the timer was active. */
                if (mod_timer(&cdev->private->timer, jiffies + expires))
                        return;
        }
        /* Timer not pending: set it up from scratch and start it. */
        cdev->private->timer.function = ccw_device_timeout;
        cdev->private->timer.data = (unsigned long) cdev;
        cdev->private->timer.expires = jiffies + expires;
        add_timer(&cdev->private->timer);
}
168
169 /* Kill any pending timers after machine check. */
170 void
171 device_kill_pending_timer(struct subchannel *sch)
172 {
173         struct ccw_device *cdev;
174
175         if (!sch->dev.driver_data)
176                 return;
177         cdev = sch->dev.driver_data;
178         ccw_device_set_timeout(cdev, 0);
179 }
180
/*
 * Cancel running i/o. This is called repeatedly since halt/clear are
 * asynchronous operations. We do one try with cio_cancel, two tries
 * with cio_halt, 255 tries with cio_clear. If everythings fails panic.
 * Returns 0 if device now idle, -ENODEV for device not operational and
 * -EBUSY if an interrupt is expected (either from halt/clear or from a
 * status pending).
 */
int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
        struct subchannel *sch;
        int ret;

        sch = to_subchannel(cdev->dev.parent);
        /* Refresh the schib; bail out if the subchannel vanished. */
        ret = stsch(sch->schid, &sch->schib);
        if (ret || !sch->schib.pmcw.dnv)
                return -ENODEV;
        if (!sch->schib.pmcw.ena)
                /* Not operational -> done. */
                return 0;
        /* Stage 1: cancel io. */
        if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) &&
            !(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
                ret = cio_cancel(sch);
                if (ret != -EINVAL)
                        return ret;
                /* cancel io unsuccessful. From now on it is asynchronous. */
                cdev->private->iretry = 3;      /* 3 halt retries. */
        }
        if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
                /* Stage 2: halt io. */
                if (cdev->private->iretry) {
                        cdev->private->iretry--;
                        ret = cio_halt(sch);
                        if (ret != -EBUSY)
                                /* 0 = halt initiated: an irq will follow. */
                                return (ret == 0) ? -EBUSY : ret;
                }
                /* halt io unsuccessful. */
                cdev->private->iretry = 255;    /* 255 clear retries. */
        }
        /* Stage 3: clear io. */
        if (cdev->private->iretry) {
                cdev->private->iretry--;
                ret = cio_clear (sch);
                return (ret == 0) ? -EBUSY : ret;
        }
        /* All retries exhausted - the subchannel cannot be stopped. */
        panic("Can't stop i/o on subchannel.\n");
}
230
231 static int
232 ccw_device_handle_oper(struct ccw_device *cdev)
233 {
234         struct subchannel *sch;
235
236         sch = to_subchannel(cdev->dev.parent);
237         cdev->private->flags.recog_done = 1;
238         /*
239          * Check if cu type and device type still match. If
240          * not, it is certainly another device and we have to
241          * de- and re-register.
242          */
243         if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
244             cdev->id.cu_model != cdev->private->senseid.cu_model ||
245             cdev->id.dev_type != cdev->private->senseid.dev_type ||
246             cdev->id.dev_model != cdev->private->senseid.dev_model) {
247                 PREPARE_WORK(&cdev->private->kick_work,
248                              ccw_device_do_unreg_rereg);
249                 queue_work(ccw_device_work, &cdev->private->kick_work);
250                 return 0;
251         }
252         cdev->private->flags.donotify = 1;
253         return 1;
254 }
255
256 /*
257  * The machine won't give us any notification by machine check if a chpid has
258  * been varied online on the SE so we have to find out by magic (i. e. driving
259  * the channel subsystem to device selection and updating our path masks).
260  */
261 static void
262 __recover_lost_chpids(struct subchannel *sch, int old_lpm)
263 {
264         int mask, i;
265         struct chp_id chpid;
266
267         chp_id_init(&chpid);
268         for (i = 0; i<8; i++) {
269                 mask = 0x80 >> i;
270                 if (!(sch->lpm & mask))
271                         continue;
272                 if (old_lpm & mask)
273                         continue;
274                 chpid.id = sch->schib.pmcw.chpid[i];
275                 if (!chp_is_registered(chpid))
276                         css_schedule_eval_all();
277         }
278 }
279
/*
 * Stop device recognition: evaluate the sense id result, refresh the
 * path masks and move the device into its final state @state.
 */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
        struct subchannel *sch;
        int notify, old_lpm, same_dev;

        sch = to_subchannel(cdev->dev.parent);

        ccw_device_set_timeout(cdev, 0);
        cio_disable_subchannel(sch);
        /*
         * Now that we tried recognition, we have performed device selection
         * through ssch() and the path information is up to date.
         */
        old_lpm = sch->lpm;
        stsch(sch->schid, &sch->schib);
        sch->lpm = sch->schib.pmcw.pam & sch->opm;
        /* Check since device may again have become not operational. */
        if (!sch->schib.pmcw.dnv)
                state = DEV_STATE_NOT_OPER;
        if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
                /* Force reprobe on all chpids. */
                old_lpm = 0;
        if (sch->lpm != old_lpm)
                __recover_lost_chpids(sch, old_lpm);
        if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
                if (state == DEV_STATE_NOT_OPER) {
                        cdev->private->flags.recog_done = 1;
                        cdev->private->state = DEV_STATE_DISCONNECTED;
                        return;
                }
                /* Boxed devices don't need extra treatment. */
        }
        notify = 0;
        same_dev = 0; /* Keep the compiler quiet... */
        switch (state) {
        case DEV_STATE_NOT_OPER:
                CIO_DEBUG(KERN_WARNING, 2,
                          "cio: SenseID : unknown device %04x on subchannel "
                          "0.%x.%04x\n", cdev->private->dev_id.devno,
                          sch->schid.ssid, sch->schid.sch_no);
                break;
        case DEV_STATE_OFFLINE:
                if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
                        /* Device reappeared: is it still the same one? */
                        same_dev = ccw_device_handle_oper(cdev);
                        notify = 1;
                }
                /* fill out sense information */
                memset(&cdev->id, 0, sizeof(cdev->id));
                cdev->id.cu_type   = cdev->private->senseid.cu_type;
                cdev->id.cu_model  = cdev->private->senseid.cu_model;
                cdev->id.dev_type  = cdev->private->senseid.dev_type;
                cdev->id.dev_model = cdev->private->senseid.dev_model;
                if (notify) {
                        cdev->private->state = DEV_STATE_OFFLINE;
                        if (same_dev) {
                                /* Get device online again. */
                                ccw_device_online(cdev);
                                wake_up(&cdev->private->wait_q);
                        }
                        return;
                }
                /* Issue device info message. */
                CIO_DEBUG(KERN_INFO, 2,
                          "cio: SenseID : device 0.%x.%04x reports: "
                          "CU  Type/Mod = %04X/%02X, Dev Type/Mod = "
                          "%04X/%02X\n",
                          cdev->private->dev_id.ssid,
                          cdev->private->dev_id.devno,
                          cdev->id.cu_type, cdev->id.cu_model,
                          cdev->id.dev_type, cdev->id.dev_model);
                break;
        case DEV_STATE_BOXED:
                CIO_DEBUG(KERN_WARNING, 2,
                          "cio: SenseID : boxed device %04x on subchannel "
                          "0.%x.%04x\n", cdev->private->dev_id.devno,
                          sch->schid.ssid, sch->schid.sch_no);
                break;
        }
        cdev->private->state = state;
        io_subchannel_recog_done(cdev);
        if (state != DEV_STATE_NOT_OPER)
                wake_up(&cdev->private->wait_q);
}
367
368 /*
369  * Function called from device_id.c after sense id has completed.
370  */
371 void
372 ccw_device_sense_id_done(struct ccw_device *cdev, int err)
373 {
374         switch (err) {
375         case 0:
376                 ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
377                 break;
378         case -ETIME:            /* Sense id stopped by timeout. */
379                 ccw_device_recog_done(cdev, DEV_STATE_BOXED);
380                 break;
381         default:
382                 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
383                 break;
384         }
385 }
386
/*
 * Worker: tell the subchannel driver that a formerly disconnected
 * device became operational again. If the driver declines (or there is
 * no notify callback), the device gets unregistered and reregistered.
 */
static void
ccw_device_oper_notify(struct work_struct *work)
{
        struct ccw_device_private *priv;
        struct ccw_device *cdev;
        struct subchannel *sch;
        int ret;
        unsigned long flags;

        priv = container_of(work, struct ccw_device_private, kick_work);
        cdev = priv->cdev;
        spin_lock_irqsave(cdev->ccwlock, flags);
        sch = to_subchannel(cdev->dev.parent);
        if (sch->driver && sch->driver->notify) {
                /* Drop the lock around the driver callback. */
                spin_unlock_irqrestore(cdev->ccwlock, flags);
                ret = sch->driver->notify(&sch->dev, CIO_OPER);
                spin_lock_irqsave(cdev->ccwlock, flags);
        } else
                ret = 0;
        if (ret) {
                /* Reenable channel measurements, if needed. */
                spin_unlock_irqrestore(cdev->ccwlock, flags);
                cmf_reenable(cdev);
                spin_lock_irqsave(cdev->ccwlock, flags);
                wake_up(&cdev->private->wait_q);
        }
        spin_unlock_irqrestore(cdev->ccwlock, flags);
        if (!ret)
                /* Driver doesn't want device back. */
                ccw_device_do_unreg_rereg(work);
}
418
/*
 * Finished with online/offline processing: stop the timer, record the
 * new state, notify waiters and drop the device reference when the
 * device does not stay online.
 */
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);

        ccw_device_set_timeout(cdev, 0);

        if (state != DEV_STATE_ONLINE)
                cio_disable_subchannel(sch);

        /* Reset device status. */
        memset(&cdev->private->irb, 0, sizeof(struct irb));

        cdev->private->state = state;


        if (state == DEV_STATE_BOXED)
                CIO_DEBUG(KERN_WARNING, 2,
                          "cio: Boxed device %04x on subchannel %04x\n",
                          cdev->private->dev_id.devno, sch->schid.sch_no);

        if (cdev->private->flags.donotify) {
                /* Deferred oper notification was requested earlier. */
                cdev->private->flags.donotify = 0;
                PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify);
                queue_work(ccw_device_notify_work, &cdev->private->kick_work);
        }
        wake_up(&cdev->private->wait_q);

        if (css_init_done && state != DEV_STATE_ONLINE)
                put_device (&cdev->dev);
}
455
456 static int cmp_pgid(struct pgid *p1, struct pgid *p2)
457 {
458         char *c1;
459         char *c2;
460
461         c1 = (char *)p1;
462         c2 = (char *)p2;
463
464         return memcmp(c1 + 1, c2 + 1, sizeof(struct pgid) - 1);
465 }
466
/*
 * Scan the per-chpid pgids and derive a single common pgid in slot 0.
 * If two valid but conflicting pgids are found, path grouping is
 * disabled for the device.
 */
static void __ccw_device_get_common_pgid(struct ccw_device *cdev)
{
        int i;
        int last;   /* index of the first valid (non-reset) pgid seen */

        last = 0;
        for (i = 0; i < 8; i++) {
                if (cdev->private->pgid[i].inf.ps.state1 == SNID_STATE1_RESET)
                        /* No PGID yet */
                        continue;
                if (cdev->private->pgid[last].inf.ps.state1 ==
                    SNID_STATE1_RESET) {
                        /* First non-zero PGID */
                        last = i;
                        continue;
                }
                if (cmp_pgid(&cdev->private->pgid[i],
                             &cdev->private->pgid[last]) == 0)
                        /* Non-conflicting PGIDs */
                        continue;

                /* PGID mismatch, can't pathgroup. */
                CIO_MSG_EVENT(0, "SNID - pgid mismatch for device "
                              "0.%x.%04x, can't pathgroup\n",
                              cdev->private->dev_id.ssid,
                              cdev->private->dev_id.devno);
                cdev->private->options.pgroup = 0;
                return;
        }
        if (cdev->private->pgid[last].inf.ps.state1 ==
            SNID_STATE1_RESET)
                /* No previous pgid found */
                memcpy(&cdev->private->pgid[0],
                       &channel_subsystems[0]->global_pgid,
                       sizeof(struct pgid));
        else
                /* Use existing pgid */
                memcpy(&cdev->private->pgid[0], &cdev->private->pgid[last],
                       sizeof(struct pgid));
}
507
508 /*
509  * Function called from device_pgid.c after sense path ground has completed.
510  */
511 void
512 ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
513 {
514         struct subchannel *sch;
515
516         sch = to_subchannel(cdev->dev.parent);
517         switch (err) {
518         case -EOPNOTSUPP: /* path grouping not supported, use nop instead. */
519                 cdev->private->options.pgroup = 0;
520                 break;
521         case 0: /* success */
522         case -EACCES: /* partial success, some paths not operational */
523                 /* Check if all pgids are equal or 0. */
524                 __ccw_device_get_common_pgid(cdev);
525                 break;
526         case -ETIME:            /* Sense path group id stopped by timeout. */
527         case -EUSERS:           /* device is reserved for someone else. */
528                 ccw_device_done(cdev, DEV_STATE_BOXED);
529                 return;
530         default:
531                 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
532                 return;
533         }
534         /* Start Path Group verification. */
535         cdev->private->state = DEV_STATE_VERIFY;
536         cdev->private->flags.doverify = 0;
537         ccw_device_verify_start(cdev);
538 }
539
/*
 * Start device recognition: enable the subchannel and kick off sense
 * id processing with a 60 second guard timeout.
 * Returns 0 on success, -EINVAL for a wrong state, or the error from
 * cio_enable_subchannel().
 */
int
ccw_device_recognition(struct ccw_device *cdev)
{
        struct subchannel *sch;
        int ret;

        if ((cdev->private->state != DEV_STATE_NOT_OPER) &&
            (cdev->private->state != DEV_STATE_BOXED))
                return -EINVAL;
        sch = to_subchannel(cdev->dev.parent);
        ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
        if (ret != 0)
                /* Couldn't enable the subchannel for i/o. Sick device. */
                return ret;

        /* After 60s the device recognition is considered to have failed. */
        ccw_device_set_timeout(cdev, 60*HZ);

        /*
         * We used to start here with a sense pgid to find out whether a device
         * is locked by someone else. Unfortunately, the sense pgid command
         * code has other meanings on devices predating the path grouping
         * algorithm, so we start with sense id and box the device after an
         * timeout (or if sense pgid during path verification detects the device
         * is locked, as may happen on newer devices).
         */
        cdev->private->flags.recog_done = 0;
        cdev->private->state = DEV_STATE_SENSE_ID;
        ccw_device_sense_id_start(cdev);
        return 0;
}
574
575 /*
576  * Handle timeout in device recognition.
577  */
578 static void
579 ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
580 {
581         int ret;
582
583         ret = ccw_device_cancel_halt_clear(cdev);
584         switch (ret) {
585         case 0:
586                 ccw_device_recog_done(cdev, DEV_STATE_BOXED);
587                 break;
588         case -ENODEV:
589                 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
590                 break;
591         default:
592                 ccw_device_set_timeout(cdev, 3*HZ);
593         }
594 }
595
596
/*
 * Finished with path verification: update path masks, possibly repeat
 * the verification, and finalize the device state based on @err.
 */
void
ccw_device_verify_done(struct ccw_device *cdev, int err)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        /* Update schib - pom may have changed. */
        stsch(sch->schid, &sch->schib);
        /* Update lpm with verified path mask. */
        sch->lpm = sch->vpm;
        /* Repeat path verification? */
        if (cdev->private->flags.doverify) {
                cdev->private->flags.doverify = 0;
                ccw_device_verify_start(cdev);
                return;
        }
        switch (err) {
        case -EOPNOTSUPP: /* path grouping not supported, just set online. */
                cdev->private->options.pgroup = 0;
                /* fallthrough */
        case 0:
                ccw_device_done(cdev, DEV_STATE_ONLINE);
                /* Deliver fake irb to device driver, if needed. */
                if (cdev->private->flags.fake_irb) {
                        memset(&cdev->private->irb, 0, sizeof(struct irb));
                        cdev->private->irb.scsw.cc = 1;
                        cdev->private->irb.scsw.fctl = SCSW_FCTL_START_FUNC;
                        cdev->private->irb.scsw.actl = SCSW_ACTL_START_PEND;
                        cdev->private->irb.scsw.stctl = SCSW_STCTL_STATUS_PEND;
                        cdev->private->flags.fake_irb = 0;
                        if (cdev->handler)
                                cdev->handler(cdev, cdev->private->intparm,
                                              &cdev->private->irb);
                        memset(&cdev->private->irb, 0, sizeof(struct irb));
                }
                break;
        case -ETIME:
                /* Reset oper notify indication after verify error. */
                cdev->private->flags.donotify = 0;
                ccw_device_done(cdev, DEV_STATE_BOXED);
                break;
        default:
                /* Reset oper notify indication after verify error. */
                cdev->private->flags.donotify = 0;
                if (cdev->online)
                        dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
                else
                        ccw_device_done(cdev, DEV_STATE_NOT_OPER);
                break;
        }
}
647
/*
 * Get device online: take a device reference, enable the subchannel
 * and start either sense pgid or immediate path verification.
 * Returns 0 on success, -EINVAL for a wrong state, -ENODEV when no
 * reference could be obtained, or the cio_enable_subchannel() error.
 */
int
ccw_device_online(struct ccw_device *cdev)
{
        struct subchannel *sch;
        int ret;

        if ((cdev->private->state != DEV_STATE_OFFLINE) &&
            (cdev->private->state != DEV_STATE_BOXED))
                return -EINVAL;
        sch = to_subchannel(cdev->dev.parent);
        if (css_init_done && !get_device(&cdev->dev))
                return -ENODEV;
        ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
        if (ret != 0) {
                /* Couldn't enable the subchannel for i/o. Sick device. */
                if (ret == -ENODEV)
                        dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
                return ret;
        }
        /* Do we want to do path grouping? */
        if (!cdev->private->options.pgroup) {
                /* Start initial path verification. */
                cdev->private->state = DEV_STATE_VERIFY;
                cdev->private->flags.doverify = 0;
                ccw_device_verify_start(cdev);
                return 0;
        }
        /* Do a SensePGID first. */
        cdev->private->state = DEV_STATE_SENSE_PGID;
        ccw_device_sense_pgid_start(cdev);
        return 0;
}
683
684 void
685 ccw_device_disband_done(struct ccw_device *cdev, int err)
686 {
687         switch (err) {
688         case 0:
689                 ccw_device_done(cdev, DEV_STATE_OFFLINE);
690                 break;
691         case -ETIME:
692                 ccw_device_done(cdev, DEV_STATE_BOXED);
693                 break;
694         default:
695                 cdev->private->flags.donotify = 0;
696                 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
697                 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
698                 break;
699         }
700 }
701
702 /*
703  * Shutdown device.
704  */
705 int
706 ccw_device_offline(struct ccw_device *cdev)
707 {
708         struct subchannel *sch;
709
710         if (ccw_device_is_orphan(cdev)) {
711                 ccw_device_done(cdev, DEV_STATE_OFFLINE);
712                 return 0;
713         }
714         sch = to_subchannel(cdev->dev.parent);
715         if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv)
716                 return -ENODEV;
717         if (cdev->private->state != DEV_STATE_ONLINE) {
718                 if (sch->schib.scsw.actl != 0)
719                         return -EBUSY;
720                 return -EINVAL;
721         }
722         if (sch->schib.scsw.actl != 0)
723                 return -EBUSY;
724         /* Are we doing path grouping? */
725         if (!cdev->private->options.pgroup) {
726                 /* No, set state offline immediately. */
727                 ccw_device_done(cdev, DEV_STATE_OFFLINE);
728                 return 0;
729         }
730         /* Start Set Path Group commands. */
731         cdev->private->state = DEV_STATE_DISBAND_PGID;
732         ccw_device_disband_start(cdev);
733         return 0;
734 }
735
736 /*
737  * Handle timeout in device online/offline process.
738  */
739 static void
740 ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
741 {
742         int ret;
743
744         ret = ccw_device_cancel_halt_clear(cdev);
745         switch (ret) {
746         case 0:
747                 ccw_device_done(cdev, DEV_STATE_BOXED);
748                 break;
749         case -ENODEV:
750                 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
751                 break;
752         default:
753                 ccw_device_set_timeout(cdev, 3*HZ);
754         }
755 }
756
/*
 * Handle not oper event in device recognition: finish recognition in
 * the not operational state.
 */
static void
ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
        ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
}
765
766 /*
767  * Handle not operational event in non-special state.
768  */
769 static void ccw_device_generic_notoper(struct ccw_device *cdev,
770                                        enum dev_event dev_event)
771 {
772         struct subchannel *sch;
773
774         cdev->private->state = DEV_STATE_NOT_OPER;
775         sch = to_subchannel(cdev->dev.parent);
776         css_schedule_eval(sch->schid);
777 }
778
/*
 * Handle path verification event: verify now when the device is idle,
 * otherwise remember the request for after the pending I/O or sense
 * has completed.
 */
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct subchannel *sch;

        if (cdev->private->state == DEV_STATE_W4SENSE) {
                /* Remember the request; handled after sense completes. */
                cdev->private->flags.doverify = 1;
                return;
        }
        sch = to_subchannel(cdev->dev.parent);
        /*
         * Since we might not just be coming from an interrupt from the
         * subchannel we have to update the schib.
         */
        stsch(sch->schid, &sch->schib);

        if (sch->schib.scsw.actl != 0 ||
            (sch->schib.scsw.stctl & SCSW_STCTL_STATUS_PEND) ||
            (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) {
                /*
                 * No final status yet or final status not yet delivered
                 * to the device driver. Can't do path verification now,
                 * delay until final status was delivered.
                 */
                cdev->private->flags.doverify = 1;
                return;
        }
        /* Device is idle, we can do the path verification. */
        cdev->private->state = DEV_STATE_VERIFY;
        cdev->private->flags.doverify = 0;
        ccw_device_verify_start(cdev);
}
814
/*
 * Got an interrupt for a normal io (state online): handle unsolicited
 * interrupts, start basic sense when a unit check carries no sense
 * data, and otherwise accumulate status and call the driver handler.
 */
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct irb *irb;

        /* Interrupt response block is taken from the lowcore. */
        irb = (struct irb *) __LC_IRB;
        /* Check for unsolicited interrupt. */
        if ((irb->scsw.stctl ==
                        (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS))
            && (!irb->scsw.cc)) {
                if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
                    !irb->esw.esw0.erw.cons) {
                        /* Unit check but no sense data. Need basic sense. */
                        if (ccw_device_do_sense(cdev, irb) != 0)
                                goto call_handler_unsol;
                        memcpy(&cdev->private->irb, irb, sizeof(struct irb));
                        cdev->private->state = DEV_STATE_W4SENSE;
                        cdev->private->intparm = 0;
                        return;
                }
call_handler_unsol:
                if (cdev->handler)
                        cdev->handler (cdev, 0, irb);
                if (cdev->private->flags.doverify)
                        ccw_device_online_verify(cdev, 0);
                return;
        }
        /* Accumulate status and find out if a basic sense is needed. */
        ccw_device_accumulate_irb(cdev, irb);
        if (cdev->private->flags.dosense) {
                if (ccw_device_do_sense(cdev, irb) == 0) {
                        cdev->private->state = DEV_STATE_W4SENSE;
                }
                return;
        }
        /* Call the handler. */
        if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
                /* Start delayed path verification. */
                ccw_device_online_verify(cdev, 0);
}
858
859 /*
860  * Got an timeout in online state.
861  */
862 static void
863 ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
864 {
865         int ret;
866
867         ccw_device_set_timeout(cdev, 0);
868         ret = ccw_device_cancel_halt_clear(cdev);
869         if (ret == -EBUSY) {
870                 ccw_device_set_timeout(cdev, 3*HZ);
871                 cdev->private->state = DEV_STATE_TIMEOUT_KILL;
872                 return;
873         }
874         if (ret == -ENODEV)
875                 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
876         else if (cdev->handler)
877                 cdev->handler(cdev, cdev->private->intparm,
878                               ERR_PTR(-ETIMEDOUT));
879 }
880
881 /*
882  * Got an interrupt for a basic sense.
883  */
884 static void
885 ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
886 {
887         struct irb *irb;
888
889         irb = (struct irb *) __LC_IRB;
890         /* Check for unsolicited interrupt. */
891         if (irb->scsw.stctl ==
892                         (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
893                 if (irb->scsw.cc == 1)
894                         /* Basic sense hasn't started. Try again. */
895                         ccw_device_do_sense(cdev, irb);
896                 else {
897                         CIO_MSG_EVENT(2, "Huh? 0.%x.%04x: unsolicited "
898                                       "interrupt during w4sense...\n",
899                                       cdev->private->dev_id.ssid,
900                                       cdev->private->dev_id.devno);
901                         if (cdev->handler)
902                                 cdev->handler (cdev, 0, irb);
903                 }
904                 return;
905         }
906         /*
907          * Check if a halt or clear has been issued in the meanwhile. If yes,
908          * only deliver the halt/clear interrupt to the device driver as if it
909          * had killed the original request.
910          */
911         if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
912                 /* Retry Basic Sense if requested. */
913                 if (cdev->private->flags.intretry) {
914                         cdev->private->flags.intretry = 0;
915                         ccw_device_do_sense(cdev, irb);
916                         return;
917                 }
918                 cdev->private->flags.dosense = 0;
919                 memset(&cdev->private->irb, 0, sizeof(struct irb));
920                 ccw_device_accumulate_irb(cdev, irb);
921                 goto call_handler;
922         }
923         /* Add basic sense info to irb. */
924         ccw_device_accumulate_basic_sense(cdev, irb);
925         if (cdev->private->flags.dosense) {
926                 /* Another basic sense is needed. */
927                 ccw_device_do_sense(cdev, irb);
928                 return;
929         }
930 call_handler:
931         cdev->private->state = DEV_STATE_ONLINE;
932         /* Call the handler. */
933         if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
934                 /* Start delayed path verification. */
935                 ccw_device_online_verify(cdev, 0);
936 }
937
938 static void
939 ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
940 {
941         struct irb *irb;
942
943         irb = (struct irb *) __LC_IRB;
944         /* Accumulate status. We don't do basic sense. */
945         ccw_device_accumulate_irb(cdev, irb);
946         /* Remember to clear irb to avoid residuals. */
947         memset(&cdev->private->irb, 0, sizeof(struct irb));
948         /* Try to start delayed device verification. */
949         ccw_device_online_verify(cdev, 0);
950         /* Note: Don't call handler for cio initiated clear! */
951 }
952
953 static void
954 ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
955 {
956         struct subchannel *sch;
957
958         sch = to_subchannel(cdev->dev.parent);
959         ccw_device_set_timeout(cdev, 0);
960         /* Start delayed path verification. */
961         ccw_device_online_verify(cdev, 0);
962         /* OK, i/o is dead now. Call interrupt handler. */
963         if (cdev->handler)
964                 cdev->handler(cdev, cdev->private->intparm,
965                               ERR_PTR(-EIO));
966 }
967
968 static void
969 ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
970 {
971         int ret;
972
973         ret = ccw_device_cancel_halt_clear(cdev);
974         if (ret == -EBUSY) {
975                 ccw_device_set_timeout(cdev, 3*HZ);
976                 return;
977         }
978         /* Start delayed path verification. */
979         ccw_device_online_verify(cdev, 0);
980         if (cdev->handler)
981                 cdev->handler(cdev, cdev->private->intparm,
982                               ERR_PTR(-EIO));
983 }
984
985 void device_kill_io(struct subchannel *sch)
986 {
987         int ret;
988         struct ccw_device *cdev;
989
990         cdev = sch->dev.driver_data;
991         ret = ccw_device_cancel_halt_clear(cdev);
992         if (ret == -EBUSY) {
993                 ccw_device_set_timeout(cdev, 3*HZ);
994                 cdev->private->state = DEV_STATE_TIMEOUT_KILL;
995                 return;
996         }
997         /* Start delayed path verification. */
998         ccw_device_online_verify(cdev, 0);
999         if (cdev->handler)
1000                 cdev->handler(cdev, cdev->private->intparm,
1001                               ERR_PTR(-EIO));
1002 }
1003
/*
 * Request a path verification but don't run it yet; the flag is picked
 * up by ccw_device_online_verify() once the current task has finished.
 */
static void
ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	/* Start verification after current task finished. */
	cdev->private->flags.doverify = 1;
}
1010
1011 static void
1012 ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
1013 {
1014         struct irb *irb;
1015
1016         switch (dev_event) {
1017         case DEV_EVENT_INTERRUPT:
1018                 irb = (struct irb *) __LC_IRB;
1019                 /* Check for unsolicited interrupt. */
1020                 if ((irb->scsw.stctl ==
1021                      (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
1022                     (!irb->scsw.cc))
1023                         /* FIXME: we should restart stlck here, but this
1024                          * is extremely unlikely ... */
1025                         goto out_wakeup;
1026
1027                 ccw_device_accumulate_irb(cdev, irb);
1028                 /* We don't care about basic sense etc. */
1029                 break;
1030         default: /* timeout */
1031                 break;
1032         }
1033 out_wakeup:
1034         wake_up(&cdev->private->wait_q);
1035 }
1036
1037 static void
1038 ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
1039 {
1040         struct subchannel *sch;
1041
1042         sch = to_subchannel(cdev->dev.parent);
1043         if (cio_enable_subchannel(sch, sch->schib.pmcw.isc) != 0)
1044                 /* Couldn't enable the subchannel for i/o. Sick device. */
1045                 return;
1046
1047         /* After 60s the device recognition is considered to have failed. */
1048         ccw_device_set_timeout(cdev, 60*HZ);
1049
1050         cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
1051         ccw_device_sense_id_start(cdev);
1052 }
1053
/*
 * Re-probe a disconnected device: refresh the subchannel information,
 * restore pmcw settings lost while the device was gone and restart
 * device recognition. If a different device now answers on the
 * subchannel, move the old ccw device to the orphanage instead.
 */
void
device_trigger_reprobe(struct subchannel *sch)
{
	struct ccw_device *cdev;

	if (!sch->dev.driver_data)
		return;
	cdev = sch->dev.driver_data;
	/* Only act on devices that are actually disconnected. */
	if (cdev->private->state != DEV_STATE_DISCONNECTED)
		return;

	/* Update some values. */
	if (stsch(sch->schid, &sch->schib))
		return;
	/* Device number invalid in the schib: nothing to reprobe. */
	if (!sch->schib.pmcw.dnv)
		return;
	/*
	 * The pim, pam, pom values may not be accurate, but they are the best
	 * we have before performing device selection :/
	 */
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	/* Re-set some bits in the pmcw that were lost. */
	sch->schib.pmcw.isc = 3;
	sch->schib.pmcw.csense = 1;
	sch->schib.pmcw.ena = 0;
	/* lpm has more than one bit set -> more than one path available. */
	if ((sch->lpm & (sch->lpm - 1)) != 0)
		sch->schib.pmcw.mp = 1;
	sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
	/* We should also update ssd info, but this has to wait. */
	/* Check if this is another device which appeared on the same sch. */
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_move_to_orphanage);
		queue_work(slow_path_wq, &cdev->private->kick_work);
	} else
		ccw_device_start_id(cdev, 0);
}
1091
1092 static void
1093 ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
1094 {
1095         struct subchannel *sch;
1096
1097         sch = to_subchannel(cdev->dev.parent);
1098         /*
1099          * An interrupt in state offline means a previous disable was not
1100          * successful. Try again.
1101          */
1102         cio_disable_subchannel(sch);
1103 }
1104
/*
 * Retry a pending channel measurement schib update, go back to state
 * online and re-dispatch the event that interrupted the update.
 */
static void
ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
{
	retry_set_schib(cdev);
	cdev->private->state = DEV_STATE_ONLINE;
	/* Replay the original event in the online state. */
	dev_fsm_event(cdev, dev_event);
}
1112
/*
 * Retry a pending channel measurement block copy, go back to state
 * online and re-dispatch the event that interrupted the copy.
 */
static void ccw_device_update_cmfblock(struct ccw_device *cdev,
				       enum dev_event dev_event)
{
	cmf_retry_copy_block(cdev);
	cdev->private->state = DEV_STATE_ONLINE;
	/* Replay the original event in the online state. */
	dev_fsm_event(cdev, dev_event);
}
1120
1121 static void
1122 ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
1123 {
1124         ccw_device_set_timeout(cdev, 0);
1125         if (dev_event == DEV_EVENT_NOTOPER)
1126                 cdev->private->state = DEV_STATE_NOT_OPER;
1127         else
1128                 cdev->private->state = DEV_STATE_OFFLINE;
1129         wake_up(&cdev->private->wait_q);
1130 }
1131
1132 static void
1133 ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
1134 {
1135         int ret;
1136
1137         ret = ccw_device_cancel_halt_clear(cdev);
1138         switch (ret) {
1139         case 0:
1140                 cdev->private->state = DEV_STATE_OFFLINE;
1141                 wake_up(&cdev->private->wait_q);
1142                 break;
1143         case -ENODEV:
1144                 cdev->private->state = DEV_STATE_NOT_OPER;
1145                 wake_up(&cdev->private->wait_q);
1146                 break;
1147         default:
1148                 ccw_device_set_timeout(cdev, HZ/10);
1149         }
1150 }
1151
1152 /*
1153  * No operation action. This is used e.g. to ignore a timeout event in
1154  * state offline.
1155  */
1156 static void
1157 ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
1158 {
1159 }
1160
1161 /*
1162  * Bug operation action. 
1163  */
1164 static void
1165 ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
1166 {
1167         CIO_MSG_EVENT(0, "dev_jumptable[%i][%i] == NULL\n",
1168                       cdev->private->state, dev_event);
1169         BUG();
1170 }
1171
1172 /*
1173  * device statemachine
1174  */
1175 fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1176         [DEV_STATE_NOT_OPER] = {
1177                 [DEV_EVENT_NOTOPER]     = ccw_device_nop,
1178                 [DEV_EVENT_INTERRUPT]   = ccw_device_bug,
1179                 [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
1180                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1181         },
1182         [DEV_STATE_SENSE_PGID] = {
1183                 [DEV_EVENT_NOTOPER]     = ccw_device_generic_notoper,
1184                 [DEV_EVENT_INTERRUPT]   = ccw_device_sense_pgid_irq,
1185                 [DEV_EVENT_TIMEOUT]     = ccw_device_onoff_timeout,
1186                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1187         },
1188         [DEV_STATE_SENSE_ID] = {
1189                 [DEV_EVENT_NOTOPER]     = ccw_device_recog_notoper,
1190                 [DEV_EVENT_INTERRUPT]   = ccw_device_sense_id_irq,
1191                 [DEV_EVENT_TIMEOUT]     = ccw_device_recog_timeout,
1192                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1193         },
1194         [DEV_STATE_OFFLINE] = {
1195                 [DEV_EVENT_NOTOPER]     = ccw_device_generic_notoper,
1196                 [DEV_EVENT_INTERRUPT]   = ccw_device_offline_irq,
1197                 [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
1198                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1199         },
1200         [DEV_STATE_VERIFY] = {
1201                 [DEV_EVENT_NOTOPER]     = ccw_device_generic_notoper,
1202                 [DEV_EVENT_INTERRUPT]   = ccw_device_verify_irq,
1203                 [DEV_EVENT_TIMEOUT]     = ccw_device_onoff_timeout,
1204                 [DEV_EVENT_VERIFY]      = ccw_device_delay_verify,
1205         },
1206         [DEV_STATE_ONLINE] = {
1207                 [DEV_EVENT_NOTOPER]     = ccw_device_generic_notoper,
1208                 [DEV_EVENT_INTERRUPT]   = ccw_device_irq,
1209                 [DEV_EVENT_TIMEOUT]     = ccw_device_online_timeout,
1210                 [DEV_EVENT_VERIFY]      = ccw_device_online_verify,
1211         },
1212         [DEV_STATE_W4SENSE] = {
1213                 [DEV_EVENT_NOTOPER]     = ccw_device_generic_notoper,
1214                 [DEV_EVENT_INTERRUPT]   = ccw_device_w4sense,
1215                 [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
1216                 [DEV_EVENT_VERIFY]      = ccw_device_online_verify,
1217         },
1218         [DEV_STATE_DISBAND_PGID] = {
1219                 [DEV_EVENT_NOTOPER]     = ccw_device_generic_notoper,
1220                 [DEV_EVENT_INTERRUPT]   = ccw_device_disband_irq,
1221                 [DEV_EVENT_TIMEOUT]     = ccw_device_onoff_timeout,
1222                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1223         },
1224         [DEV_STATE_BOXED] = {
1225                 [DEV_EVENT_NOTOPER]     = ccw_device_generic_notoper,
1226                 [DEV_EVENT_INTERRUPT]   = ccw_device_stlck_done,
1227                 [DEV_EVENT_TIMEOUT]     = ccw_device_stlck_done,
1228                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1229         },
1230         /* states to wait for i/o completion before doing something */
1231         [DEV_STATE_CLEAR_VERIFY] = {
1232                 [DEV_EVENT_NOTOPER]     = ccw_device_generic_notoper,
1233                 [DEV_EVENT_INTERRUPT]   = ccw_device_clear_verify,
1234                 [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
1235                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1236         },
1237         [DEV_STATE_TIMEOUT_KILL] = {
1238                 [DEV_EVENT_NOTOPER]     = ccw_device_generic_notoper,
1239                 [DEV_EVENT_INTERRUPT]   = ccw_device_killing_irq,
1240                 [DEV_EVENT_TIMEOUT]     = ccw_device_killing_timeout,
1241                 [DEV_EVENT_VERIFY]      = ccw_device_nop, //FIXME
1242         },
1243         [DEV_STATE_QUIESCE] = {
1244                 [DEV_EVENT_NOTOPER]     = ccw_device_quiesce_done,
1245                 [DEV_EVENT_INTERRUPT]   = ccw_device_quiesce_done,
1246                 [DEV_EVENT_TIMEOUT]     = ccw_device_quiesce_timeout,
1247                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1248         },
1249         /* special states for devices gone not operational */
1250         [DEV_STATE_DISCONNECTED] = {
1251                 [DEV_EVENT_NOTOPER]     = ccw_device_nop,
1252                 [DEV_EVENT_INTERRUPT]   = ccw_device_start_id,
1253                 [DEV_EVENT_TIMEOUT]     = ccw_device_bug,
1254                 [DEV_EVENT_VERIFY]      = ccw_device_start_id,
1255         },
1256         [DEV_STATE_DISCONNECTED_SENSE_ID] = {
1257                 [DEV_EVENT_NOTOPER]     = ccw_device_recog_notoper,
1258                 [DEV_EVENT_INTERRUPT]   = ccw_device_sense_id_irq,
1259                 [DEV_EVENT_TIMEOUT]     = ccw_device_recog_timeout,
1260                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1261         },
1262         [DEV_STATE_CMFCHANGE] = {
1263                 [DEV_EVENT_NOTOPER]     = ccw_device_change_cmfstate,
1264                 [DEV_EVENT_INTERRUPT]   = ccw_device_change_cmfstate,
1265                 [DEV_EVENT_TIMEOUT]     = ccw_device_change_cmfstate,
1266                 [DEV_EVENT_VERIFY]      = ccw_device_change_cmfstate,
1267         },
1268         [DEV_STATE_CMFUPDATE] = {
1269                 [DEV_EVENT_NOTOPER]     = ccw_device_update_cmfblock,
1270                 [DEV_EVENT_INTERRUPT]   = ccw_device_update_cmfblock,
1271                 [DEV_EVENT_TIMEOUT]     = ccw_device_update_cmfblock,
1272                 [DEV_EVENT_VERIFY]      = ccw_device_update_cmfblock,
1273         },
1274 };
1275
1276 /*
1277  * io_subchannel_irq is called for "real" interrupts or for status
1278  * pending conditions on msch.
1279  */
1280 void
1281 io_subchannel_irq (struct device *pdev)
1282 {
1283         struct ccw_device *cdev;
1284
1285         cdev = to_subchannel(pdev)->dev.driver_data;
1286
1287         CIO_TRACE_EVENT (3, "IRQ");
1288         CIO_TRACE_EVENT (3, pdev->bus_id);
1289         if (cdev)
1290                 dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
1291 }
1292
1293 EXPORT_SYMBOL_GPL(ccw_device_set_timeout);