[S390] cio: Use dev_{g,s}et_drvdata().
[pandora-kernel.git] / drivers / s390 / cio / device_fsm.c
1 /*
2  * drivers/s390/cio/device_fsm.c
3  * finite state machine for device handling
4  *
5  *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
6  *                       IBM Corporation
7  *    Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
8  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
9  */
10
11 #include <linux/module.h>
12 #include <linux/init.h>
13 #include <linux/jiffies.h>
14 #include <linux/string.h>
15
16 #include <asm/ccwdev.h>
17 #include <asm/cio.h>
18 #include <asm/chpid.h>
19
20 #include "cio.h"
21 #include "cio_debug.h"
22 #include "css.h"
23 #include "device.h"
24 #include "chsc.h"
25 #include "ioasm.h"
26 #include "chp.h"
27
/* Set by the "ccw_timeout_log" kernel parameter (see ccw_timeout_log_setup);
 * enables verbose state dumping when a ccw device timeout fires. */
static int timeout_log_enabled;
29
30 int
31 device_is_online(struct subchannel *sch)
32 {
33         struct ccw_device *cdev;
34
35         cdev = sch_get_cdev(sch);
36         if (!cdev)
37                 return 0;
38         return (cdev->private->state == DEV_STATE_ONLINE);
39 }
40
41 int
42 device_is_disconnected(struct subchannel *sch)
43 {
44         struct ccw_device *cdev;
45
46         cdev = sch_get_cdev(sch);
47         if (!cdev)
48                 return 0;
49         return (cdev->private->state == DEV_STATE_DISCONNECTED ||
50                 cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
51 }
52
53 void
54 device_set_disconnected(struct subchannel *sch)
55 {
56         struct ccw_device *cdev;
57
58         cdev = sch_get_cdev(sch);
59         if (!cdev)
60                 return;
61         ccw_device_set_timeout(cdev, 0);
62         cdev->private->flags.fake_irb = 0;
63         cdev->private->state = DEV_STATE_DISCONNECTED;
64 }
65
66 void device_set_intretry(struct subchannel *sch)
67 {
68         struct ccw_device *cdev;
69
70         cdev = sch_get_cdev(sch);
71         if (!cdev)
72                 return;
73         cdev->private->flags.intretry = 1;
74 }
75
76 int device_trigger_verify(struct subchannel *sch)
77 {
78         struct ccw_device *cdev;
79
80         cdev = sch_get_cdev(sch);
81         if (!cdev || !cdev->online)
82                 return -EINVAL;
83         dev_fsm_event(cdev, DEV_EVENT_VERIFY);
84         return 0;
85 }
86
/* Kernel parameter handler: "ccw_timeout_log" turns on timeout dumping. */
static int __init ccw_timeout_log_setup(char *unused)
{
	timeout_log_enabled = 1;
	return 1;
}

__setup("ccw_timeout_log", ccw_timeout_log_setup);
94
/*
 * Dump diagnostic state for a ccw device whose I/O timed out: the orb,
 * bus ids, path masks, the last channel program, the current schib and
 * the private flags.  Only called when "ccw_timeout_log" was given on
 * the kernel command line.  Caller holds the ccwlock (see
 * ccw_device_timeout).
 */
static void ccw_timeout_log(struct ccw_device *cdev)
{
	struct schib schib;
	struct subchannel *sch;
	struct io_subchannel_private *private;
	int cc;

	sch = to_subchannel(cdev->dev.parent);
	private = to_io_private(sch);
	/* Fetch a fresh schib; cc is reported further down. */
	cc = stsch(sch->schid, &schib);

	printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
	       "device information:\n", get_clock());
	printk(KERN_WARNING "cio: orb:\n");
	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
		       &private->orb, sizeof(private->orb), 0);
	printk(KERN_WARNING "cio: ccw device bus id: %s\n", cdev->dev.bus_id);
	printk(KERN_WARNING "cio: subchannel bus id: %s\n", sch->dev.bus_id);
	printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
	       "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);

	/* Mark channel programs issued internally (sense / internal ccws). */
	if ((void *)(addr_t)private->orb.cpa == &private->sense_ccw ||
	    (void *)(addr_t)private->orb.cpa == cdev->private->iccws)
		printk(KERN_WARNING "cio: last channel program (intern):\n");
	else
		printk(KERN_WARNING "cio: last channel program:\n");

	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
		       (void *)(addr_t)private->orb.cpa,
		       sizeof(struct ccw1), 0);
	printk(KERN_WARNING "cio: ccw device state: %d\n",
	       cdev->private->state);
	printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
	printk(KERN_WARNING "cio: schib:\n");
	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
		       &schib, sizeof(schib), 0);
	printk(KERN_WARNING "cio: ccw device flags:\n");
	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
		       &cdev->private->flags, sizeof(cdev->private->flags), 0);
}
135
136 /*
137  * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
138  */
139 static void
140 ccw_device_timeout(unsigned long data)
141 {
142         struct ccw_device *cdev;
143
144         cdev = (struct ccw_device *) data;
145         spin_lock_irq(cdev->ccwlock);
146         if (timeout_log_enabled)
147                 ccw_timeout_log(cdev);
148         dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
149         spin_unlock_irq(cdev->ccwlock);
150 }
151
/*
 * Arm or cancel the per-device timeout timer.
 *
 * @expires: relative timeout in jiffies; 0 cancels a pending timer.
 *
 * When the timer is already pending, mod_timer() is tried first; it
 * returns nonzero when it modified an active timer, in which case
 * nothing more is needed.  Otherwise the timer fields are
 * (re)initialized and the timer is added.
 */
void
ccw_device_set_timeout(struct ccw_device *cdev, int expires)
{
	if (expires == 0) {
		del_timer(&cdev->private->timer);
		return;
	}
	if (timer_pending(&cdev->private->timer)) {
		if (mod_timer(&cdev->private->timer, jiffies + expires))
			return;
	}
	cdev->private->timer.function = ccw_device_timeout;
	cdev->private->timer.data = (unsigned long) cdev;
	cdev->private->timer.expires = jiffies + expires;
	add_timer(&cdev->private->timer);
}
171
/* Kill any pending timers after machine check. */
void
device_kill_pending_timer(struct subchannel *sch)
{
	struct ccw_device *cdev = sch_get_cdev(sch);

	if (cdev)
		ccw_device_set_timeout(cdev, 0);
}
183
/*
 * Cancel running i/o. This is called repeatedly since halt/clear are
 * asynchronous operations. We do one try with cio_cancel, two tries
 * with cio_halt, 255 tries with cio_clear. If everythings fails panic.
 * Returns 0 if device now idle, -ENODEV for device not operational and
 * -EBUSY if an interrupt is expected (either from halt/clear or from a
 * status pending).
 *
 * The retry counter cdev->private->iretry persists between calls, so
 * each invocation advances through the cancel -> halt -> clear stages.
 */
int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(cdev->dev.parent);
	ret = stsch(sch->schid, &sch->schib);
	if (ret || !sch->schib.pmcw.dnv)
		return -ENODEV;
	if (!sch->schib.pmcw.ena)
		/* Not operational -> done. */
		return 0;
	/* Stage 1: cancel io. */
	if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) &&
	    !(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
		ret = cio_cancel(sch);
		if (ret != -EINVAL)
			return ret;
		/* cancel io unsuccessful. From now on it is asynchronous. */
		cdev->private->iretry = 3;	/* 3 halt retries. */
	}
	if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
		/* Stage 2: halt io. */
		if (cdev->private->iretry) {
			cdev->private->iretry--;
			ret = cio_halt(sch);
			if (ret != -EBUSY)
				return (ret == 0) ? -EBUSY : ret;
		}
		/* halt io unsuccessful. */
		cdev->private->iretry = 255;	/* 255 clear retries. */
	}
	/* Stage 3: clear io. */
	if (cdev->private->iretry) {
		cdev->private->iretry--;
		ret = cio_clear (sch);
		return (ret == 0) ? -EBUSY : ret;
	}
	/* All retries exhausted - the subchannel cannot be stopped. */
	panic("Can't stop i/o on subchannel.\n");
}
233
/*
 * A device under recognition turned out to be operational again.
 * Verify it is still the same device; if not, schedule de- and
 * re-registration.  Returns 1 when the same device came back (an oper
 * notification is requested via flags.donotify), 0 when it was
 * replaced by a different device.
 */
static int
ccw_device_handle_oper(struct ccw_device *cdev)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	cdev->private->flags.recog_done = 1;
	/*
	 * Check if cu type and device type still match. If
	 * not, it is certainly another device and we have to
	 * de- and re-register.
	 */
	if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
	    cdev->id.cu_model != cdev->private->senseid.cu_model ||
	    cdev->id.dev_type != cdev->private->senseid.dev_type ||
	    cdev->id.dev_model != cdev->private->senseid.dev_model) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_do_unreg_rereg);
		queue_work(ccw_device_work, &cdev->private->kick_work);
		return 0;
	}
	cdev->private->flags.donotify = 1;
	return 1;
}
258
259 /*
260  * The machine won't give us any notification by machine check if a chpid has
261  * been varied online on the SE so we have to find out by magic (i. e. driving
262  * the channel subsystem to device selection and updating our path masks).
263  */
264 static void
265 __recover_lost_chpids(struct subchannel *sch, int old_lpm)
266 {
267         int mask, i;
268         struct chp_id chpid;
269
270         chp_id_init(&chpid);
271         for (i = 0; i<8; i++) {
272                 mask = 0x80 >> i;
273                 if (!(sch->lpm & mask))
274                         continue;
275                 if (old_lpm & mask)
276                         continue;
277                 chpid.id = sch->schib.pmcw.chpid[i];
278                 if (!chp_is_registered(chpid))
279                         css_schedule_eval_all();
280         }
281 }
282
/*
 * Stop device recognition.
 *
 * @state is the result of the sense-id phase (OFFLINE, BOXED or
 * NOT_OPER) and may be downgraded to NOT_OPER here when the device has
 * become not operational in the meantime.  Updates the path masks,
 * handles a device reappearing after disconnect, publishes the sensed
 * cu/device type and finally completes recognition via
 * io_subchannel_recog_done().
 */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;
	int notify, old_lpm, same_dev;

	sch = to_subchannel(cdev->dev.parent);

	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	/*
	 * Now that we tried recognition, we have performed device selection
	 * through ssch() and the path information is up to date.
	 */
	old_lpm = sch->lpm;
	stsch(sch->schid, &sch->schib);
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	/* Check since device may again have become not operational. */
	if (!sch->schib.pmcw.dnv)
		state = DEV_STATE_NOT_OPER;
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
		/* Force reprobe on all chpids. */
		old_lpm = 0;
	if (sch->lpm != old_lpm)
		__recover_lost_chpids(sch, old_lpm);
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
		if (state == DEV_STATE_NOT_OPER) {
			/* Disconnected device stays disconnected. */
			cdev->private->flags.recog_done = 1;
			cdev->private->state = DEV_STATE_DISCONNECTED;
			return;
		}
		/* Boxed devices don't need extra treatment. */
	}
	notify = 0;
	same_dev = 0; /* Keep the compiler quiet... */
	switch (state) {
	case DEV_STATE_NOT_OPER:
		CIO_DEBUG(KERN_WARNING, 2,
			  "SenseID : unknown device %04x on subchannel "
			  "0.%x.%04x\n", cdev->private->dev_id.devno,
			  sch->schid.ssid, sch->schid.sch_no);
		break;
	case DEV_STATE_OFFLINE:
		if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
			/* Device reappeared after disconnect. */
			same_dev = ccw_device_handle_oper(cdev);
			notify = 1;
		}
		/* fill out sense information */
		memset(&cdev->id, 0, sizeof(cdev->id));
		cdev->id.cu_type   = cdev->private->senseid.cu_type;
		cdev->id.cu_model  = cdev->private->senseid.cu_model;
		cdev->id.dev_type  = cdev->private->senseid.dev_type;
		cdev->id.dev_model = cdev->private->senseid.dev_model;
		if (notify) {
			cdev->private->state = DEV_STATE_OFFLINE;
			if (same_dev) {
				/* Get device online again. */
				ccw_device_online(cdev);
				wake_up(&cdev->private->wait_q);
			}
			return;
		}
		/* Issue device info message. */
		CIO_DEBUG(KERN_INFO, 2,
			  "SenseID : device 0.%x.%04x reports: "
			  "CU  Type/Mod = %04X/%02X, Dev Type/Mod = "
			  "%04X/%02X\n",
			  cdev->private->dev_id.ssid,
			  cdev->private->dev_id.devno,
			  cdev->id.cu_type, cdev->id.cu_model,
			  cdev->id.dev_type, cdev->id.dev_model);
		break;
	case DEV_STATE_BOXED:
		CIO_DEBUG(KERN_WARNING, 2,
			  "SenseID : boxed device %04x on subchannel "
			  "0.%x.%04x\n", cdev->private->dev_id.devno,
			  sch->schid.ssid, sch->schid.sch_no);
		break;
	}
	cdev->private->state = state;
	io_subchannel_recog_done(cdev);
	if (state != DEV_STATE_NOT_OPER)
		wake_up(&cdev->private->wait_q);
}
370
371 /*
372  * Function called from device_id.c after sense id has completed.
373  */
374 void
375 ccw_device_sense_id_done(struct ccw_device *cdev, int err)
376 {
377         switch (err) {
378         case 0:
379                 ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
380                 break;
381         case -ETIME:            /* Sense id stopped by timeout. */
382                 ccw_device_recog_done(cdev, DEV_STATE_BOXED);
383                 break;
384         default:
385                 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
386                 break;
387         }
388 }
389
/*
 * Work function: tell the subchannel driver that a device became
 * operational again (CIO_OPER).  When the driver accepts it, channel
 * measurements are reenabled and waiters are woken; when it declines,
 * the device is unregistered and reregistered from scratch.
 *
 * The ccwlock is dropped around every callout (driver notify,
 * cmf_reenable) to avoid calling out with the lock held.
 */
static void
ccw_device_oper_notify(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct subchannel *sch;
	int ret;
	unsigned long flags;

	priv = container_of(work, struct ccw_device_private, kick_work);
	cdev = priv->cdev;
	spin_lock_irqsave(cdev->ccwlock, flags);
	sch = to_subchannel(cdev->dev.parent);
	if (sch->driver && sch->driver->notify) {
		spin_unlock_irqrestore(cdev->ccwlock, flags);
		ret = sch->driver->notify(sch, CIO_OPER);
		spin_lock_irqsave(cdev->ccwlock, flags);
	} else
		ret = 0;
	if (ret) {
		/* Reenable channel measurements, if needed. */
		spin_unlock_irqrestore(cdev->ccwlock, flags);
		cmf_reenable(cdev);
		spin_lock_irqsave(cdev->ccwlock, flags);
		wake_up(&cdev->private->wait_q);
	}
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	if (!ret)
		/* Driver doesn't want device back. */
		ccw_device_do_unreg_rereg(work);
}
421
/*
 * Finished with online/offline processing.
 *
 * Stops the timeout timer, disables the subchannel unless the device
 * went online, resets the accumulated irb and enters @state.  A
 * deferred oper notification (flags.donotify) is dispatched here, and
 * the device reference taken in ccw_device_online() is dropped for all
 * non-online outcomes.
 */
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);

	ccw_device_set_timeout(cdev, 0);

	if (state != DEV_STATE_ONLINE)
		cio_disable_subchannel(sch);

	/* Reset device status. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));

	cdev->private->state = state;


	if (state == DEV_STATE_BOXED)
		CIO_DEBUG(KERN_WARNING, 2,
			  "Boxed device %04x on subchannel %04x\n",
			  cdev->private->dev_id.devno, sch->schid.sch_no);

	if (cdev->private->flags.donotify) {
		cdev->private->flags.donotify = 0;
		PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	}
	wake_up(&cdev->private->wait_q);

	/* Drop the reference acquired in ccw_device_online(). */
	if (css_init_done && state != DEV_STATE_ONLINE)
		put_device (&cdev->dev);
}
458
459 static int cmp_pgid(struct pgid *p1, struct pgid *p2)
460 {
461         char *c1;
462         char *c2;
463
464         c1 = (char *)p1;
465         c2 = (char *)p2;
466
467         return memcmp(c1 + 1, c2 + 1, sizeof(struct pgid) - 1);
468 }
469
/*
 * Derive a single path group id from the per-path SNID results in
 * cdev->private->pgid[0..7].  Paths still in SNID_STATE1_RESET carry
 * no PGID and are skipped.  If the remaining PGIDs conflict, path
 * grouping is disabled (options.pgroup = 0); otherwise pgid[0] is set
 * to the common PGID, or to the global PGID if none was found at all.
 */
static void __ccw_device_get_common_pgid(struct ccw_device *cdev)
{
	int i;
	int last;	/* index of the first valid PGID seen so far */

	last = 0;
	for (i = 0; i < 8; i++) {
		if (cdev->private->pgid[i].inf.ps.state1 == SNID_STATE1_RESET)
			/* No PGID yet */
			continue;
		if (cdev->private->pgid[last].inf.ps.state1 ==
		    SNID_STATE1_RESET) {
			/* First non-zero PGID */
			last = i;
			continue;
		}
		if (cmp_pgid(&cdev->private->pgid[i],
			     &cdev->private->pgid[last]) == 0)
			/* Non-conflicting PGIDs */
			continue;

		/* PGID mismatch, can't pathgroup. */
		CIO_MSG_EVENT(0, "SNID - pgid mismatch for device "
			      "0.%x.%04x, can't pathgroup\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		cdev->private->options.pgroup = 0;
		return;
	}
	if (cdev->private->pgid[last].inf.ps.state1 ==
	    SNID_STATE1_RESET)
		/* No previous pgid found */
		memcpy(&cdev->private->pgid[0],
		       &channel_subsystems[0]->global_pgid,
		       sizeof(struct pgid));
	else
		/* Use existing pgid */
		memcpy(&cdev->private->pgid[0], &cdev->private->pgid[last],
		       sizeof(struct pgid));
}
510
/*
 * Function called from device_pgid.c after sense path ground has completed.
 *
 * Depending on @err either boxes the device / marks it not operational,
 * or consolidates the per-path PGIDs and proceeds to path verification.
 */
void
ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	switch (err) {
	case -EOPNOTSUPP: /* path grouping not supported, use nop instead. */
		cdev->private->options.pgroup = 0;
		break;
	case 0: /* success */
	case -EACCES: /* partial success, some paths not operational */
		/* Check if all pgids are equal or 0. */
		__ccw_device_get_common_pgid(cdev);
		break;
	case -ETIME:		/* Sense path group id stopped by timeout. */
	case -EUSERS:		/* device is reserved for someone else. */
		ccw_device_done(cdev, DEV_STATE_BOXED);
		return;
	default:
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		return;
	}
	/* Start Path Group verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	cdev->private->flags.doverify = 0;
	ccw_device_verify_start(cdev);
}
542
/*
 * Start device recognition.
 *
 * Only legal from the NOT_OPER or BOXED states (-EINVAL otherwise).
 * Enables the subchannel for i/o, arms a 60 second timeout and kicks
 * off sense id.  Returns 0 on success or the cio_enable_subchannel()
 * error.
 */
int
ccw_device_recognition(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	if ((cdev->private->state != DEV_STATE_NOT_OPER) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc,
				    (u32)(addr_t)sch);
	if (ret != 0)
		/* Couldn't enable the subchannel for i/o. Sick device. */
		return ret;

	/* After 60s the device recognition is considered to have failed. */
	ccw_device_set_timeout(cdev, 60*HZ);

	/*
	 * We used to start here with a sense pgid to find out whether a device
	 * is locked by someone else. Unfortunately, the sense pgid command
	 * code has other meanings on devices predating the path grouping
	 * algorithm, so we start with sense id and box the device after an
	 * timeout (or if sense pgid during path verification detects the device
	 * is locked, as may happen on newer devices).
	 */
	cdev->private->flags.recog_done = 0;
	cdev->private->state = DEV_STATE_SENSE_ID;
	ccw_device_sense_id_start(cdev);
	return 0;
}
578
579 /*
580  * Handle timeout in device recognition.
581  */
582 static void
583 ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
584 {
585         int ret;
586
587         ret = ccw_device_cancel_halt_clear(cdev);
588         switch (ret) {
589         case 0:
590                 ccw_device_recog_done(cdev, DEV_STATE_BOXED);
591                 break;
592         case -ENODEV:
593                 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
594                 break;
595         default:
596                 ccw_device_set_timeout(cdev, 3*HZ);
597         }
598 }
599
600
/*
 * Path verification finished with result @err.  Updates schib and lpm,
 * repeats verification when another run was requested in the meantime,
 * and otherwise completes the online transition (possibly delivering a
 * deferred fake irb to the driver) or boxes / disables the device.
 */
void
ccw_device_verify_done(struct ccw_device *cdev, int err)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/* Update schib - pom may have changed. */
	stsch(sch->schid, &sch->schib);
	/* Update lpm with verified path mask. */
	sch->lpm = sch->vpm;
	/* Repeat path verification? */
	if (cdev->private->flags.doverify) {
		cdev->private->flags.doverify = 0;
		ccw_device_verify_start(cdev);
		return;
	}
	switch (err) {
	case -EOPNOTSUPP: /* path grouping not supported, just set online. */
		cdev->private->options.pgroup = 0;
		/* fallthrough */
	case 0:
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		/* Deliver fake irb to device driver, if needed. */
		if (cdev->private->flags.fake_irb) {
			memset(&cdev->private->irb, 0, sizeof(struct irb));
			cdev->private->irb.scsw.cc = 1;
			cdev->private->irb.scsw.fctl = SCSW_FCTL_START_FUNC;
			cdev->private->irb.scsw.actl = SCSW_ACTL_START_PEND;
			cdev->private->irb.scsw.stctl = SCSW_STCTL_STATUS_PEND;
			cdev->private->flags.fake_irb = 0;
			if (cdev->handler)
				cdev->handler(cdev, cdev->private->intparm,
					      &cdev->private->irb);
			memset(&cdev->private->irb, 0, sizeof(struct irb));
		}
		break;
	case -ETIME:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		if (cdev->online)
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		else
			ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}
651
/*
 * Get device online.
 *
 * Only legal from OFFLINE or BOXED (-EINVAL otherwise).  Takes a
 * device reference (dropped again in ccw_device_done() for non-online
 * outcomes), enables the subchannel and starts either sense pgid or,
 * when path grouping is disabled, path verification directly.
 */
int
ccw_device_online(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	if ((cdev->private->state != DEV_STATE_OFFLINE) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	if (css_init_done && !get_device(&cdev->dev))
		return -ENODEV;
	ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc,
				    (u32)(addr_t)sch);
	if (ret != 0) {
		/* Couldn't enable the subchannel for i/o. Sick device. */
		if (ret == -ENODEV)
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return ret;
	}
	/* Do we want to do path grouping? */
	if (!cdev->private->options.pgroup) {
		/* Start initial path verification. */
		cdev->private->state = DEV_STATE_VERIFY;
		cdev->private->flags.doverify = 0;
		ccw_device_verify_start(cdev);
		return 0;
	}
	/* Do a SensePGID first. */
	cdev->private->state = DEV_STATE_SENSE_PGID;
	ccw_device_sense_pgid_start(cdev);
	return 0;
}
688
689 void
690 ccw_device_disband_done(struct ccw_device *cdev, int err)
691 {
692         switch (err) {
693         case 0:
694                 ccw_device_done(cdev, DEV_STATE_OFFLINE);
695                 break;
696         case -ETIME:
697                 ccw_device_done(cdev, DEV_STATE_BOXED);
698                 break;
699         default:
700                 cdev->private->flags.donotify = 0;
701                 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
702                 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
703                 break;
704         }
705 }
706
/*
 * Shutdown device.
 *
 * Returns -ENODEV when the device is gone, -EBUSY while i/o is still
 * active, -EINVAL when called in a state other than ONLINE.  When path
 * grouping is active the offline transition completes asynchronously
 * via the DISBAND_PGID sequence, otherwise it is done immediately.
 */
int
ccw_device_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;

	/* Orphaned devices have no subchannel - finish right away. */
	if (ccw_device_is_orphan(cdev)) {
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	sch = to_subchannel(cdev->dev.parent);
	if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE) {
		if (sch->schib.scsw.actl != 0)
			return -EBUSY;
		return -EINVAL;
	}
	if (sch->schib.scsw.actl != 0)
		return -EBUSY;
	/* Are we doing path grouping? */
	if (!cdev->private->options.pgroup) {
		/* No, set state offline immediately. */
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	/* Start Set Path Group commands. */
	cdev->private->state = DEV_STATE_DISBAND_PGID;
	ccw_device_disband_start(cdev);
	return 0;
}
740
741 /*
742  * Handle timeout in device online/offline process.
743  */
744 static void
745 ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
746 {
747         int ret;
748
749         ret = ccw_device_cancel_halt_clear(cdev);
750         switch (ret) {
751         case 0:
752                 ccw_device_done(cdev, DEV_STATE_BOXED);
753                 break;
754         case -ENODEV:
755                 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
756                 break;
757         default:
758                 ccw_device_set_timeout(cdev, 3*HZ);
759         }
760 }
761
/*
 * Handle not oper event in device recognition: finish recognition with
 * the NOT_OPER result.
 */
static void
ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
	ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
}
770
771 /*
772  * Handle not operational event in non-special state.
773  */
774 static void ccw_device_generic_notoper(struct ccw_device *cdev,
775                                        enum dev_event dev_event)
776 {
777         struct subchannel *sch;
778
779         cdev->private->state = DEV_STATE_NOT_OPER;
780         sch = to_subchannel(cdev->dev.parent);
781         css_schedule_eval(sch->schid);
782 }
783
/*
 * Handle path verification event.
 *
 * Starts path verification immediately when the device is idle;
 * otherwise (basic sense in progress, or a final status not yet
 * delivered to the driver) only records the request in flags.doverify
 * so it is picked up once the device becomes idle.
 */
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	if (cdev->private->state == DEV_STATE_W4SENSE) {
		cdev->private->flags.doverify = 1;
		return;
	}
	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Since we might not just be coming from an interrupt from the
	 * subchannel we have to update the schib.
	 */
	stsch(sch->schid, &sch->schib);

	if (sch->schib.scsw.actl != 0 ||
	    (sch->schib.scsw.stctl & SCSW_STCTL_STATUS_PEND) ||
	    (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) {
		/*
		 * No final status yet or final status not yet delivered
		 * to the device driver. Can't do path verfication now,
		 * delay until final status was delivered.
		 */
		cdev->private->flags.doverify = 1;
		return;
	}
	/* Device is idle, we can do the path verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	cdev->private->flags.doverify = 0;
	ccw_device_verify_start(cdev);
}
819
/*
 * Got an interrupt for a normal io (state online).
 *
 * Unsolicited alert status with unit check but no sense data triggers
 * a basic sense (-> W4SENSE); other unsolicited interrupts go straight
 * to the driver handler.  Solicited status is accumulated and either
 * starts a basic sense or is delivered to the driver, after which a
 * delayed path verification may be started.
 */
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	/* The irb is passed via the lowcore. */
	irb = (struct irb *) __LC_IRB;
	/* Check for unsolicited interrupt. */
	if ((irb->scsw.stctl ==
			(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS))
	    && (!irb->scsw.cc)) {
		if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
		    !irb->esw.esw0.erw.cons) {
			/* Unit check but no sense data. Need basic sense. */
			if (ccw_device_do_sense(cdev, irb) != 0)
				goto call_handler_unsol;
			memcpy(&cdev->private->irb, irb, sizeof(struct irb));
			cdev->private->state = DEV_STATE_W4SENSE;
			cdev->private->intparm = 0;
			return;
		}
call_handler_unsol:
		if (cdev->handler)
			cdev->handler (cdev, 0, irb);
		if (cdev->private->flags.doverify)
			ccw_device_online_verify(cdev, 0);
		return;
	}
	/* Accumulate status and find out if a basic sense is needed. */
	ccw_device_accumulate_irb(cdev, irb);
	if (cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0) {
			cdev->private->state = DEV_STATE_W4SENSE;
		}
		return;
	}
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
863
864 /*
865  * Got an timeout in online state.
866  */
867 static void
868 ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
869 {
870         int ret;
871
872         ccw_device_set_timeout(cdev, 0);
873         ret = ccw_device_cancel_halt_clear(cdev);
874         if (ret == -EBUSY) {
875                 ccw_device_set_timeout(cdev, 3*HZ);
876                 cdev->private->state = DEV_STATE_TIMEOUT_KILL;
877                 return;
878         }
879         if (ret == -ENODEV)
880                 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
881         else if (cdev->handler)
882                 cdev->handler(cdev, cdev->private->intparm,
883                               ERR_PTR(-ETIMEDOUT));
884 }
885
/*
 * Got an interrupt for a basic sense.
 */
static void
ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct irb *irb;

        /* The interruption response block is stored in lowcore. */
        irb = (struct irb *) __LC_IRB;
        /* Check for unsolicited interrupt. */
        if (irb->scsw.stctl ==
                        (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
                if (irb->scsw.cc == 1)
                        /* Basic sense hasn't started. Try again. */
                        ccw_device_do_sense(cdev, irb);
                else {
                        CIO_MSG_EVENT(2, "Huh? 0.%x.%04x: unsolicited "
                                      "interrupt during w4sense...\n",
                                      cdev->private->dev_id.ssid,
                                      cdev->private->dev_id.devno);
                        if (cdev->handler)
                                cdev->handler (cdev, 0, irb);
                }
                return;
        }
        /*
         * Check if a halt or clear has been issued in the meanwhile. If yes,
         * only deliver the halt/clear interrupt to the device driver as if it
         * had killed the original request.
         */
        if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
                /* Retry Basic Sense if requested. */
                if (cdev->private->flags.intretry) {
                        cdev->private->flags.intretry = 0;
                        ccw_device_do_sense(cdev, irb);
                        return;
                }
                cdev->private->flags.dosense = 0;
                /* Drop partially accumulated status ... */
                memset(&cdev->private->irb, 0, sizeof(struct irb));
                /* ... and report only the halt/clear status. */
                ccw_device_accumulate_irb(cdev, irb);
                goto call_handler;
        }
        /* Add basic sense info to irb. */
        ccw_device_accumulate_basic_sense(cdev, irb);
        if (cdev->private->flags.dosense) {
                /* Another basic sense is needed. */
                ccw_device_do_sense(cdev, irb);
                return;
        }
call_handler:
        cdev->private->state = DEV_STATE_ONLINE;
        /* Call the handler. */
        if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
                /* Start delayed path verification. */
                ccw_device_online_verify(cdev, 0);
}
942
943 static void
944 ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
945 {
946         struct irb *irb;
947
948         irb = (struct irb *) __LC_IRB;
949         /* Accumulate status. We don't do basic sense. */
950         ccw_device_accumulate_irb(cdev, irb);
951         /* Remember to clear irb to avoid residuals. */
952         memset(&cdev->private->irb, 0, sizeof(struct irb));
953         /* Try to start delayed device verification. */
954         ccw_device_online_verify(cdev, 0);
955         /* Note: Don't call handler for cio initiated clear! */
956 }
957
958 static void
959 ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
960 {
961         struct subchannel *sch;
962
963         sch = to_subchannel(cdev->dev.parent);
964         ccw_device_set_timeout(cdev, 0);
965         /* Start delayed path verification. */
966         ccw_device_online_verify(cdev, 0);
967         /* OK, i/o is dead now. Call interrupt handler. */
968         if (cdev->handler)
969                 cdev->handler(cdev, cdev->private->intparm,
970                               ERR_PTR(-EIO));
971 }
972
973 static void
974 ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
975 {
976         int ret;
977
978         ret = ccw_device_cancel_halt_clear(cdev);
979         if (ret == -EBUSY) {
980                 ccw_device_set_timeout(cdev, 3*HZ);
981                 return;
982         }
983         /* Start delayed path verification. */
984         ccw_device_online_verify(cdev, 0);
985         if (cdev->handler)
986                 cdev->handler(cdev, cdev->private->intparm,
987                               ERR_PTR(-EIO));
988 }
989
990 void device_kill_io(struct subchannel *sch)
991 {
992         int ret;
993         struct ccw_device *cdev;
994
995         cdev = sch_get_cdev(sch);
996         ret = ccw_device_cancel_halt_clear(cdev);
997         if (ret == -EBUSY) {
998                 ccw_device_set_timeout(cdev, 3*HZ);
999                 cdev->private->state = DEV_STATE_TIMEOUT_KILL;
1000                 return;
1001         }
1002         /* Start delayed path verification. */
1003         ccw_device_online_verify(cdev, 0);
1004         if (cdev->handler)
1005                 cdev->handler(cdev, cdev->private->intparm,
1006                               ERR_PTR(-EIO));
1007 }
1008
/*
 * Postpone path verification: just record that it should be run once the
 * current task has finished.
 */
static void
ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
        /* Start verification after current task finished. */
        cdev->private->flags.doverify = 1;
}
1015
1016 static void
1017 ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
1018 {
1019         struct irb *irb;
1020
1021         switch (dev_event) {
1022         case DEV_EVENT_INTERRUPT:
1023                 irb = (struct irb *) __LC_IRB;
1024                 /* Check for unsolicited interrupt. */
1025                 if ((irb->scsw.stctl ==
1026                      (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
1027                     (!irb->scsw.cc))
1028                         /* FIXME: we should restart stlck here, but this
1029                          * is extremely unlikely ... */
1030                         goto out_wakeup;
1031
1032                 ccw_device_accumulate_irb(cdev, irb);
1033                 /* We don't care about basic sense etc. */
1034                 break;
1035         default: /* timeout */
1036                 break;
1037         }
1038 out_wakeup:
1039         wake_up(&cdev->private->wait_q);
1040 }
1041
1042 static void
1043 ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
1044 {
1045         struct subchannel *sch;
1046
1047         sch = to_subchannel(cdev->dev.parent);
1048         if (cio_enable_subchannel(sch, sch->schib.pmcw.isc,
1049                                   (u32)(addr_t)sch) != 0)
1050                 /* Couldn't enable the subchannel for i/o. Sick device. */
1051                 return;
1052
1053         /* After 60s the device recognition is considered to have failed. */
1054         ccw_device_set_timeout(cdev, 60*HZ);
1055
1056         cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
1057         ccw_device_sense_id_start(cdev);
1058 }
1059
/*
 * Restart device recognition for a disconnected device after the
 * subchannel data has been refreshed from the hardware.
 */
void
device_trigger_reprobe(struct subchannel *sch)
{
        struct ccw_device *cdev;

        cdev = sch_get_cdev(sch);
        if (!cdev)
                return;
        /* Only act on devices that are actually disconnected. */
        if (cdev->private->state != DEV_STATE_DISCONNECTED)
                return;

        /* Update some values. */
        if (stsch(sch->schid, &sch->schib))
                return;
        /* Bail out if the device number is no longer valid. */
        if (!sch->schib.pmcw.dnv)
                return;
        /*
         * The pim, pam, pom values may not be accurate, but they are the best
         * we have before performing device selection :/
         */
        sch->lpm = sch->schib.pmcw.pam & sch->opm;
        /* Re-set some bits in the pmcw that were lost. */
        sch->schib.pmcw.isc = 3;
        sch->schib.pmcw.csense = 1;
        sch->schib.pmcw.ena = 0;
        /* Enable multipath mode if more than one path is available. */
        if ((sch->lpm & (sch->lpm - 1)) != 0)
                sch->schib.pmcw.mp = 1;
        sch->schib.pmcw.intparm = (u32)(addr_t)sch;
        /* We should also update ssd info, but this has to wait. */
        /* Check if this is another device which appeared on the same sch. */
        if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
                /* Different device number: move the old device out of the
                 * way via the slow-path workqueue. */
                PREPARE_WORK(&cdev->private->kick_work,
                             ccw_device_move_to_orphanage);
                queue_work(slow_path_wq, &cdev->private->kick_work);
        } else
                ccw_device_start_id(cdev, 0);
}
1097
1098 static void
1099 ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
1100 {
1101         struct subchannel *sch;
1102
1103         sch = to_subchannel(cdev->dev.parent);
1104         /*
1105          * An interrupt in state offline means a previous disable was not
1106          * successful. Try again.
1107          */
1108         cio_disable_subchannel(sch);
1109 }
1110
/*
 * Retry the pending set-schib operation for a cmf state change, then
 * return to the online state and re-deliver the triggering event there.
 */
static void
ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
{
        retry_set_schib(cdev);
        cdev->private->state = DEV_STATE_ONLINE;
        /* Handle the event via the online state's jump table entry. */
        dev_fsm_event(cdev, dev_event);
}
1118
/*
 * Retry the pending copy of the cmf measurement block, then return to
 * the online state and re-deliver the triggering event there.
 */
static void ccw_device_update_cmfblock(struct ccw_device *cdev,
                                       enum dev_event dev_event)
{
        cmf_retry_copy_block(cdev);
        cdev->private->state = DEV_STATE_ONLINE;
        /* Handle the event via the online state's jump table entry. */
        dev_fsm_event(cdev, dev_event);
}
1126
1127 static void
1128 ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
1129 {
1130         ccw_device_set_timeout(cdev, 0);
1131         if (dev_event == DEV_EVENT_NOTOPER)
1132                 cdev->private->state = DEV_STATE_NOT_OPER;
1133         else
1134                 cdev->private->state = DEV_STATE_OFFLINE;
1135         wake_up(&cdev->private->wait_q);
1136 }
1137
1138 static void
1139 ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
1140 {
1141         int ret;
1142
1143         ret = ccw_device_cancel_halt_clear(cdev);
1144         switch (ret) {
1145         case 0:
1146                 cdev->private->state = DEV_STATE_OFFLINE;
1147                 wake_up(&cdev->private->wait_q);
1148                 break;
1149         case -ENODEV:
1150                 cdev->private->state = DEV_STATE_NOT_OPER;
1151                 wake_up(&cdev->private->wait_q);
1152                 break;
1153         default:
1154                 ccw_device_set_timeout(cdev, HZ/10);
1155         }
1156 }
1157
/*
 * No operation action. This is used e.g. to ignore a timeout event in
 * state offline.
 */
static void
ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
{
        /* Intentionally empty. */
}
1166
/*
 * Bug operation action: reached for a state/event combination that must
 * never occur. Logs the offending pair and crashes via BUG().
 */
static void
ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
{
        CIO_MSG_EVENT(0, "dev_jumptable[%i][%i] == NULL\n",
                      cdev->private->state, dev_event);
        BUG();
}
1177
/*
 * device statemachine
 *
 * Indexed as dev_jumptable[current state][incoming event]. Entries use
 * ccw_device_nop to deliberately ignore an event and ccw_device_bug for
 * combinations that must never happen.
 */
fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
        [DEV_STATE_NOT_OPER] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_nop,
                [DEV_EVENT_INTERRUPT]   = ccw_device_bug,
                [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_SENSE_PGID] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_generic_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_sense_pgid_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_onoff_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_SENSE_ID] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_recog_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_sense_id_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_recog_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_OFFLINE] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_generic_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_offline_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_VERIFY] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_generic_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_verify_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_onoff_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_delay_verify,
        },
        [DEV_STATE_ONLINE] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_generic_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_online_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_online_verify,
        },
        [DEV_STATE_W4SENSE] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_generic_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_w4sense,
                [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
                [DEV_EVENT_VERIFY]      = ccw_device_online_verify,
        },
        [DEV_STATE_DISBAND_PGID] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_generic_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_disband_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_onoff_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_BOXED] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_generic_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_stlck_done,
                [DEV_EVENT_TIMEOUT]     = ccw_device_stlck_done,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        /* states to wait for i/o completion before doing something */
        [DEV_STATE_CLEAR_VERIFY] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_generic_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_clear_verify,
                [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_TIMEOUT_KILL] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_generic_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_killing_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_killing_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_nop, /* FIXME */
        },
        [DEV_STATE_QUIESCE] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_quiesce_done,
                [DEV_EVENT_INTERRUPT]   = ccw_device_quiesce_done,
                [DEV_EVENT_TIMEOUT]     = ccw_device_quiesce_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        /* special states for devices gone not operational */
        [DEV_STATE_DISCONNECTED] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_nop,
                [DEV_EVENT_INTERRUPT]   = ccw_device_start_id,
                [DEV_EVENT_TIMEOUT]     = ccw_device_bug,
                [DEV_EVENT_VERIFY]      = ccw_device_start_id,
        },
        [DEV_STATE_DISCONNECTED_SENSE_ID] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_recog_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_sense_id_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_recog_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_CMFCHANGE] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_change_cmfstate,
                [DEV_EVENT_INTERRUPT]   = ccw_device_change_cmfstate,
                [DEV_EVENT_TIMEOUT]     = ccw_device_change_cmfstate,
                [DEV_EVENT_VERIFY]      = ccw_device_change_cmfstate,
        },
        [DEV_STATE_CMFUPDATE] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_update_cmfblock,
                [DEV_EVENT_INTERRUPT]   = ccw_device_update_cmfblock,
                [DEV_EVENT_TIMEOUT]     = ccw_device_update_cmfblock,
                [DEV_EVENT_VERIFY]      = ccw_device_update_cmfblock,
        },
};
1281
/* Exported for use by other (GPL-compatible) modules. */
EXPORT_SYMBOL_GPL(ccw_device_set_timeout);