[S390] cio: Re-start path verification after aborting internal I/O.
/*
 *  drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *                            IBM Corporation
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *               Cornelia Huck (cornelia.huck@de.ibm.com)
 *               Arnd Bergmann (arndb@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>
#include <asm/chpid.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"

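/*
 * Page used as the store-event-information (SEI) request/response
 * area; allocated once at boot by chsc_alloc_sei_area() and used
 * from chsc_process_crw().
 */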
static void *sei_page;

/* FIXME: this is _always_ called for every subchannel. shouldn't we
 *        process more than one at a time? */
static int
chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
{
        int ccode, j;

        struct {
                struct chsc_header request;
                u16 reserved1a:10;
                u16 ssid:2;
                u16 reserved1b:4;
                u16 f_sch;        /* first subchannel */
                u16 reserved2;
                u16 l_sch;        /* last subchannel */
                u32 reserved3;
                struct chsc_header response;
                u32 reserved4;
                u8 sch_valid : 1;
                u8 dev_valid : 1;
                u8 st        : 3; /* subchannel type */
                u8 zeroes    : 3;
                u8  unit_addr;    /* unit address */
                u16 devno;        /* device number */
                u8 path_mask;
                u8 fla_valid_mask;
                u16 sch;          /* subchannel */
                u8 chpid[8];      /* chpids 0-7 */
                u16 fla[8];       /* full link addresses 0-7 */
        } __attribute__ ((packed)) *ssd_area;

        ssd_area = page;

        ssd_area->request.length = 0x0010;
        ssd_area->request.code = 0x0004;

        ssd_area->ssid = sch->schid.ssid;
        ssd_area->f_sch = sch->schid.sch_no;
        ssd_area->l_sch = sch->schid.sch_no;

        ccode = chsc(ssd_area);
        if (ccode > 0) {
                pr_debug("chsc returned with ccode = %d\n", ccode);
                return (ccode == 3) ? -ENODEV : -EBUSY;
        }

        switch (ssd_area->response.code) {
        case 0x0001: /* everything ok */
                break;
        case 0x0002:
                CIO_CRW_EVENT(2, "Invalid command!\n");
                return -EINVAL;
        case 0x0003:
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
                return -EINVAL;
        case 0x0004:
                CIO_CRW_EVENT(2, "Model does not provide ssd\n");
                return -EOPNOTSUPP;
        default:
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              ssd_area->response.code);
                return -EIO;
        }

        /*
         * ssd_area->st stores the type of the detected
         * subchannel, with the following definitions:
         *
         * 0: I/O subchannel:     All fields have meaning
         * 1: CHSC subchannel:    Only sch_val, st and sch
         *                        have meaning
         * 2: Message subchannel: All fields except unit_addr
         *                        have meaning
         * 3: ADM subchannel:     Only sch_val, st and sch
         *                        have meaning
         *
         * Other types are currently undefined.
         */
        if (ssd_area->st > 3) { /* uhm, that looks strange... */
                CIO_CRW_EVENT(0, "Strange subchannel type %d"
                              " for sch 0.%x.%04x\n", ssd_area->st,
                              sch->schid.ssid, sch->schid.sch_no);
                /*
                 * There may have been a new subchannel type defined in the
                 * time since this code was written; since we don't know which
                 * fields have meaning and what to do with it we just jump out
                 */
                return 0;
        } else {
                const char *type[4] = {"I/O", "chsc", "message", "ADM"};
                CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
                              sch->schid.ssid, sch->schid.sch_no,
                              type[ssd_area->st]);

                sch->ssd_info.valid = 1;
                sch->ssd_info.type = ssd_area->st;
        }

        if (ssd_area->st == 0 || ssd_area->st == 2) {
                for (j = 0; j < 8; j++) {
                        if (!((0x80 >> j) & ssd_area->path_mask &
                              ssd_area->fla_valid_mask))
                                continue;
                        sch->ssd_info.chpid[j] = ssd_area->chpid[j];
                        sch->ssd_info.fla[j]   = ssd_area->fla[j];
                }
        }
        return 0;
}

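/*
 * Retrieve the subchannel description data for @sch via CHSC and, on
 * success, register any channel paths named in the result that are
 * not known yet.
 */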
int
css_get_ssd_info(struct subchannel *sch)
{
        int ret;
        void *page;

        page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!page)
                return -ENOMEM;
        spin_lock_irq(sch->lock);
        ret = chsc_get_sch_desc_irq(sch, page);
        if (ret) {
                static int cio_chsc_err_msg;

                if (!cio_chsc_err_msg) {
                        printk(KERN_ERR
                               "chsc_get_sch_descriptions:"
                               " Error %d while doing chsc; "
                               "processing some machine checks may "
                               "not work\n", ret);
                        cio_chsc_err_msg = 1;
                }
        }
        spin_unlock_irq(sch->lock);
        free_page((unsigned long)page);
        if (!ret) {
                int j, mask;
                struct chp_id chpid;

                chp_id_init(&chpid);
                /* Allocate channel path structures, if needed. */
                for (j = 0; j < 8; j++) {
                        mask = 0x80 >> j;
                        chpid.id = sch->ssd_info.chpid[j];
                        if ((sch->schib.pmcw.pim & mask) &&
                            !chp_is_registered(chpid))
                                chp_new(chpid);
                }
        }
        return ret;
}

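/*
 * Return 1 if there is I/O activity on @sch and the last path used
 * is the one given by @mask, 0 otherwise (including stsch failure).
 */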
static int check_for_io_on_path(struct subchannel *sch, int mask)
{
        int cc;

        cc = stsch(sch->schid, &sch->schib);
        if (cc)
                return 0;
        if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask)
                return 1;
        return 0;
}

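/*
 * Abort internal (non-device-driver) I/O on @sch with a clear
 * subchannel. If the clear fails, fall back to full device
 * verification; otherwise flag the internal operation for retry and
 * notify the driver through its termination callback.
 */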
static void terminate_internal_io(struct subchannel *sch)
{
        if (cio_clear(sch)) {
                /* Recheck device in case clear failed. */
                sch->lpm = 0;
                if (device_trigger_verify(sch) != 0) {
                        if (css_enqueue_subchannel_slow(sch->schid)) {
                                css_clear_subchannel_slow_list();
                                need_rescan = 1;
                        }
                }
                return;
        }
        /* Request retry of internal operation. */
        device_set_intretry(sch);
        /* Call handler. */
        if (sch->driver && sch->driver->termination)
                sch->driver->termination(&sch->dev);
}

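/*
 * Called for each subchannel when a channel path goes offline: if
 * @dev uses the chpid given in @data, terminate any I/O still running
 * on that path and trigger path verification, or unregister the
 * subchannel if no usable path remains.
 */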
static int
s390_subchannel_remove_chpid(struct device *dev, void *data)
{
        int j;
        int mask;
        struct subchannel *sch;
        struct chp_id *chpid;
        struct schib schib;

        sch = to_subchannel(dev);
        chpid = data;
        for (j = 0; j < 8; j++) {
                mask = 0x80 >> j;
                if ((sch->schib.pmcw.pim & mask) &&
                    (sch->schib.pmcw.chpid[j] == chpid->id))
                        break;
        }
        if (j >= 8)
                return 0;

        spin_lock_irq(sch->lock);

        stsch(sch->schid, &schib);
        if (!schib.pmcw.dnv)
                goto out_unreg;
        memcpy(&sch->schib, &schib, sizeof(struct schib));
        /* Check for single path devices. */
        if (sch->schib.pmcw.pim == 0x80)
                goto out_unreg;

        if (check_for_io_on_path(sch, mask)) {
                if (device_is_online(sch))
                        device_kill_io(sch);
                else {
                        terminate_internal_io(sch);
                        /* Re-start path verification. */
                        if (sch->driver && sch->driver->verify)
                                sch->driver->verify(&sch->dev);
                }
        } else {
                /* Trigger path verification. */
                if (sch->driver && sch->driver->verify)
                        sch->driver->verify(&sch->dev);
                else if (sch->lpm == mask)
                        goto out_unreg;
        }

        spin_unlock_irq(sch->lock);
        return 0;

out_unreg:
        sch->lpm = 0;
        if (css_enqueue_subchannel_slow(sch->schid)) {
                css_clear_subchannel_slow_list();
                need_rescan = 1;
        }
        spin_unlock_irq(sch->lock);
        return 0;
}

void chsc_chp_offline(struct chp_id chpid)
{
        char dbf_txt[15];

        sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);

        if (chp_get_status(chpid) <= 0)
                return;
        bus_for_each_dev(&css_bus_type, NULL, &chpid,
                         s390_subchannel_remove_chpid);

        if (need_rescan || css_slow_subchannels_exist())
                queue_work(slow_path_wq, &slow_path_work);
}

struct res_acc_data {
        struct chp_id chpid;
        u32 fla_mask;
        u16 fla;
};

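/*
 * Check whether @sch is reachable through the channel path / link
 * address described by @res_data. Returns the path mask bit of the
 * matching path, or 0 if the subchannel is not affected.
 */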
static int s390_process_res_acc_sch(struct res_acc_data *res_data,
                                    struct subchannel *sch)
{
        int found;
        int chp;
        int ccode;

        found = 0;
        for (chp = 0; chp <= 7; chp++)
                /*
                 * check if chpid is in information updated by ssd
                 */
                if (sch->ssd_info.valid &&
                    sch->ssd_info.chpid[chp] == res_data->chpid.id &&
                    (sch->ssd_info.fla[chp] & res_data->fla_mask)
                    == res_data->fla) {
                        found = 1;
                        break;
                }

        if (found == 0)
                return 0;

        /*
         * Do a stsch to update our subchannel structure with the
         * new path information and possibly check for logically
         * offline chpids.
         */
        ccode = stsch(sch->schid, &sch->schib);
        if (ccode > 0)
                return 0;

        return 0x80 >> chp;
}

static int
s390_process_res_acc_new_sch(struct subchannel_id schid)
{
        struct schib schib;
        int ret;
        /*
         * We don't know the device yet, but since a path
         * may be available now to the device we'll have
         * to do recognition again.
         * Since we don't have any idea about which chpid
         * that beast may be on we'll have to do a stsch
         * on all devices, grr...
         */
        if (stsch_err(schid, &schib))
                /* We're through */
                return need_rescan ? -EAGAIN : -ENXIO;

        /* Put it on the slow path. */
        ret = css_enqueue_subchannel_slow(schid);
        if (ret) {
                css_clear_subchannel_slow_list();
                need_rescan = 1;
                return -EAGAIN;
        }
        return 0;
}

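/*
 * for_each_subchannel() callback: recompute the logical path mask of
 * a known subchannel after a resource-accessibility event, or queue
 * recognition for a subchannel that may have become available.
 */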
static int
__s390_process_res_acc(struct subchannel_id schid, void *data)
{
        int chp_mask, old_lpm;
        struct res_acc_data *res_data;
        struct subchannel *sch;

        res_data = data;
        sch = get_subchannel_by_schid(schid);
        if (!sch)
                /* Check if a subchannel is newly available. */
                return s390_process_res_acc_new_sch(schid);

        spin_lock_irq(sch->lock);

        chp_mask = s390_process_res_acc_sch(res_data, sch);

        if (chp_mask == 0) {
                spin_unlock_irq(sch->lock);
                put_device(&sch->dev);
                return 0;
        }
        old_lpm = sch->lpm;
        sch->lpm = ((sch->schib.pmcw.pim &
                     sch->schib.pmcw.pam &
                     sch->schib.pmcw.pom)
                    | chp_mask) & sch->opm;
        if (!old_lpm && sch->lpm)
                device_trigger_reprobe(sch);
        else if (sch->driver && sch->driver->verify)
                sch->driver->verify(&sch->dev);

        spin_unlock_irq(sch->lock);
        put_device(&sch->dev);
        return 0;
}

static int
s390_process_res_acc(struct res_acc_data *res_data)
{
        int rc;
        char dbf_txt[15];

        sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
                res_data->chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);
        if (res_data->fla != 0) {
                sprintf(dbf_txt, "fla%x", res_data->fla);
                CIO_TRACE_EVENT(2, dbf_txt);
        }

        /*
         * I/O resources may have become accessible.
         * Scan through all subchannels that may be concerned and
         * do a validation on those.
         * The more information we have, the less scanning
         * we will have to do.
         */
        rc = for_each_subchannel(__s390_process_res_acc, res_data);
        if (css_slow_subchannels_exist())
                rc = -EAGAIN;
        else if (rc != -EAGAIN)
                rc = 0;
        return rc;
}

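/*
 * Extract the chpid from the incident-node descriptor of a link
 * incident record (LIR). Returns the chpid on success or -EINVAL if
 * the record or its node descriptor cannot be used.
 */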
static int
__get_chpid_from_lir(void *data)
{
        struct lir {
                u8  iq;
                u8  ic;
                u16 sci;
                /* incident-node descriptor */
                u32 indesc[28];
                /* attached-node descriptor */
                u32 andesc[28];
                /* incident-specific information */
                u32 isinfo[28];
        } __attribute__ ((packed)) *lir;

        lir = data;
        if (!(lir->iq & 0x80))
                /* NULL link incident record */
                return -EINVAL;
        if (!(lir->indesc[0] & 0xc0000000))
                /* node descriptor not valid */
                return -EINVAL;
        if (!(lir->indesc[0] & 0x10000000))
                /* don't handle device-type nodes - FIXME */
                return -EINVAL;
        /* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

        return (u16) (lir->indesc[0] & 0x000000ff);
}

struct chsc_sei_area {
        struct chsc_header request;
        u32 reserved1;
        u32 reserved2;
        u32 reserved3;
        struct chsc_header response;
        u32 reserved4;
        u8  flags;
        u8  vf;         /* validity flags */
        u8  rs;         /* reporting source */
        u8  cc;         /* content code */
        u16 fla;        /* full link address */
        u16 rsid;       /* reporting source id */
        u32 reserved5;
        u32 reserved6;
        u8 ccdf[4096 - 16 - 24];        /* content-code dependent field */
        /* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));

static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
{
        struct chp_id chpid;
        int id;

        CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
                      sei_area->rs, sei_area->rsid);
        if (sei_area->rs != 4)
                return 0;
        id = __get_chpid_from_lir(sei_area->ccdf);
        if (id < 0)
                CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
        else {
                chp_id_init(&chpid);
                chpid.id = id;
                chsc_chp_offline(chpid);
        }

        return 0;
}

static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
{
        struct res_acc_data res_data;
        struct chp_id chpid;
        int status;
        int rc;

        CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
                      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
        if (sei_area->rs != 4)
                return 0;
        chp_id_init(&chpid);
        chpid.id = sei_area->rsid;
        /* allocate a new channel path structure, if needed */
        status = chp_get_status(chpid);
        if (status < 0)
                chp_new(chpid);
        else if (!status)
                return 0;
        memset(&res_data, 0, sizeof(struct res_acc_data));
        res_data.chpid = chpid;
        if ((sei_area->vf & 0xc0) != 0) {
                res_data.fla = sei_area->fla;
                if ((sei_area->vf & 0xc0) == 0xc0)
                        /* full link address */
                        res_data.fla_mask = 0xffff;
                else
                        /* link address */
                        res_data.fla_mask = 0xff00;
        }
        rc = s390_process_res_acc(&res_data);

        return rc;
}

struct chp_config_data {
        u8 map[32];
        u8 op;
        u8 pc;
};

static int chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
{
        struct chp_config_data *data;
        struct chp_id chpid;
        int num;

        CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
        if (sei_area->rs != 0)
                return 0;
        data = (struct chp_config_data *) &(sei_area->ccdf);
        chp_id_init(&chpid);
        for (num = 0; num <= __MAX_CHPID; num++) {
                if (!chp_test_bit(data->map, num))
                        continue;
                chpid.id = num;
                printk(KERN_WARNING "cio: processing configure event %d for "
                       "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
                switch (data->op) {
                case 0:
                        chp_cfg_schedule(chpid, 1);
                        break;
                case 1:
                        chp_cfg_schedule(chpid, 0);
                        break;
                case 2:
                        chp_cfg_cancel_deconfigure(chpid);
                        break;
                }
        }

        return 0;
}

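/*
 * Dispatch one store-event-information result to the handler for its
 * content code. Unknown content codes are only logged.
 */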
static int chsc_process_sei(struct chsc_sei_area *sei_area)
{
        int rc;

        /* Check if we might have lost some information. */
        if (sei_area->flags & 0x40)
                CIO_CRW_EVENT(2, "chsc: event overflow\n");
        /* which kind of information was stored? */
        rc = 0;
        switch (sei_area->cc) {
        case 1: /* link incident */
                rc = chsc_process_sei_link_incident(sei_area);
                break;
        case 2: /* I/O resource accessibility */
                rc = chsc_process_sei_res_acc(sei_area);
                break;
        case 8: /* channel-path-configuration notification */
                rc = chsc_process_sei_chp_config(sei_area);
                break;
        default: /* other stuff */
                CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
                              sei_area->cc);
                break;
        }

        return rc;
}

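/*
 * Handle a channel-report word (CRW) by repeatedly issuing the CHSC
 * store-event-information command until the hardware reports no
 * further pending events (flag 0x80 cleared).
 */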
int chsc_process_crw(void)
{
        struct chsc_sei_area *sei_area;
        int ret;
        int rc;

        if (!sei_page)
                return 0;
        /* Access to sei_page is serialized through machine check handler
         * thread, so no need for locking. */
        sei_area = sei_page;

        CIO_TRACE_EVENT(2, "prcss");
        ret = 0;
        do {
                memset(sei_area, 0, sizeof(*sei_area));
                sei_area->request.length = 0x0010;
                sei_area->request.code = 0x000e;
                if (chsc(sei_area))
                        break;

                if (sei_area->response.code == 0x0001) {
                        CIO_CRW_EVENT(4, "chsc: sei successful\n");
                        rc = chsc_process_sei(sei_area);
                        if (rc)
                                ret = rc;
                } else {
                        CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
                                      sei_area->response.code);
                        ret = 0;
                        break;
                }
        } while (sei_area->flags & 0x80);

        return ret;
}

static int
__chp_add_new_sch(struct subchannel_id schid)
{
        struct schib schib;
        int ret;

        if (stsch_err(schid, &schib))
                /* We're through */
                return need_rescan ? -EAGAIN : -ENXIO;

        /* Put it on the slow path. */
        ret = css_enqueue_subchannel_slow(schid);
        if (ret) {
                css_clear_subchannel_slow_list();
                need_rescan = 1;
                return -EAGAIN;
        }
        return 0;
}

static int
__chp_add(struct subchannel_id schid, void *data)
{
        int i, mask;
        struct chp_id *chpid;
        struct subchannel *sch;

        chpid = data;
        sch = get_subchannel_by_schid(schid);
        if (!sch)
                /* Check if the subchannel is now available. */
                return __chp_add_new_sch(schid);
        spin_lock_irq(sch->lock);
        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if ((sch->schib.pmcw.pim & mask) &&
                    (sch->schib.pmcw.chpid[i] == chpid->id)) {
                        if (stsch(sch->schid, &sch->schib) != 0) {
                                /* Endgame. */
                                spin_unlock_irq(sch->lock);
                                return -ENXIO;
                        }
                        break;
                }
        }
        if (i == 8) {
                spin_unlock_irq(sch->lock);
                return 0;
        }
        sch->lpm = ((sch->schib.pmcw.pim &
                     sch->schib.pmcw.pam &
                     sch->schib.pmcw.pom)
                    | mask) & sch->opm;

        if (sch->driver && sch->driver->verify)
                sch->driver->verify(&sch->dev);

        spin_unlock_irq(sch->lock);
        put_device(&sch->dev);
        return 0;
}

int chsc_chp_online(struct chp_id chpid)
{
        int rc;
        char dbf_txt[15];

        sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);

        if (chp_get_status(chpid) == 0)
                return 0;
        rc = for_each_subchannel(__chp_add, &chpid);
        if (css_slow_subchannels_exist())
                rc = -EAGAIN;
        if (rc != -EAGAIN)
                rc = 0;
        return rc;
}

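/*
 * Update the opm/lpm of @sch for a vary of channel path @chpid.
 * Varying a path off while I/O is running on it aborts that I/O
 * (device_kill_io() for driver I/O, terminate_internal_io() for
 * internal I/O) and then re-starts path verification.
 */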
static void __s390_subchannel_vary_chpid(struct subchannel *sch,
                                         struct chp_id chpid, int on)
{
        int chp, old_lpm;
        unsigned long flags;

        if (!sch->ssd_info.valid)
                return;

        spin_lock_irqsave(sch->lock, flags);
        old_lpm = sch->lpm;
        for (chp = 0; chp < 8; chp++) {
                if (sch->ssd_info.chpid[chp] != chpid.id)
                        continue;

                if (on) {
                        sch->opm |= (0x80 >> chp);
                        sch->lpm |= (0x80 >> chp);
                        if (!old_lpm)
                                device_trigger_reprobe(sch);
                        else if (sch->driver && sch->driver->verify)
                                sch->driver->verify(&sch->dev);
                        break;
                }
                sch->opm &= ~(0x80 >> chp);
                sch->lpm &= ~(0x80 >> chp);
                if (check_for_io_on_path(sch, (0x80 >> chp))) {
                        if (device_is_online(sch))
                                /* Path verification is done after killing. */
                                device_kill_io(sch);
                        else {
                                /* Kill and retry internal I/O. */
                                terminate_internal_io(sch);
                                /* Re-start path verification. */
                                if (sch->driver && sch->driver->verify)
                                        sch->driver->verify(&sch->dev);
                        }
                } else if (!sch->lpm) {
                        if (device_trigger_verify(sch) != 0) {
                                if (css_enqueue_subchannel_slow(sch->schid)) {
                                        css_clear_subchannel_slow_list();
                                        need_rescan = 1;
                                }
                        }
                } else if (sch->driver && sch->driver->verify)
                        sch->driver->verify(&sch->dev);
                break;
        }
        spin_unlock_irqrestore(sch->lock, flags);
}

static int s390_subchannel_vary_chpid_off(struct device *dev, void *data)
{
        struct subchannel *sch;
        struct chp_id *chpid;

        sch = to_subchannel(dev);
        chpid = data;

        __s390_subchannel_vary_chpid(sch, *chpid, 0);
        return 0;
}

static int s390_subchannel_vary_chpid_on(struct device *dev, void *data)
{
        struct subchannel *sch;
        struct chp_id *chpid;

        sch = to_subchannel(dev);
        chpid = data;

        __s390_subchannel_vary_chpid(sch, *chpid, 1);
        return 0;
}

static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
        struct schib schib;
        struct subchannel *sch;

        sch = get_subchannel_by_schid(schid);
        if (sch) {
                put_device(&sch->dev);
                return 0;
        }
        if (stsch_err(schid, &schib))
                /* We're through */
                return -ENXIO;
        /* Put it on the slow path. */
        if (css_enqueue_subchannel_slow(schid)) {
                css_clear_subchannel_slow_list();
                need_rescan = 1;
                return -EAGAIN;
        }
        return 0;
}

/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
        /*
         * Redo path verification on the devices the chpid connects to.
         */

        bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
                         s390_subchannel_vary_chpid_on :
                         s390_subchannel_vary_chpid_off);
        if (on)
                /* Scan for new devices on the varied-on path. */
                for_each_subchannel(__s390_vary_chpid_on, NULL);
        if (need_rescan || css_slow_subchannels_exist())
                queue_work(slow_path_wq, &slow_path_work);
        return 0;
}

static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
        int i;

        for (i = 0; i <= __MAX_CHPID; i++) {
                if (!css->chps[i])
                        continue;
                chp_remove_cmg_attr(css->chps[i]);
        }
}

static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
        int i, ret;

        ret = 0;
        for (i = 0; i <= __MAX_CHPID; i++) {
                if (!css->chps[i])
                        continue;
                ret = chp_add_cmg_attr(css->chps[i]);
                if (ret)
                        goto cleanup;
        }
        return ret;
cleanup:
        for (--i; i >= 0; i--) {
                if (!css->chps[i])
                        continue;
                chp_remove_cmg_attr(css->chps[i]);
        }
        return ret;
}

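/*
 * Issue the CHSC secm command (request code 0x0016) to switch channel
 * measurement on or off, pointing the hardware at the two pages in
 * css->cub_addr1 and css->cub_addr2. @page is a zeroed page used as
 * the request/response area.
 */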
static int
__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
{
        struct {
                struct chsc_header request;
                u32 operation_code : 2;
                u32 : 30;
                u32 key : 4;
                u32 : 28;
                u32 zeroes1;
                u32 cub_addr1;
                u32 zeroes2;
                u32 cub_addr2;
                u32 reserved[13];
                struct chsc_header response;
                u32 status : 8;
                u32 : 4;
                u32 fmt : 4;
                u32 : 16;
        } __attribute__ ((packed)) *secm_area;
        int ret, ccode;

        secm_area = page;
        secm_area->request.length = 0x0050;
        secm_area->request.code = 0x0016;

        secm_area->key = PAGE_DEFAULT_KEY;
        secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
        secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

        secm_area->operation_code = enable ? 0 : 1;

        ccode = chsc(secm_area);
        if (ccode > 0)
                return (ccode == 3) ? -ENODEV : -EBUSY;

        switch (secm_area->response.code) {
        case 0x0001: /* Success. */
                ret = 0;
                break;
        case 0x0003: /* Invalid block. */
        case 0x0007: /* Invalid format. */
        case 0x0008: /* Other invalid block. */
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
                ret = -EINVAL;
                break;
        case 0x0004: /* Command not provided in model. */
                CIO_CRW_EVENT(2, "Model does not provide secm\n");
                ret = -EOPNOTSUPP;
                break;
        case 0x0102: /* cub addresses incorrect */
                CIO_CRW_EVENT(2, "Invalid addresses in chsc request block\n");
                ret = -EINVAL;
                break;
        case 0x0103: /* key error */
                CIO_CRW_EVENT(2, "Access key error in secm\n");
                ret = -EINVAL;
                break;
        case 0x0105: /* error while starting */
                CIO_CRW_EVENT(2, "Error while starting channel measurement\n");
                ret = -EIO;
                break;
        default:
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              secm_area->response.code);
                ret = -EIO;
        }
        return ret;
}

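/*
 * Enable or disable channel measurement for @css. On enable, the two
 * cub pages are allocated first and the measurement attributes are
 * added afterwards; a failure while adding them rolls the enablement
 * back.
 */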
int
chsc_secm(struct channel_subsystem *css, int enable)
{
        void *secm_area;
        int ret;

        secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!secm_area)
                return -ENOMEM;

        mutex_lock(&css->mutex);
        if (enable && !css->cm_enabled) {
                css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                if (!css->cub_addr1 || !css->cub_addr2) {
                        free_page((unsigned long)css->cub_addr1);
                        free_page((unsigned long)css->cub_addr2);
                        free_page((unsigned long)secm_area);
                        mutex_unlock(&css->mutex);
                        return -ENOMEM;
                }
        }
        ret = __chsc_do_secm(css, enable, secm_area);
        if (!ret) {
                css->cm_enabled = enable;
                if (css->cm_enabled) {
                        ret = chsc_add_cmg_attr(css);
                        if (ret) {
                                memset(secm_area, 0, PAGE_SIZE);
                                __chsc_do_secm(css, 0, secm_area);
                                css->cm_enabled = 0;
                        }
                } else
                        chsc_remove_cmg_attr(css);
        }
        if (enable && !css->cm_enabled) {
                free_page((unsigned long)css->cub_addr1);
                free_page((unsigned long)css->cub_addr2);
        }
        mutex_unlock(&css->mutex);
        free_page((unsigned long)secm_area);
        return ret;
}

int chsc_determine_channel_path_description(struct chp_id chpid,
                                            struct channel_path_desc *desc)
{
        int ccode, ret;

        struct {
                struct chsc_header request;
                u32 : 24;
                u32 first_chpid : 8;
                u32 : 24;
                u32 last_chpid : 8;
                u32 zeroes1;
                struct chsc_header response;
                u32 zeroes2;
                struct channel_path_desc desc;
        } __attribute__ ((packed)) *scpd_area;

        scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scpd_area)
                return -ENOMEM;

        scpd_area->request.length = 0x0010;
        scpd_area->request.code = 0x0002;

        scpd_area->first_chpid = chpid.id;
        scpd_area->last_chpid = chpid.id;

        ccode = chsc(scpd_area);
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out;
        }

        switch (scpd_area->response.code) {
        case 0x0001: /* Success. */
                memcpy(desc, &scpd_area->desc,
                       sizeof(struct channel_path_desc));
                ret = 0;
                break;
        case 0x0003: /* Invalid block. */
        case 0x0007: /* Invalid format. */
        case 0x0008: /* Other invalid block. */
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
                ret = -EINVAL;
                break;
        case 0x0004: /* Command not provided in model. */
                CIO_CRW_EVENT(2, "Model does not provide scpd\n");
                ret = -EOPNOTSUPP;
                break;
        default:
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              scpd_area->response.code);
                ret = -EIO;
        }
out:
        free_page((unsigned long)scpd_area);
        return ret;
}

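/*
 * Store the measurement characteristics reported for @chp. Only cmg
 * types 2 and 3 carry cmg-dependent data; @cmcv masks which of the
 * reported values are valid.
 */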
static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
                          struct cmg_chars *chars)
{
        switch (chp->cmg) {
        case 2:
        case 3:
                chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
                                         GFP_KERNEL);
                if (chp->cmg_chars) {
                        int i, mask;
                        struct cmg_chars *cmg_chars;

                        cmg_chars = chp->cmg_chars;
                        for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
                                mask = 0x80 >> (i + 3);
                                if (cmcv & mask)
                                        cmg_chars->values[i] = chars->values[i];
                                else
                                        cmg_chars->values[i] = 0;
                        }
                }
                break;
        default:
                /* No cmg-dependent data. */
                break;
        }
}

int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
        int ccode, ret;

        struct {
                struct chsc_header request;
                u32 : 24;
                u32 first_chpid : 8;
                u32 : 24;
                u32 last_chpid : 8;
                u32 zeroes1;
                struct chsc_header response;
                u32 zeroes2;
                u32 not_valid : 1;
                u32 shared : 1;
                u32 : 22;
                u32 chpid : 8;
                u32 cmcv : 5;
                u32 : 11;
                u32 cmgq : 8;
                u32 cmg : 8;
                u32 zeroes3;
                u32 data[NR_MEASUREMENT_CHARS];
        } __attribute__ ((packed)) *scmc_area;

        scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scmc_area)
                return -ENOMEM;

        scmc_area->request.length = 0x0010;
        scmc_area->request.code = 0x0022;

        scmc_area->first_chpid = chp->chpid.id;
        scmc_area->last_chpid = chp->chpid.id;

        ccode = chsc(scmc_area);
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out;
        }

        switch (scmc_area->response.code) {
        case 0x0001: /* Success. */
                if (!scmc_area->not_valid) {
                        chp->cmg = scmc_area->cmg;
                        chp->shared = scmc_area->shared;
                        chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
                                                  (struct cmg_chars *)
                                                  &scmc_area->data);
                } else {
                        chp->cmg = -1;
                        chp->shared = -1;
                }
                ret = 0;
                break;
        case 0x0003: /* Invalid block. */
        case 0x0007: /* Invalid format. */
        case 0x0008: /* Invalid bit combination. */
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
                ret = -EINVAL;
                break;
        case 0x0004: /* Command not provided. */
                CIO_CRW_EVENT(2, "Model does not provide scmc\n");
                ret = -EOPNOTSUPP;
                break;
        default:
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              scmc_area->response.code);
                ret = -EIO;
        }
out:
        free_page((unsigned long)scmc_area);
        return ret;
}

static int __init
chsc_alloc_sei_area(void)
{
        sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sei_page)
                printk(KERN_WARNING "Can't allocate page for processing of "
                       "chsc machine checks!\n");
        return (sei_page ? 0 : -ENOMEM);
}

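/*
 * Enable a CHSC facility: issue CHSC command 0x0031 with the given
 * @operation_code. Used early during boot, hence __init.
 */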
int __init
chsc_enable_facility(int operation_code)
{
        int ret;
        struct {
                struct chsc_header request;
                u8 reserved1:4;
                u8 format:4;
                u8 reserved2;
                u16 operation_code;
                u32 reserved3;
                u32 reserved4;
                u32 operation_data_area[252];
                struct chsc_header response;
                u32 reserved5:4;
                u32 format2:4;
                u32 reserved6:24;
        } __attribute__ ((packed)) *sda_area;

        sda_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sda_area)
                return -ENOMEM;
        sda_area->request.length = 0x0400;
        sda_area->request.code = 0x0031;
        sda_area->operation_code = operation_code;

        ret = chsc(sda_area);
        if (ret > 0) {
                ret = (ret == 3) ? -ENODEV : -EBUSY;
                goto out;
        }
        switch (sda_area->response.code) {
        case 0x0001: /* everything ok */
                ret = 0;
                break;
        case 0x0003: /* invalid request block */
        case 0x0007: /* invalid format */
                ret = -EINVAL;
                break;
        case 0x0004: /* command not provided */
        case 0x0101: /* facility not provided */
                ret = -EOPNOTSUPP;
                break;
        default: /* something went wrong */
                ret = -EIO;
        }
out:
        free_page((unsigned long)sda_area);
        return ret;
}

subsys_initcall(chsc_alloc_sei_area);

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

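/*
 * Query the general and CHSC characteristics of the channel subsystem
 * (scsc, request code 0x0010) and cache them in the two globals
 * above.
 */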
int __init
chsc_determine_css_characteristics(void)
{
        int result;
        struct {
                struct chsc_header request;
                u32 reserved1;
                u32 reserved2;
                u32 reserved3;
                struct chsc_header response;
                u32 reserved4;
                u32 general_char[510];
                u32 chsc_char[518];
        } __attribute__ ((packed)) *scsc_area;

        scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scsc_area) {
                printk(KERN_WARNING "cio: Was not able to determine "
                       "available CHSCs due to no memory.\n");
                return -ENOMEM;
        }

        scsc_area->request.length = 0x0010;
        scsc_area->request.code = 0x0010;

        result = chsc(scsc_area);
        if (result) {
                printk(KERN_WARNING "cio: Was not able to determine "
                       "available CHSCs, cc=%i.\n", result);
                result = -EIO;
                goto exit;
        }

        if (scsc_area->response.code != 1) {
                printk(KERN_WARNING "cio: Was not able to determine "
                       "available CHSCs.\n");
                result = -EIO;
                goto exit;
        }
        memcpy(&css_general_characteristics, scsc_area->general_char,
               sizeof(css_general_characteristics));
        memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
               sizeof(css_chsc_characteristics));
exit:
        free_page((unsigned long)scsc_area);
        return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);