/*
 *  drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *                            IBM Corporation
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *               Cornelia Huck (cornelia.huck@de.ibm.com)
 *               Arnd Bergmann (arndb@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>
#include <asm/chpid.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"

static void *sei_page;

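/*
 * Fetch the subchannel-description data (ssd) for a single subchannel
 * via chsc and cache the resulting chpid/fla information in
 * sch->ssd_info. Called with sch->lock held; @page must be a
 * DMA-capable scratch page.
 */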
/* FIXME: this is _always_ called for every subchannel. shouldn't we
 *        process more than one at a time? */
static int
chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
{
        int ccode, j;

        struct {
                struct chsc_header request;
                u16 reserved1a:10;
                u16 ssid:2;
                u16 reserved1b:4;
                u16 f_sch;        /* first subchannel */
                u16 reserved2;
                u16 l_sch;        /* last subchannel */
                u32 reserved3;
                struct chsc_header response;
                u32 reserved4;
                u8 sch_valid : 1;
                u8 dev_valid : 1;
                u8 st        : 3; /* subchannel type */
                u8 zeroes    : 3;
                u8  unit_addr;    /* unit address */
                u16 devno;        /* device number */
                u8 path_mask;
                u8 fla_valid_mask;
                u16 sch;          /* subchannel */
                u8 chpid[8];      /* chpids 0-7 */
                u16 fla[8];       /* full link addresses 0-7 */
        } __attribute__ ((packed)) *ssd_area;

        ssd_area = page;

        ssd_area->request.length = 0x0010;
        ssd_area->request.code = 0x0004;

        ssd_area->ssid = sch->schid.ssid;
        ssd_area->f_sch = sch->schid.sch_no;
        ssd_area->l_sch = sch->schid.sch_no;

        ccode = chsc(ssd_area);
        if (ccode > 0) {
                pr_debug("chsc returned with ccode = %d\n", ccode);
                return (ccode == 3) ? -ENODEV : -EBUSY;
        }

        switch (ssd_area->response.code) {
        case 0x0001: /* everything ok */
                break;
        case 0x0002:
                CIO_CRW_EVENT(2, "Invalid command!\n");
                return -EINVAL;
        case 0x0003:
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
                return -EINVAL;
        case 0x0004:
                CIO_CRW_EVENT(2, "Model does not provide ssd\n");
                return -EOPNOTSUPP;
        default:
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              ssd_area->response.code);
                return -EIO;
        }

        /*
         * ssd_area->st stores the type of the detected
         * subchannel, with the following definitions:
         *
         * 0: I/O subchannel:     All fields have meaning
         * 1: CHSC subchannel:    Only sch_val, st and sch
         *                        have meaning
         * 2: Message subchannel: All fields except unit_addr
         *                        have meaning
         * 3: ADM subchannel:     Only sch_val, st and sch
         *                        have meaning
         *
         * Other types are currently undefined.
         */
        if (ssd_area->st > 3) { /* uhm, that looks strange... */
                CIO_CRW_EVENT(0, "Strange subchannel type %d"
                              " for sch 0.%x.%04x\n", ssd_area->st,
                              sch->schid.ssid, sch->schid.sch_no);
                /*
                 * There may have been a new subchannel type defined in the
                 * time since this code was written; since we don't know which
                 * fields have meaning and what to do with it we just jump out
                 */
                return 0;
        } else {
                const char *type[4] = {"I/O", "chsc", "message", "ADM"};
                CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
                              sch->schid.ssid, sch->schid.sch_no,
                              type[ssd_area->st]);

                sch->ssd_info.valid = 1;
                sch->ssd_info.type = ssd_area->st;
        }

        if (ssd_area->st == 0 || ssd_area->st == 2) {
                for (j = 0; j < 8; j++) {
                        if (!((0x80 >> j) & ssd_area->path_mask &
                              ssd_area->fla_valid_mask))
                                continue;
                        sch->ssd_info.chpid[j] = ssd_area->chpid[j];
                        sch->ssd_info.fla[j]   = ssd_area->fla[j];
                }
        }
        return 0;
}

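/*
 * Obtain ssd information for a subchannel and register any channel
 * paths in its path-installed mask that are not known yet.
 */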
int
css_get_ssd_info(struct subchannel *sch)
{
        int ret;
        void *page;

        page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!page)
                return -ENOMEM;
        spin_lock_irq(sch->lock);
        ret = chsc_get_sch_desc_irq(sch, page);
        if (ret) {
                static int cio_chsc_err_msg;

                if (!cio_chsc_err_msg) {
                        printk(KERN_ERR
                               "chsc_get_sch_descriptions:"
                               " Error %d while doing chsc; "
                               "processing some machine checks may "
                               "not work\n", ret);
                        cio_chsc_err_msg = 1;
                }
        }
        spin_unlock_irq(sch->lock);
        free_page((unsigned long)page);
        if (!ret) {
                int j, mask;
                struct chp_id chpid;

                chp_id_init(&chpid);
                /* Allocate channel path structures, if needed. */
                for (j = 0; j < 8; j++) {
                        mask = 0x80 >> j;
                        chpid.id = sch->ssd_info.chpid[j];
                        if ((sch->schib.pmcw.pim & mask) &&
                            !chp_is_registered(chpid))
                                chp_new(chpid);
                }
        }
        return ret;
}

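/*
 * bus_for_each_dev callback: remove a no-longer-available channel path
 * from one subchannel. Terminates internal I/O that was using the path
 * and schedules the subchannel for reevaluation if it has no usable
 * path left.
 */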
static int
s390_subchannel_remove_chpid(struct device *dev, void *data)
{
        int j;
        int mask;
        struct subchannel *sch;
        struct chp_id *chpid;
        struct schib schib;

        sch = to_subchannel(dev);
        chpid = data;
        for (j = 0; j < 8; j++) {
                mask = 0x80 >> j;
                if ((sch->schib.pmcw.pim & mask) &&
                    (sch->schib.pmcw.chpid[j] == chpid->id))
                        break;
        }
        if (j >= 8)
                return 0;

        spin_lock_irq(sch->lock);

        stsch(sch->schid, &schib);
        if (!schib.pmcw.dnv)
                goto out_unreg;
        memcpy(&sch->schib, &schib, sizeof(struct schib));
        /* Check for single path devices. */
        if (sch->schib.pmcw.pim == 0x80)
                goto out_unreg;

        if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
            (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
            (sch->schib.pmcw.lpum == mask)) {
                int cc;

                cc = cio_clear(sch);
                if (cc == -ENODEV)
                        goto out_unreg;
                /* Request retry of internal operation. */
                device_set_intretry(sch);
                /* Call handler. */
                if (sch->driver && sch->driver->termination)
                        sch->driver->termination(&sch->dev);
                goto out_unlock;
        }

        /* trigger path verification. */
        if (sch->driver && sch->driver->verify)
                sch->driver->verify(&sch->dev);
        else if (sch->lpm == mask)
                goto out_unreg;
out_unlock:
        spin_unlock_irq(sch->lock);
        return 0;
out_unreg:
        spin_unlock_irq(sch->lock);
        sch->lpm = 0;
        if (css_enqueue_subchannel_slow(sch->schid)) {
                css_clear_subchannel_slow_list();
                need_rescan = 1;
        }
        return 0;
}

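/**
 * chsc_chp_offline - process a channel path that has become unavailable
 * @chpid: channel-path ID
 *
 * Remove the channel path from all subchannels that use it and trigger
 * reevaluation of the affected subchannels.
 */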
void chsc_chp_offline(struct chp_id chpid)
{
        char dbf_txt[15];

        sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);

        if (chp_get_status(chpid) <= 0)
                return;
        bus_for_each_dev(&css_bus_type, NULL, &chpid,
                         s390_subchannel_remove_chpid);

        if (need_rescan || css_slow_subchannels_exist())
                queue_work(slow_path_wq, &slow_path_work);
}

struct res_acc_data {
        struct chp_id chpid;
        u32 fla_mask;
        u16 fla;
};

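/*
 * Check whether the accessibility event described by @res_data applies
 * to @sch. If it does, refresh the cached schib and return the path
 * mask bit of the newly accessible channel path; return 0 otherwise.
 */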
static int s390_process_res_acc_sch(struct res_acc_data *res_data,
                                    struct subchannel *sch)
{
        int found;
        int chp;
        int ccode;

        found = 0;
        for (chp = 0; chp <= 7; chp++)
                /*
                 * check if chpid is in information updated by ssd
                 */
                if (sch->ssd_info.valid &&
                    sch->ssd_info.chpid[chp] == res_data->chpid.id &&
                    (sch->ssd_info.fla[chp] & res_data->fla_mask)
                    == res_data->fla) {
                        found = 1;
                        break;
                }

        if (found == 0)
                return 0;

        /*
         * Do a stsch to update our subchannel structure with the
         * new path information and possibly check for logically
         * offline chpids.
         */
        ccode = stsch(sch->schid, &sch->schib);
        if (ccode > 0)
                return 0;

        return 0x80 >> chp;
}

static int
s390_process_res_acc_new_sch(struct subchannel_id schid)
{
        struct schib schib;
        int ret;
        /*
         * We don't know the device yet, but since a path
         * may be available now to the device we'll have
         * to do recognition again.
         * Since we don't have any idea about which chpid
         * that beast may be on we'll have to do a stsch
         * on all devices, grr...
         */
        if (stsch_err(schid, &schib))
                /* We're through */
                return need_rescan ? -EAGAIN : -ENXIO;

        /* Put it on the slow path. */
        ret = css_enqueue_subchannel_slow(schid);
        if (ret) {
                css_clear_subchannel_slow_list();
                need_rescan = 1;
                return -EAGAIN;
        }
        return 0;
}

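/*
 * for_each_subchannel callback: reevaluate one subchannel after a
 * resource-accessibility event. Adds the newly usable path to the
 * subchannel's lpm and triggers device reprobe or path verification.
 */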
static int
__s390_process_res_acc(struct subchannel_id schid, void *data)
{
        int chp_mask, old_lpm;
        struct res_acc_data *res_data;
        struct subchannel *sch;

        res_data = data;
        sch = get_subchannel_by_schid(schid);
        if (!sch)
                /* Check if a subchannel is newly available. */
                return s390_process_res_acc_new_sch(schid);

        spin_lock_irq(sch->lock);

        chp_mask = s390_process_res_acc_sch(res_data, sch);

        if (chp_mask == 0) {
                spin_unlock_irq(sch->lock);
                put_device(&sch->dev);
                return 0;
        }
        old_lpm = sch->lpm;
        sch->lpm = ((sch->schib.pmcw.pim &
                     sch->schib.pmcw.pam &
                     sch->schib.pmcw.pom)
                    | chp_mask) & sch->opm;
        if (!old_lpm && sch->lpm)
                device_trigger_reprobe(sch);
        else if (sch->driver && sch->driver->verify)
                sch->driver->verify(&sch->dev);

        spin_unlock_irq(sch->lock);
        put_device(&sch->dev);
        return 0;
}

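/* Process a resource-accessibility event for a channel path. */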
static int
s390_process_res_acc(struct res_acc_data *res_data)
{
        int rc;
        char dbf_txt[15];

        sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
                res_data->chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);
        if (res_data->fla != 0) {
                sprintf(dbf_txt, "fla%x", res_data->fla);
                CIO_TRACE_EVENT(2, dbf_txt);
        }

        /*
         * I/O resources may have become accessible.
         * Scan through all subchannels that may be concerned and
         * do a validation on those.
         * The more information we have (info), the less scanning
         * we will have to do.
         */
        rc = for_each_subchannel(__s390_process_res_acc, res_data);
        if (css_slow_subchannels_exist())
                rc = -EAGAIN;
        else if (rc != -EAGAIN)
                rc = 0;
        return rc;
}

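/*
 * Extract the chpid from a link-incident record (LIR). Returns the
 * chpid from the incident-node descriptor, or -EINVAL if the record
 * is null, the node descriptor is invalid, or it describes a
 * device-type node (not handled here).
 */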
static int
__get_chpid_from_lir(void *data)
{
        struct lir {
                u8  iq;
                u8  ic;
                u16 sci;
                /* incident-node descriptor */
                u32 indesc[28];
                /* attached-node descriptor */
                u32 andesc[28];
                /* incident-specific information */
                u32 isinfo[28];
        } __attribute__ ((packed)) *lir;

        lir = data;
        if (!(lir->iq & 0x80))
                /* NULL link incident record */
                return -EINVAL;
        if (!(lir->indesc[0] & 0xc0000000))
                /* node descriptor not valid */
                return -EINVAL;
        if (!(lir->indesc[0] & 0x10000000))
                /* don't handle device-type nodes - FIXME */
                return -EINVAL;
        /* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

        return (u16) (lir->indesc[0] & 0x000000ff);
}

struct chsc_sei_area {
        struct chsc_header request;
        u32 reserved1;
        u32 reserved2;
        u32 reserved3;
        struct chsc_header response;
        u32 reserved4;
        u8  flags;
        u8  vf;         /* validity flags */
        u8  rs;         /* reporting source */
        u8  cc;         /* content code */
        u16 fla;        /* full link address */
        u16 rsid;       /* reporting source id */
        u32 reserved5;
        u32 reserved6;
        u8 ccdf[4096 - 16 - 24];        /* content-code dependent field */
        /* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));

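/* Handle a link-incident sei event: take the affected chpid offline. */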
static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
{
        struct chp_id chpid;
        int id;

        CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
                      sei_area->rs, sei_area->rsid);
        if (sei_area->rs != 4)
                return 0;
        id = __get_chpid_from_lir(sei_area->ccdf);
        if (id < 0)
                CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
        else {
                chp_id_init(&chpid);
                chpid.id = id;
                chsc_chp_offline(chpid);
        }

        return 0;
}

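/*
 * Handle an I/O-resource-accessibility sei event: register the
 * reporting channel path if it is not known yet and rescan the
 * subchannels that the event may have made accessible.
 */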
static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
{
        struct res_acc_data res_data;
        struct chp_id chpid;
        int status;
        int rc;

        CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
                      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
        if (sei_area->rs != 4)
                return 0;
        chp_id_init(&chpid);
        chpid.id = sei_area->rsid;
        /* allocate a new channel path structure, if needed */
        status = chp_get_status(chpid);
        if (status < 0)
                chp_new(chpid);
        else if (!status)
                return 0;
        memset(&res_data, 0, sizeof(struct res_acc_data));
        res_data.chpid = chpid;
        if ((sei_area->vf & 0xc0) != 0) {
                res_data.fla = sei_area->fla;
                if ((sei_area->vf & 0xc0) == 0xc0)
                        /* full link address */
                        res_data.fla_mask = 0xffff;
                else
                        /* link address */
                        res_data.fla_mask = 0xff00;
        }
        rc = s390_process_res_acc(&res_data);

        return rc;
}

struct chp_config_data {
        u8 map[32];
        u8 op;
        u8 pc;
};

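/*
 * Handle a channel-path-configuration notification: schedule the
 * requested configure/deconfigure operation for every chpid set in
 * the event's bitmap.
 */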
static int chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
{
        struct chp_config_data *data;
        struct chp_id chpid;
        int num;

        CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
        if (sei_area->rs != 0)
                return 0;
        data = (struct chp_config_data *) &(sei_area->ccdf);
        chp_id_init(&chpid);
        for (num = 0; num <= __MAX_CHPID; num++) {
                if (!chp_test_bit(data->map, num))
                        continue;
                chpid.id = num;
                printk(KERN_WARNING "cio: processing configure event %d for "
                       "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
                switch (data->op) {
                case 0:
                        chp_cfg_schedule(chpid, 1);
                        break;
                case 1:
                        chp_cfg_schedule(chpid, 0);
                        break;
                case 2:
                        chp_cfg_cancel_deconfigure(chpid);
                        break;
                }
        }

        return 0;
}

static int chsc_process_sei(struct chsc_sei_area *sei_area)
{
        int rc;

        /* Check if we might have lost some information. */
        if (sei_area->flags & 0x40)
                CIO_CRW_EVENT(2, "chsc: event overflow\n");
        /* which kind of information was stored? */
        rc = 0;
        switch (sei_area->cc) {
        case 1: /* link incident */
                rc = chsc_process_sei_link_incident(sei_area);
                break;
        case 2: /* i/o resource accessibility */
                rc = chsc_process_sei_res_acc(sei_area);
                break;
        case 8: /* channel-path-configuration notification */
                rc = chsc_process_sei_chp_config(sei_area);
                break;
        default: /* other stuff */
                CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
                              sei_area->cc);
                break;
        }

        return rc;
}

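/*
 * Repeatedly issue the store-event-information (sei) CHSC command and
 * process each pending event until the response flags indicate that
 * no further information is available.
 */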
int chsc_process_crw(void)
{
        struct chsc_sei_area *sei_area;
        int ret;
        int rc;

        if (!sei_page)
                return 0;
        /* Access to sei_page is serialized through machine check handler
         * thread, so no need for locking. */
        sei_area = sei_page;

        CIO_TRACE_EVENT(2, "prcss");
        ret = 0;
        do {
                memset(sei_area, 0, sizeof(*sei_area));
                sei_area->request.length = 0x0010;
                sei_area->request.code = 0x000e;
                if (chsc(sei_area))
                        break;

                if (sei_area->response.code == 0x0001) {
                        CIO_CRW_EVENT(4, "chsc: sei successful\n");
                        rc = chsc_process_sei(sei_area);
                        if (rc)
                                ret = rc;
                } else {
                        CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
                                      sei_area->response.code);
                        ret = 0;
                        break;
                }
        } while (sei_area->flags & 0x80);

        return ret;
}

static int
__chp_add_new_sch(struct subchannel_id schid)
{
        struct schib schib;
        int ret;

        if (stsch_err(schid, &schib))
                /* We're through */
                return need_rescan ? -EAGAIN : -ENXIO;

        /* Put it on the slow path. */
        ret = css_enqueue_subchannel_slow(schid);
        if (ret) {
                css_clear_subchannel_slow_list();
                need_rescan = 1;
                return -EAGAIN;
        }
        return 0;
}

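/*
 * for_each_subchannel callback: if the subchannel has the newly
 * available channel path installed, add it to the subchannel's lpm
 * and trigger path verification; otherwise check whether the
 * subchannel itself has become available.
 */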
static int
__chp_add(struct subchannel_id schid, void *data)
{
        int i, mask;
        struct chp_id *chpid;
        struct subchannel *sch;

        chpid = data;
        sch = get_subchannel_by_schid(schid);
        if (!sch)
                /* Check if the subchannel is now available. */
                return __chp_add_new_sch(schid);
        spin_lock_irq(sch->lock);
        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if ((sch->schib.pmcw.pim & mask) &&
                    (sch->schib.pmcw.chpid[i] == chpid->id)) {
                        if (stsch(sch->schid, &sch->schib) != 0) {
                                /* Endgame. */
                                spin_unlock_irq(sch->lock);
                                /* Drop reference from get_subchannel_by_schid. */
                                put_device(&sch->dev);
                                return -ENXIO;
                        }
                        break;
                }
        }
        if (i == 8) {
                spin_unlock_irq(sch->lock);
                put_device(&sch->dev);
                return 0;
        }
        sch->lpm = ((sch->schib.pmcw.pim &
                     sch->schib.pmcw.pam &
                     sch->schib.pmcw.pom)
                    | mask) & sch->opm;

        if (sch->driver && sch->driver->verify)
                sch->driver->verify(&sch->dev);

        spin_unlock_irq(sch->lock);
        put_device(&sch->dev);
        return 0;
}

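/**
 * chsc_chp_online - process a channel path that has become available
 * @chpid: channel-path ID
 *
 * Scan all subchannels for users of the new channel path and trigger
 * path verification on them.
 */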
int chsc_chp_online(struct chp_id chpid)
{
        int rc;
        char dbf_txt[15];

        sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);

        if (chp_get_status(chpid) == 0)
                return 0;
        rc = for_each_subchannel(__chp_add, &chpid);
        if (css_slow_subchannels_exist())
                rc = -EAGAIN;
        if (rc != -EAGAIN)
                rc = 0;
        return rc;
}

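/*
 * Return 1 if the subchannel's last-path-used mask names the path
 * with the given index and I/O is active on it, 0 otherwise.
 */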
static int check_for_io_on_path(struct subchannel *sch, int index)
{
        int cc;

        cc = stsch(sch->schid, &sch->schib);
        if (cc)
                return 0;
        if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index))
                return 1;
        return 0;
}

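/*
 * Terminate internal I/O with clear subchannel and request a retry of
 * the operation; if the clear fails, force reevaluation of the
 * subchannel instead.
 */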
static void terminate_internal_io(struct subchannel *sch)
{
        if (cio_clear(sch)) {
                /* Recheck device in case clear failed. */
                sch->lpm = 0;
                if (device_trigger_verify(sch) != 0) {
                        if (css_enqueue_subchannel_slow(sch->schid)) {
                                css_clear_subchannel_slow_list();
                                need_rescan = 1;
                        }
                }
                return;
        }
        /* Request retry of internal operation. */
        device_set_intretry(sch);
        /* Call handler. */
        if (sch->driver && sch->driver->termination)
                sch->driver->termination(&sch->dev);
}

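/*
 * Apply a vary on/off of @chpid to a single subchannel: adjust its
 * opm and lpm masks and trigger reprobe, path verification or
 * termination of I/O that is using the varied-off path.
 */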
static void __s390_subchannel_vary_chpid(struct subchannel *sch,
                                         struct chp_id chpid, int on)
{
        int chp, old_lpm;
        unsigned long flags;

        if (!sch->ssd_info.valid)
                return;

        spin_lock_irqsave(sch->lock, flags);
        old_lpm = sch->lpm;
        for (chp = 0; chp < 8; chp++) {
                if (sch->ssd_info.chpid[chp] != chpid.id)
                        continue;

                if (on) {
                        sch->opm |= (0x80 >> chp);
                        sch->lpm |= (0x80 >> chp);
                        if (!old_lpm)
                                device_trigger_reprobe(sch);
                        else if (sch->driver && sch->driver->verify)
                                sch->driver->verify(&sch->dev);
                        break;
                }
                sch->opm &= ~(0x80 >> chp);
                sch->lpm &= ~(0x80 >> chp);
                if (check_for_io_on_path(sch, chp)) {
                        if (device_is_online(sch))
                                /* Path verification is done after killing. */
                                device_kill_io(sch);
                        else
                                /* Kill and retry internal I/O. */
                                terminate_internal_io(sch);
                } else if (!sch->lpm) {
                        if (device_trigger_verify(sch) != 0) {
                                if (css_enqueue_subchannel_slow(sch->schid)) {
                                        css_clear_subchannel_slow_list();
                                        need_rescan = 1;
                                }
                        }
                } else if (sch->driver && sch->driver->verify)
                        sch->driver->verify(&sch->dev);
                break;
        }
        spin_unlock_irqrestore(sch->lock, flags);
}

static int s390_subchannel_vary_chpid_off(struct device *dev, void *data)
{
        struct subchannel *sch;
        struct chp_id *chpid;

        sch = to_subchannel(dev);
        chpid = data;

        __s390_subchannel_vary_chpid(sch, *chpid, 0);
        return 0;
}

static int s390_subchannel_vary_chpid_on(struct device *dev, void *data)
{
        struct subchannel *sch;
        struct chp_id *chpid;

        sch = to_subchannel(dev);
        chpid = data;

        __s390_subchannel_vary_chpid(sch, *chpid, 1);
        return 0;
}

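/*
 * for_each_subchannel callback for vary on: schedule evaluation of
 * subchannels that are not registered yet but respond to stsch.
 */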
static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
        struct schib schib;
        struct subchannel *sch;

        sch = get_subchannel_by_schid(schid);
        if (sch) {
                put_device(&sch->dev);
                return 0;
        }
        if (stsch_err(schid, &schib))
                /* We're through */
                return -ENXIO;
        /* Put it on the slow path. */
        if (css_enqueue_subchannel_slow(schid)) {
                css_clear_subchannel_slow_list();
                need_rescan = 1;
                return -EAGAIN;
        }
        return 0;
}

/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
        /*
         * Redo PathVerification on the devices the chpid connects to
         */

        bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
                         s390_subchannel_vary_chpid_on :
                         s390_subchannel_vary_chpid_off);
        if (on)
                /* Scan for new devices on varied on path. */
                for_each_subchannel(__s390_vary_chpid_on, NULL);
        if (need_rescan || css_slow_subchannels_exist())
                queue_work(slow_path_wq, &slow_path_work);
        return 0;
}

static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
        int i;

        for (i = 0; i <= __MAX_CHPID; i++) {
                if (!css->chps[i])
                        continue;
                chp_remove_cmg_attr(css->chps[i]);
        }
}

static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
        int i, ret;

        ret = 0;
        for (i = 0; i <= __MAX_CHPID; i++) {
                if (!css->chps[i])
                        continue;
                ret = chp_add_cmg_attr(css->chps[i]);
                if (ret)
                        goto cleanup;
        }
        return ret;
cleanup:
        for (--i; i >= 0; i--) {
                if (!css->chps[i])
                        continue;
                chp_remove_cmg_attr(css->chps[i]);
        }
        return ret;
}

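/*
 * Issue the secm CHSC command to enable or disable channel
 * measurement, passing the addresses of the two cub pages anchored
 * in @css. @page must be a DMA-capable scratch page.
 */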
static int
__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
{
        struct {
                struct chsc_header request;
                u32 operation_code : 2;
                u32 : 30;
                u32 key : 4;
                u32 : 28;
                u32 zeroes1;
                u32 cub_addr1;
                u32 zeroes2;
                u32 cub_addr2;
                u32 reserved[13];
                struct chsc_header response;
                u32 status : 8;
                u32 : 4;
                u32 fmt : 4;
                u32 : 16;
        } __attribute__ ((packed)) *secm_area;
        int ret, ccode;

        secm_area = page;
        secm_area->request.length = 0x0050;
        secm_area->request.code = 0x0016;

        secm_area->key = PAGE_DEFAULT_KEY;
        secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
        secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

        secm_area->operation_code = enable ? 0 : 1;

        ccode = chsc(secm_area);
        if (ccode > 0)
                return (ccode == 3) ? -ENODEV : -EBUSY;

        switch (secm_area->response.code) {
        case 0x0001: /* Success. */
                ret = 0;
                break;
        case 0x0003: /* Invalid block. */
        case 0x0007: /* Invalid format. */
        case 0x0008: /* Other invalid block. */
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
                ret = -EINVAL;
                break;
        case 0x0004: /* Command not provided in model. */
                CIO_CRW_EVENT(2, "Model does not provide secm\n");
                ret = -EOPNOTSUPP;
                break;
        case 0x0102: /* cub addresses incorrect */
                CIO_CRW_EVENT(2, "Invalid addresses in chsc request block\n");
                ret = -EINVAL;
                break;
        case 0x0103: /* key error */
                CIO_CRW_EVENT(2, "Access key error in secm\n");
                ret = -EINVAL;
                break;
        case 0x0105: /* error while starting */
                CIO_CRW_EVENT(2, "Error while starting channel measurement\n");
                ret = -EIO;
                break;
        default:
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              secm_area->response.code);
                ret = -EIO;
        }
        return ret;
}

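/*
 * Enable or disable channel measurement for @css. Allocates the cub
 * pages on enable and creates or removes the per-chpid measurement
 * attributes to match the new state.
 */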
int
chsc_secm(struct channel_subsystem *css, int enable)
{
        void *secm_area;
        int ret;

        secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!secm_area)
                return -ENOMEM;

        mutex_lock(&css->mutex);
        if (enable && !css->cm_enabled) {
                css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                if (!css->cub_addr1 || !css->cub_addr2) {
                        free_page((unsigned long)css->cub_addr1);
                        free_page((unsigned long)css->cub_addr2);
                        free_page((unsigned long)secm_area);
                        mutex_unlock(&css->mutex);
                        return -ENOMEM;
                }
        }
        ret = __chsc_do_secm(css, enable, secm_area);
        if (!ret) {
                css->cm_enabled = enable;
                if (css->cm_enabled) {
                        ret = chsc_add_cmg_attr(css);
                        if (ret) {
                                memset(secm_area, 0, PAGE_SIZE);
                                __chsc_do_secm(css, 0, secm_area);
                                css->cm_enabled = 0;
                        }
                } else
                        chsc_remove_cmg_attr(css);
        }
        if (enable && !css->cm_enabled) {
                free_page((unsigned long)css->cub_addr1);
                free_page((unsigned long)css->cub_addr2);
        }
        mutex_unlock(&css->mutex);
        free_page((unsigned long)secm_area);
        return ret;
}

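/*
 * Issue the store-channel-path-description (scpd) CHSC command for a
 * single chpid and copy the returned descriptor to @desc.
 */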
int chsc_determine_channel_path_description(struct chp_id chpid,
                                            struct channel_path_desc *desc)
{
        int ccode, ret;

        struct {
                struct chsc_header request;
                u32 : 24;
                u32 first_chpid : 8;
                u32 : 24;
                u32 last_chpid : 8;
                u32 zeroes1;
                struct chsc_header response;
                u32 zeroes2;
                struct channel_path_desc desc;
        } __attribute__ ((packed)) *scpd_area;

        scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scpd_area)
                return -ENOMEM;

        scpd_area->request.length = 0x0010;
        scpd_area->request.code = 0x0002;

        scpd_area->first_chpid = chpid.id;
        scpd_area->last_chpid = chpid.id;

        ccode = chsc(scpd_area);
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out;
        }

        switch (scpd_area->response.code) {
        case 0x0001: /* Success. */
                memcpy(desc, &scpd_area->desc,
                       sizeof(struct channel_path_desc));
                ret = 0;
                break;
        case 0x0003: /* Invalid block. */
        case 0x0007: /* Invalid format. */
        case 0x0008: /* Other invalid block. */
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
                ret = -EINVAL;
                break;
        case 0x0004: /* Command not provided in model. */
                CIO_CRW_EVENT(2, "Model does not provide scpd\n");
                ret = -EOPNOTSUPP;
                break;
        default:
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              scpd_area->response.code);
                ret = -EIO;
        }
out:
        free_page((unsigned long)scpd_area);
        return ret;
}

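/*
 * Copy the measurement characteristics marked valid by the cmcv mask
 * into freshly allocated chp->cmg_chars. Only cmg 2 and 3 carry
 * cmg-dependent data.
 */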
static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
                          struct cmg_chars *chars)
{
        switch (chp->cmg) {
        case 2:
        case 3:
                chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
                                         GFP_KERNEL);
                if (chp->cmg_chars) {
                        int i, mask;
                        struct cmg_chars *cmg_chars;

                        cmg_chars = chp->cmg_chars;
                        for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
                                mask = 0x80 >> (i + 3);
                                if (cmcv & mask)
                                        cmg_chars->values[i] = chars->values[i];
                                else
                                        cmg_chars->values[i] = 0;
                        }
                }
                break;
        default:
                /* No cmg-dependent data. */
                break;
        }
}

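/*
 * Issue the store-channel-measurement-characteristics (scmc) CHSC
 * command for a chpid and store the cmg, shared state and measurement
 * characteristics in @chp.
 */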
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
        int ccode, ret;

        struct {
                struct chsc_header request;
                u32 : 24;
                u32 first_chpid : 8;
                u32 : 24;
                u32 last_chpid : 8;
                u32 zeroes1;
                struct chsc_header response;
                u32 zeroes2;
                u32 not_valid : 1;
                u32 shared : 1;
                u32 : 22;
                u32 chpid : 8;
                u32 cmcv : 5;
                u32 : 11;
                u32 cmgq : 8;
                u32 cmg : 8;
                u32 zeroes3;
                u32 data[NR_MEASUREMENT_CHARS];
        } __attribute__ ((packed)) *scmc_area;

        scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scmc_area)
                return -ENOMEM;

        scmc_area->request.length = 0x0010;
        scmc_area->request.code = 0x0022;

        scmc_area->first_chpid = chp->chpid.id;
        scmc_area->last_chpid = chp->chpid.id;

        ccode = chsc(scmc_area);
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out;
        }

        switch (scmc_area->response.code) {
        case 0x0001: /* Success. */
                if (!scmc_area->not_valid) {
                        chp->cmg = scmc_area->cmg;
                        chp->shared = scmc_area->shared;
                        chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
                                                  (struct cmg_chars *)
                                                  &scmc_area->data);
                } else {
                        chp->cmg = -1;
                        chp->shared = -1;
                }
                ret = 0;
                break;
        case 0x0003: /* Invalid block. */
        case 0x0007: /* Invalid format. */
        case 0x0008: /* Invalid bit combination. */
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
                ret = -EINVAL;
                break;
        case 0x0004: /* Command not provided. */
                CIO_CRW_EVENT(2, "Model does not provide scmc\n");
                ret = -EOPNOTSUPP;
                break;
        default:
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              scmc_area->response.code);
                ret = -EIO;
        }
out:
        free_page((unsigned long)scmc_area);
        return ret;
}

static int __init
chsc_alloc_sei_area(void)
{
        sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sei_page)
                printk(KERN_WARNING "Can't allocate page for processing of "
                       "chsc machine checks!\n");
        return (sei_page ? 0 : -ENOMEM);
}

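/*
 * Issue the sda CHSC command (code 0x0031) to enable the optional
 * channel-subsystem facility selected by @operation_code.
 */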
int __init
chsc_enable_facility(int operation_code)
{
        int ret;
        struct {
                struct chsc_header request;
                u8 reserved1:4;
                u8 format:4;
                u8 reserved2;
                u16 operation_code;
                u32 reserved3;
                u32 reserved4;
                u32 operation_data_area[252];
                struct chsc_header response;
                u32 reserved5:4;
                u32 format2:4;
                u32 reserved6:24;
        } __attribute__ ((packed)) *sda_area;

        sda_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sda_area)
                return -ENOMEM;
        sda_area->request.length = 0x0400;
        sda_area->request.code = 0x0031;
        sda_area->operation_code = operation_code;

        ret = chsc(sda_area);
        if (ret > 0) {
                ret = (ret == 3) ? -ENODEV : -EBUSY;
                goto out;
        }
        switch (sda_area->response.code) {
        case 0x0001: /* everything ok */
                ret = 0;
                break;
        case 0x0003: /* invalid request block */
        case 0x0007:
                ret = -EINVAL;
                break;
        case 0x0004: /* command not provided */
        case 0x0101: /* facility not provided */
                ret = -EOPNOTSUPP;
                break;
        default: /* something went wrong */
                ret = -EIO;
        }
 out:
        free_page((unsigned long)sda_area);
        return ret;
}

subsys_initcall(chsc_alloc_sei_area);

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

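/*
 * Issue the store-channel-subsystem-characteristics (scsc) CHSC
 * command and cache the general and chsc characteristics for later
 * feature tests.
 */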
int __init
chsc_determine_css_characteristics(void)
{
        int result;
        struct {
                struct chsc_header request;
                u32 reserved1;
                u32 reserved2;
                u32 reserved3;
                struct chsc_header response;
                u32 reserved4;
                u32 general_char[510];
                u32 chsc_char[518];
        } __attribute__ ((packed)) *scsc_area;

        scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scsc_area) {
                printk(KERN_WARNING "cio: Was not able to determine "
                       "available CHSCs due to no memory.\n");
                return -ENOMEM;
        }

        scsc_area->request.length = 0x0010;
        scsc_area->request.code = 0x0010;

        result = chsc(scsc_area);
        if (result) {
                printk(KERN_WARNING "cio: Was not able to determine "
                       "available CHSCs, cc=%i.\n", result);
                result = -EIO;
                goto exit;
        }

        if (scsc_area->response.code != 1) {
                printk(KERN_WARNING "cio: Was not able to determine "
                       "available CHSCs.\n");
                result = -EIO;
                goto exit;
        }
        memcpy(&css_general_characteristics, scsc_area->general_char,
               sizeof(css_general_characteristics));
        memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
               sizeof(css_chsc_characteristics));
exit:
        free_page((unsigned long)scsc_area);
        return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);