/*
 *  drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *                            IBM Corporation
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *               Cornelia Huck (cornelia.huck@de.ibm.com)
 *               Arnd Bergmann (arndb@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"

static void *sei_page;

static int new_channel_path(int chpid);

static inline void
set_chp_logically_online(int chp, int onoff)
{
        css[0]->chps[chp]->state = onoff;
}

/*
 * Returns the channel path state: > 0 logically online, 0 logically
 * offline, -ENODEV if the channel path is not known.
 */
static int
get_chp_status(int chp)
{
        return (css[0]->chps[chp] ? css[0]->chps[chp]->state : -ENODEV);
}

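/*
 * Clear from the subchannel's operational path mask all paths whose
 * channel path is known but logically offline.
 */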
void
chsc_validate_chpids(struct subchannel *sch)
{
        int mask, chp;

        for (chp = 0; chp <= 7; chp++) {
                mask = 0x80 >> chp;
                if (!get_chp_status(sch->schib.pmcw.chpid[chp]))
                        /* disable using this path */
                        sch->opm &= ~mask;
        }
}

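/*
 * Sanity check for a chpid that should be online: if the channel path is
 * not known at all, trigger a rescan via the slow path; if it is known
 * but logically offline, warn.
 */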
void
chpid_is_actually_online(int chp)
{
        int state;

        state = get_chp_status(chp);
        if (state < 0) {
                need_rescan = 1;
                queue_work(slow_path_wq, &slow_path_work);
        } else
                WARN_ON(!state);
}

/* FIXME: this is _always_ called for every subchannel. shouldn't we
 *        process more than one at a time? */
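/*
 * Issue the CHSC store-subchannel-description command (code 0x0004) for a
 * single subchannel and cache the returned chpid/full link address
 * information in sch->ssd_info. Called with sch->lock held; @page must be
 * a zeroed page suitable for DMA.
 */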
static int
chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
{
        int ccode, j;

        struct {
                struct chsc_header request;
                u16 reserved1a:10;
                u16 ssid:2;
                u16 reserved1b:4;
                u16 f_sch;        /* first subchannel */
                u16 reserved2;
                u16 l_sch;        /* last subchannel */
                u32 reserved3;
                struct chsc_header response;
                u32 reserved4;
                u8 sch_valid : 1;
                u8 dev_valid : 1;
                u8 st        : 3; /* subchannel type */
                u8 zeroes    : 3;
                u8  unit_addr;    /* unit address */
                u16 devno;        /* device number */
                u8 path_mask;
                u8 fla_valid_mask;
                u16 sch;          /* subchannel */
                u8 chpid[8];      /* chpids 0-7 */
                u16 fla[8];       /* full link addresses 0-7 */
        } *ssd_area;

        ssd_area = page;

        ssd_area->request = (struct chsc_header) {
                .length = 0x0010,
                .code   = 0x0004,
        };

        ssd_area->ssid = sch->schid.ssid;
        ssd_area->f_sch = sch->schid.sch_no;
        ssd_area->l_sch = sch->schid.sch_no;

        ccode = chsc(ssd_area);
        if (ccode > 0) {
                pr_debug("chsc returned with ccode = %d\n", ccode);
                return (ccode == 3) ? -ENODEV : -EBUSY;
        }

        switch (ssd_area->response.code) {
        case 0x0001: /* everything ok */
                break;
        case 0x0002:
                CIO_CRW_EVENT(2, "Invalid command!\n");
                return -EINVAL;
        case 0x0003:
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
                return -EINVAL;
        case 0x0004:
                CIO_CRW_EVENT(2, "Model does not provide ssd\n");
                return -EOPNOTSUPP;
        default:
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              ssd_area->response.code);
                return -EIO;
        }

        /*
         * ssd_area->st stores the type of the detected
         * subchannel, with the following definitions:
         *
         * 0: I/O subchannel:     All fields have meaning
         * 1: CHSC subchannel:    Only sch_val, st and sch
         *                        have meaning
         * 2: Message subchannel: All fields except unit_addr
         *                        have meaning
         * 3: ADM subchannel:     Only sch_val, st and sch
         *                        have meaning
         *
         * Other types are currently undefined.
         */
        if (ssd_area->st > 3) { /* uhm, that looks strange... */
                CIO_CRW_EVENT(0, "Strange subchannel type %d"
                              " for sch 0.%x.%04x\n", ssd_area->st,
                              sch->schid.ssid, sch->schid.sch_no);
                /*
                 * There may have been a new subchannel type defined in the
                 * time since this code was written; since we don't know which
                 * fields have meaning and what to do with it we just jump out
                 */
                return 0;
        } else {
                const char *type[4] = {"I/O", "chsc", "message", "ADM"};
                CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
                              sch->schid.ssid, sch->schid.sch_no,
                              type[ssd_area->st]);

                sch->ssd_info.valid = 1;
                sch->ssd_info.type = ssd_area->st;
        }

        if (ssd_area->st == 0 || ssd_area->st == 2) {
                for (j = 0; j < 8; j++) {
                        if (!((0x80 >> j) & ssd_area->path_mask &
                              ssd_area->fla_valid_mask))
                                continue;
                        sch->ssd_info.chpid[j] = ssd_area->chpid[j];
                        sch->ssd_info.fla[j]   = ssd_area->fla[j];
                }
        }
        return 0;
}

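/*
 * Fetch the subchannel description for @sch and make sure a channel path
 * structure exists for every chpid found there.
 */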
int
css_get_ssd_info(struct subchannel *sch)
{
        int ret;
        void *page;

        page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!page)
                return -ENOMEM;
        spin_lock_irq(&sch->lock);
        ret = chsc_get_sch_desc_irq(sch, page);
        if (ret) {
                static int cio_chsc_err_msg;

                if (!cio_chsc_err_msg) {
                        printk(KERN_ERR
                               "chsc_get_sch_descriptions:"
                               " Error %d while doing chsc; "
                               "processing some machine checks may "
                               "not work\n", ret);
                        cio_chsc_err_msg = 1;
                }
        }
        spin_unlock_irq(&sch->lock);
        free_page((unsigned long)page);
        if (!ret) {
                int j, chpid;
                /* Allocate channel path structures, if needed. */
                for (j = 0; j < 8; j++) {
                        chpid = sch->ssd_info.chpid[j];
                        if (chpid && (get_chp_status(chpid) < 0))
                                new_channel_path(chpid);
                }
        }
        return ret;
}

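/*
 * Called via bus_for_each_dev when a channel path has gone away: remove
 * the path from the subchannel's masks, terminate I/O still running on it
 * if need be, and either trigger path verification or schedule the
 * subchannel for deregistration via the slow path.
 */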
static int
s390_subchannel_remove_chpid(struct device *dev, void *data)
{
        int j;
        int mask;
        struct subchannel *sch;
        struct channel_path *chpid;
        struct schib schib;

        sch = to_subchannel(dev);
        chpid = data;
        for (j = 0; j < 8; j++)
                if (sch->schib.pmcw.chpid[j] == chpid->id)
                        break;
        if (j >= 8)
                return 0;

        mask = 0x80 >> j;
        spin_lock_irq(&sch->lock);

        stsch(sch->schid, &schib);
        if (!schib.pmcw.dnv)
                goto out_unreg;
        memcpy(&sch->schib, &schib, sizeof(struct schib));
        /* Check for single path devices. */
        if (sch->schib.pmcw.pim == 0x80)
                goto out_unreg;
        if (sch->vpm == mask)
                goto out_unreg;

        if ((sch->schib.scsw.actl & (SCSW_ACTL_CLEAR_PEND |
                                     SCSW_ACTL_HALT_PEND |
                                     SCSW_ACTL_START_PEND |
                                     SCSW_ACTL_RESUME_PEND)) &&
            (sch->schib.pmcw.lpum == mask)) {
                int cc = cio_cancel(sch);

                if (cc == -ENODEV)
                        goto out_unreg;

                if (cc == -EINVAL) {
                        cc = cio_clear(sch);
                        if (cc == -ENODEV)
                                goto out_unreg;
                        /* Call handler. */
                        if (sch->driver && sch->driver->termination)
                                sch->driver->termination(&sch->dev);
                        goto out_unlock;
                }
        } else if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
                   (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
                   (sch->schib.pmcw.lpum == mask)) {
                int cc;

                cc = cio_clear(sch);
                if (cc == -ENODEV)
                        goto out_unreg;
                /* Call handler. */
                if (sch->driver && sch->driver->termination)
                        sch->driver->termination(&sch->dev);
                goto out_unlock;
        }

        /* Trigger path verification. */
        if (sch->driver && sch->driver->verify)
                sch->driver->verify(&sch->dev);
out_unlock:
        spin_unlock_irq(&sch->lock);
        return 0;
out_unreg:
        spin_unlock_irq(&sch->lock);
        sch->lpm = 0;
        if (css_enqueue_subchannel_slow(sch->schid)) {
                css_clear_subchannel_slow_list();
                need_rescan = 1;
        }
        return 0;
}

static inline void
s390_set_chpid_offline(__u8 chpid)
{
        char dbf_txt[15];
        struct device *dev;

        sprintf(dbf_txt, "chpr%x", chpid);
        CIO_TRACE_EVENT(2, dbf_txt);

        if (get_chp_status(chpid) <= 0)
                return;
        dev = get_device(&css[0]->chps[chpid]->dev);
        bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev),
                         s390_subchannel_remove_chpid);

        if (need_rescan || css_slow_subchannels_exist())
                queue_work(slow_path_wq, &slow_path_work);
        put_device(dev);
}

struct res_acc_data {
        struct channel_path *chp;
        u32 fla_mask;
        u16 fla;
};

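/*
 * Check whether the subchannel is affected by the resource accessibility
 * event described by @res_data; returns the path mask of the matching
 * chpid, or 0 if the subchannel is not affected.
 */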
static int
s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
{
        int found;
        int chp;
        int ccode;

        found = 0;
        for (chp = 0; chp <= 7; chp++)
                /*
                 * check if chpid is in information updated by ssd
                 */
                if (sch->ssd_info.valid &&
                    sch->ssd_info.chpid[chp] == res_data->chp->id &&
                    (sch->ssd_info.fla[chp] & res_data->fla_mask)
                    == res_data->fla) {
                        found = 1;
                        break;
                }

        if (found == 0)
                return 0;

        /*
         * Do a stsch to update our subchannel structure with the
         * new path information and, if necessary, check for logically
         * offline chpids.
         */
        ccode = stsch(sch->schid, &sch->schib);
        if (ccode > 0)
                return 0;

        return 0x80 >> chp;
}

static inline int
s390_process_res_acc_new_sch(struct subchannel_id schid)
{
        struct schib schib;
        int ret;
        /*
         * We don't know the device yet, but since a path
         * may be available now to the device we'll have
         * to do recognition again.
         * Since we don't have any idea about which chpid
         * that beast may be on we'll have to do a stsch
         * on all devices, grr...
         */
        if (stsch_err(schid, &schib))
                /* We're through */
                return need_rescan ? -EAGAIN : -ENXIO;

        /* Put it on the slow path. */
        ret = css_enqueue_subchannel_slow(schid);
        if (ret) {
                css_clear_subchannel_slow_list();
                need_rescan = 1;
                return -EAGAIN;
        }
        return 0;
}

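/*
 * Per-subchannel worker for s390_process_res_acc: re-enable paths that
 * have become accessible and trigger reprobe or path verification. A full
 * link address identifies the subchannel uniquely, so in that case a
 * match returns -ENODEV to stop the iteration early.
 */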
static int
__s390_process_res_acc(struct subchannel_id schid, void *data)
{
        int chp_mask, old_lpm;
        struct res_acc_data *res_data;
        struct subchannel *sch;

        res_data = (struct res_acc_data *)data;
        sch = get_subchannel_by_schid(schid);
        if (!sch)
                /* Check if a subchannel is newly available. */
                return s390_process_res_acc_new_sch(schid);

        spin_lock_irq(&sch->lock);

        chp_mask = s390_process_res_acc_sch(res_data, sch);

        if (chp_mask == 0) {
                spin_unlock_irq(&sch->lock);
                return 0;
        }
        old_lpm = sch->lpm;
        sch->lpm = ((sch->schib.pmcw.pim &
                     sch->schib.pmcw.pam &
                     sch->schib.pmcw.pom)
                    | chp_mask) & sch->opm;
        if (!old_lpm && sch->lpm)
                device_trigger_reprobe(sch);
        else if (sch->driver && sch->driver->verify)
                sch->driver->verify(&sch->dev);

        spin_unlock_irq(&sch->lock);
        put_device(&sch->dev);
        return (res_data->fla_mask == 0xffff) ? -ENODEV : 0;
}

static int
s390_process_res_acc(struct res_acc_data *res_data)
{
        int rc;
        char dbf_txt[15];

        sprintf(dbf_txt, "accpr%x", res_data->chp->id);
        CIO_TRACE_EVENT(2, dbf_txt);
        if (res_data->fla != 0) {
                sprintf(dbf_txt, "fla%x", res_data->fla);
                CIO_TRACE_EVENT(2, dbf_txt);
        }

        /*
         * I/O resources may have become accessible.
         * Scan through all subchannels that may be concerned and
         * do a validation on those.
         * The more information we have (in res_data), the less
         * scanning we will have to do.
         */
        rc = for_each_subchannel(__s390_process_res_acc, res_data);
        if (css_slow_subchannels_exist())
                rc = -EAGAIN;
        else if (rc != -EAGAIN)
                rc = 0;
        return rc;
}

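/*
 * Extract the chpid from byte 3 of the incident-node descriptor of a link
 * incident record; returns -EINVAL for null records, invalid node
 * descriptors and device-type nodes.
 */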
static int
__get_chpid_from_lir(void *data)
{
        struct lir {
                u8  iq;
                u8  ic;
                u16 sci;
                /* incident-node descriptor */
                u32 indesc[28];
                /* attached-node descriptor */
                u32 andesc[28];
                /* incident-specific information */
                u32 isinfo[28];
        } *lir;

        lir = (struct lir *)data;
        if (!(lir->iq & 0x80))
                /* NULL link incident record */
                return -EINVAL;
        if (!(lir->indesc[0] & 0xc0000000))
                /* node descriptor not valid */
                return -EINVAL;
        if (!(lir->indesc[0] & 0x10000000))
                /* don't handle device-type nodes - FIXME */
                return -EINVAL;
        /* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

        return (u16) (lir->indesc[0] & 0x000000ff);
}

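/*
 * Repeatedly issue the CHSC store-event-information command (code 0x000e)
 * until no more event information is pending. Link incidents (content
 * code 1) take the affected chpid offline; resource accessibility events
 * (content code 2) rescan for paths and devices that have become
 * available. Other event types are only logged.
 */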
int
chsc_process_crw(void)
{
        int chpid, ret;
        struct res_acc_data res_data;
        struct {
                struct chsc_header request;
                u32 reserved1;
                u32 reserved2;
                u32 reserved3;
                struct chsc_header response;
                u32 reserved4;
                u8  flags;
                u8  vf;         /* validity flags */
                u8  rs;         /* reporting source */
                u8  cc;         /* content code */
                u16 fla;        /* full link address */
                u16 rsid;       /* reporting source id */
                u32 reserved5;
                u32 reserved6;
                u32 ccdf[96];   /* content-code dependent field */
                /* ccdf has to be big enough for a link-incident record */
        } *sei_area;

        if (!sei_page)
                return 0;
        /*
         * Build the chsc request block for store event information
         * and do the call.
         * This function is only called by the machine check handler thread,
         * so we don't need locking for the sei_page.
         */
        sei_area = sei_page;

        CIO_TRACE_EVENT(2, "prcss");
        ret = 0;
        do {
                int ccode, status;
                struct device *dev;
                memset(sei_area, 0, sizeof(*sei_area));
                memset(&res_data, 0, sizeof(struct res_acc_data));
                sei_area->request = (struct chsc_header) {
                        .length = 0x0010,
                        .code   = 0x000e,
                };

                ccode = chsc(sei_area);
                if (ccode > 0)
                        return 0;

                switch (sei_area->response.code) {
                        /* for debug purposes, check for problems */
                case 0x0001:
                        CIO_CRW_EVENT(4, "chsc_process_crw: event information "
                                      "successfully stored\n");
                        break; /* everything ok */
                case 0x0002:
                        CIO_CRW_EVENT(2,
                                      "chsc_process_crw: invalid command!\n");
                        return 0;
                case 0x0003:
                        CIO_CRW_EVENT(2, "chsc_process_crw: error in chsc "
                                      "request block!\n");
                        return 0;
                case 0x0005:
                        CIO_CRW_EVENT(2, "chsc_process_crw: no event "
                                      "information stored\n");
                        return 0;
                default:
                        CIO_CRW_EVENT(2, "chsc_process_crw: chsc response %d\n",
                                      sei_area->response.code);
                        return 0;
                }

                /* Check if we might have lost some information. */
                if (sei_area->flags & 0x40)
                        CIO_CRW_EVENT(2, "chsc_process_crw: Event information "
                                      "has been lost due to overflow!\n");

                if (sei_area->rs != 4) {
                        CIO_CRW_EVENT(2, "chsc_process_crw: reporting source "
                                      "(%04X) isn't a chpid!\n",
                                      sei_area->rsid);
                        continue;
                }

                /* Which kind of information was stored? */
                switch (sei_area->cc) {
                case 1: /* link incident */
                        CIO_CRW_EVENT(4, "chsc_process_crw: "
                                      "channel subsystem reports link incident,"
                                      " reporting source is chpid %x\n",
                                      sei_area->rsid);
                        chpid = __get_chpid_from_lir(sei_area->ccdf);
                        if (chpid < 0)
                                CIO_CRW_EVENT(4, "%s: Invalid LIR, skipping\n",
                                              __func__);
                        else
                                s390_set_chpid_offline(chpid);
                        break;

                case 2: /* I/O resource accessibility */
                        CIO_CRW_EVENT(4, "chsc_process_crw: "
                                      "channel subsystem reports some I/O "
                                      "devices may have become accessible\n");
                        pr_debug("Data received after sei:\n");
                        pr_debug("Validity flags: %x\n", sei_area->vf);

                        /* Allocate a new channel path structure, if needed. */
                        status = get_chp_status(sei_area->rsid);
                        if (status < 0)
                                new_channel_path(sei_area->rsid);
                        else if (!status)
                                break;
                        dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
                        res_data.chp = to_channelpath(dev);
                        pr_debug("chpid: %x", sei_area->rsid);
                        if ((sei_area->vf & 0xc0) != 0) {
                                res_data.fla = sei_area->fla;
                                if ((sei_area->vf & 0xc0) == 0xc0) {
                                        pr_debug(" full link addr: %x",
                                                 sei_area->fla);
                                        res_data.fla_mask = 0xffff;
                                } else {
                                        pr_debug(" link addr: %x",
                                                 sei_area->fla);
                                        res_data.fla_mask = 0xff00;
                                }
                        }
                        ret = s390_process_res_acc(&res_data);
                        pr_debug("\n\n");
                        put_device(dev);
                        break;

                default: /* other stuff */
                        CIO_CRW_EVENT(4, "chsc_process_crw: event %d\n",
                                      sei_area->cc);
                        break;
                }
        } while (sei_area->flags & 0x80);
        return ret;
}

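/*
 * A subchannel not yet known to us may have become available on the new
 * channel path; put it on the slow path for evaluation.
 */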
static inline int
__chp_add_new_sch(struct subchannel_id schid)
{
        struct schib schib;
        int ret;

        if (stsch(schid, &schib))
                /* We're through */
                return need_rescan ? -EAGAIN : -ENXIO;

        /* Put it on the slow path. */
        ret = css_enqueue_subchannel_slow(schid);
        if (ret) {
                css_clear_subchannel_slow_list();
                need_rescan = 1;
                return -EAGAIN;
        }
        return 0;
}

static int
__chp_add(struct subchannel_id schid, void *data)
{
        int i;
        struct channel_path *chp;
        struct subchannel *sch;

        chp = (struct channel_path *)data;
        sch = get_subchannel_by_schid(schid);
        if (!sch)
                /* Check if the subchannel is now available. */
                return __chp_add_new_sch(schid);
        spin_lock_irq(&sch->lock);
        for (i = 0; i < 8; i++)
                if (sch->schib.pmcw.chpid[i] == chp->id) {
                        if (stsch(sch->schid, &sch->schib) != 0) {
                                /* Endgame. */
                                spin_unlock_irq(&sch->lock);
                                return -ENXIO;
                        }
                        break;
                }
        if (i == 8) {
                spin_unlock_irq(&sch->lock);
                return 0;
        }
        sch->lpm = ((sch->schib.pmcw.pim &
                     sch->schib.pmcw.pam &
                     sch->schib.pmcw.pom)
                    | (0x80 >> i)) & sch->opm;

        if (sch->driver && sch->driver->verify)
                sch->driver->verify(&sch->dev);

        spin_unlock_irq(&sch->lock);
        put_device(&sch->dev);
        return 0;
}

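/*
 * Scan all subchannels for users of the newly available channel path and
 * update their path masks; returns -EAGAIN if the scan has to be repeated.
 */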
static int
chp_add(int chpid)
{
        int rc;
        char dbf_txt[15];
        struct device *dev;

        if (!get_chp_status(chpid))
                return 0; /* no need to do the rest */

        sprintf(dbf_txt, "cadd%x", chpid);
        CIO_TRACE_EVENT(2, dbf_txt);

        dev = get_device(&css[0]->chps[chpid]->dev);
        rc = for_each_subchannel(__chp_add, to_channelpath(dev));
        if (css_slow_subchannels_exist())
                rc = -EAGAIN;
        if (rc != -EAGAIN)
                rc = 0;
        put_device(dev);
        return rc;
}

/*
 * Handling of crw machine checks with channel path source.
 */
int
chp_process_crw(int chpid, int on)
{
        if (on == 0) {
                /* The path has gone away; use the link incident routine. */
                s390_set_chpid_offline(chpid);
                return 0; /* De-register is async anyway. */
        }
        /*
         * A path has become available. Allocate a new channel path
         * structure, if needed.
         */
        if (get_chp_status(chpid) < 0)
                new_channel_path(chpid);
        /* Avoid the extra overhead in process_rec_acc. */
        return chp_add(chpid);
}

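/*
 * Check whether an online device still has I/O running on the path that
 * is being varied off. If so, mark the device as waiting so the I/O is
 * given a grace period before it is killed; returns 1 in that case,
 * 0 otherwise.
 */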
static inline int
__check_for_io_and_kill(struct subchannel *sch, int index)
{
        int cc;

        if (!device_is_online(sch))
                /* cio could be doing I/O. */
                return 0;
        cc = stsch(sch->schid, &sch->schib);
        if (cc)
                return 0;
        if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) {
                device_set_waiting(sch);
                return 1;
        }
        return 0;
}

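/*
 * Adjust a subchannel's path masks after one of its channel paths has
 * been varied on or off and trigger reprobe or path verification as
 * needed; if the last usable path was varied off, schedule the subchannel
 * for deregistration via the slow path.
 */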
static inline void
__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
{
        int chp, old_lpm;
        unsigned long flags;

        if (!sch->ssd_info.valid)
                return;

        spin_lock_irqsave(&sch->lock, flags);
        old_lpm = sch->lpm;
        for (chp = 0; chp < 8; chp++) {
                if (sch->ssd_info.chpid[chp] != chpid)
                        continue;

                if (on) {
                        sch->opm |= (0x80 >> chp);
                        sch->lpm |= (0x80 >> chp);
                        if (!old_lpm)
                                device_trigger_reprobe(sch);
                        else if (sch->driver && sch->driver->verify)
                                sch->driver->verify(&sch->dev);
                } else {
                        sch->opm &= ~(0x80 >> chp);
                        sch->lpm &= ~(0x80 >> chp);
                        /*
                         * Give running I/O a grace period in which it
                         * can successfully terminate, even using the
                         * just varied off path. Then kill it.
                         */
                        if (!__check_for_io_and_kill(sch, chp) && !sch->lpm) {
                                if (css_enqueue_subchannel_slow(sch->schid)) {
                                        css_clear_subchannel_slow_list();
                                        need_rescan = 1;
                                }
                        } else if (sch->driver && sch->driver->verify)
                                sch->driver->verify(&sch->dev);
                }
                break;
        }
        spin_unlock_irqrestore(&sch->lock, flags);
}

static int
s390_subchannel_vary_chpid_off(struct device *dev, void *data)
{
        struct subchannel *sch;
        __u8 *chpid;

        sch = to_subchannel(dev);
        chpid = data;

        __s390_subchannel_vary_chpid(sch, *chpid, 0);
        return 0;
}

static int
s390_subchannel_vary_chpid_on(struct device *dev, void *data)
{
        struct subchannel *sch;
        __u8 *chpid;

        sch = to_subchannel(dev);
        chpid = data;

        __s390_subchannel_vary_chpid(sch, *chpid, 1);
        return 0;
}

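/*
 * Scan worker used after varying a chpid online: subchannels we already
 * know are skipped, unknown ones that respond to stsch are put on the
 * slow path for evaluation.
 */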
static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
        struct schib schib;
        struct subchannel *sch;

        sch = get_subchannel_by_schid(schid);
        if (sch) {
                put_device(&sch->dev);
                return 0;
        }
        if (stsch_err(schid, &schib))
                /* We're through */
                return -ENXIO;
        /* Put it on the slow path. */
        if (css_enqueue_subchannel_slow(schid)) {
                css_clear_subchannel_slow_list();
                need_rescan = 1;
                return -EAGAIN;
        }
        return 0;
}

/*
 * Function: s390_vary_chpid
 * Varies the specified chpid online or offline
 */
static int
s390_vary_chpid(__u8 chpid, int on)
{
        char dbf_text[15];
        int status;

        sprintf(dbf_text, on ? "varyon%x" : "varyoff%x", chpid);
        CIO_TRACE_EVENT(2, dbf_text);

        status = get_chp_status(chpid);
        if (status < 0) {
                printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid);
                return -EINVAL;
        }

        if (!on && !status) {
                printk(KERN_ERR "chpid %x is already offline\n", chpid);
                return -EINVAL;
        }

        set_chp_logically_online(chpid, on);

        /*
         * Redo path verification on the devices the chpid connects to.
         */
        bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
                         s390_subchannel_vary_chpid_on :
                         s390_subchannel_vary_chpid_off);
        if (on)
                /* Scan for new devices on varied on path. */
                for_each_subchannel(__s390_vary_chpid_on, NULL);
        if (need_rescan || css_slow_subchannels_exist())
                queue_work(slow_path_wq, &slow_path_work);
        return 0;
}

/*
 * Files for the channel path entries.
 */
static ssize_t
chp_status_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct channel_path *chp = container_of(dev, struct channel_path, dev);

        if (!chp)
                return 0;
        return (get_chp_status(chp->id) ? sprintf(buf, "online\n") :
                sprintf(buf, "offline\n"));
}

static ssize_t
chp_status_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
        struct channel_path *cp = container_of(dev, struct channel_path, dev);
        char cmd[10];
        int num_args;
        int error;

        num_args = sscanf(buf, "%5s", cmd);
        if (!num_args)
                return count;

        if (!strnicmp(cmd, "on", 2))
                error = s390_vary_chpid(cp->id, 1);
        else if (!strnicmp(cmd, "off", 3))
                error = s390_vary_chpid(cp->id, 0);
        else
                error = -EINVAL;

        return error < 0 ? error : count;
}

static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);

static ssize_t
chp_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct channel_path *chp = container_of(dev, struct channel_path, dev);

        if (!chp)
                return 0;
        return sprintf(buf, "%x\n", chp->desc.desc);
}

static DEVICE_ATTR(type, 0444, chp_type_show, NULL);

static struct attribute *chp_attrs[] = {
        &dev_attr_status.attr,
        &dev_attr_type.attr,
        NULL,
};

static struct attribute_group chp_attr_group = {
        .attrs = chp_attrs,
};

static void
chp_release(struct device *dev)
{
        struct channel_path *cp;

        cp = container_of(dev, struct channel_path, dev);
        kfree(cp);
}

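/*
 * Issue the CHSC store-channel-path-description command (code 0x0002) for
 * a single chpid and copy the returned description to @desc.
 */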
static int
chsc_determine_channel_path_description(int chpid,
                                        struct channel_path_desc *desc)
{
        int ccode, ret;

        struct {
                struct chsc_header request;
                u32 : 24;
                u32 first_chpid : 8;
                u32 : 24;
                u32 last_chpid : 8;
                u32 zeroes1;
                struct chsc_header response;
                u32 zeroes2;
                struct channel_path_desc desc;
        } *scpd_area;

        scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scpd_area)
                return -ENOMEM;

        scpd_area->request = (struct chsc_header) {
                .length = 0x0010,
                .code   = 0x0002,
        };

        scpd_area->first_chpid = chpid;
        scpd_area->last_chpid = chpid;

        ccode = chsc(scpd_area);
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out;
        }

        switch (scpd_area->response.code) {
        case 0x0001: /* Success. */
                memcpy(desc, &scpd_area->desc,
                       sizeof(struct channel_path_desc));
                ret = 0;
                break;
        case 0x0003: /* Invalid block. */
        case 0x0007: /* Invalid format. */
        case 0x0008: /* Other invalid block. */
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
                ret = -EINVAL;
                break;
        case 0x0004: /* Command not provided in model. */
                CIO_CRW_EVENT(2, "Model does not provide scpd\n");
                ret = -EOPNOTSUPP;
                break;
        default:
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              scpd_area->response.code);
                ret = -EIO;
        }
out:
        free_page((unsigned long)scpd_area);
        return ret;
}

/*
 * Entries for chpids on the system bus.
 * This replaces /proc/chpids.
 */
static int
new_channel_path(int chpid)
{
        struct channel_path *chp;
        int ret;

        chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
        if (!chp)
                return -ENOMEM;

        /* fill in status, etc. */
        chp->id = chpid;
        chp->state = 1;
        chp->dev = (struct device) {
                .parent  = &css[0]->device,
                .release = chp_release,
        };
        snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid);

        /* Obtain channel path description and fill it in. */
        ret = chsc_determine_channel_path_description(chpid, &chp->desc);
        if (ret)
                goto out_free;

        /* make it known to the system */
        ret = device_register(&chp->dev);
        if (ret) {
                printk(KERN_WARNING "%s: could not register %02x\n",
                       __func__, chpid);
                /* chp is freed by chp_release via put_device. */
                put_device(&chp->dev);
                return ret;
        }
        ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
        if (ret) {
                /* chp is freed by chp_release when the device goes away. */
                device_unregister(&chp->dev);
                return ret;
        }
        css[0]->chps[chpid] = chp;
        return 0;
out_free:
        kfree(chp);
        return ret;
}

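/*
 * Return a kmalloc'ed copy of the channel path description for one of the
 * subchannel's chpids, or NULL if the channel path is not known or memory
 * is exhausted. The caller must free the result.
 */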
void *
chsc_get_chp_desc(struct subchannel *sch, int chp_no)
{
        struct channel_path *chp;
        struct channel_path_desc *desc;

        chp = css[0]->chps[sch->schib.pmcw.chpid[chp_no]];
        if (!chp)
                return NULL;
        desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
        if (!desc)
                return NULL;
        memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
        return desc;
}

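/*
 * Allocate the static page used by chsc_process_crw for store event
 * information calls; registered as a subsys initcall below.
 */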
static int __init
chsc_alloc_sei_area(void)
{
        sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sei_page)
                printk(KERN_WARNING "Can't allocate page for processing of "
                       "chsc machine checks!\n");
        return (sei_page ? 0 : -ENOMEM);
}

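/*
 * Enable an optional channel subsystem facility: issue CHSC command
 * 0x0031 with the operation code selecting the facility to be enabled.
 */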
int __init
chsc_enable_facility(int operation_code)
{
        int ret;
        struct {
                struct chsc_header request;
                u8 reserved1:4;
                u8 format:4;
                u8 reserved2;
                u16 operation_code;
                u32 reserved3;
                u32 reserved4;
                u32 operation_data_area[252];
                struct chsc_header response;
                u32 reserved5:4;
                u32 format2:4;
                u32 reserved6:24;
        } *sda_area;

        sda_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sda_area)
                return -ENOMEM;
        sda_area->request = (struct chsc_header) {
                .length = 0x0400,
                .code = 0x0031,
        };
        sda_area->operation_code = operation_code;

        ret = chsc(sda_area);
        if (ret > 0) {
                ret = (ret == 3) ? -ENODEV : -EBUSY;
                goto out;
        }
        switch (sda_area->response.code) {
        case 0x0001: /* everything ok */
                ret = 0;
                break;
        case 0x0003: /* invalid request block */
        case 0x0007:
                ret = -EINVAL;
                break;
        case 0x0004: /* command not provided */
        case 0x0101: /* facility not provided */
                ret = -EOPNOTSUPP;
                break;
        default: /* something went wrong */
                ret = -EIO;
        }
out:
        free_page((unsigned long)sda_area);
        return ret;
}

subsys_initcall(chsc_alloc_sei_area);

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

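/*
 * Issue the CHSC store-channel-subsystem-characteristics command (code
 * 0x0010) and copy the general and chsc characteristics into the exported
 * globals above.
 */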
int __init
chsc_determine_css_characteristics(void)
{
        int result;
        struct {
                struct chsc_header request;
                u32 reserved1;
                u32 reserved2;
                u32 reserved3;
                struct chsc_header response;
                u32 reserved4;
                u32 general_char[510];
                u32 chsc_char[518];
        } *scsc_area;

        scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scsc_area) {
                printk(KERN_WARNING "cio: Was not able to determine "
                       "available CHSCs due to no memory.\n");
                return -ENOMEM;
        }

        scsc_area->request = (struct chsc_header) {
                .length = 0x0010,
                .code   = 0x0010,
        };

        result = chsc(scsc_area);
        if (result) {
                printk(KERN_WARNING "cio: Was not able to determine "
                       "available CHSCs, cc=%i.\n", result);
                result = -EIO;
                goto exit;
        }

        if (scsc_area->response.code != 1) {
                printk(KERN_WARNING "cio: Was not able to determine "
                       "available CHSCs.\n");
                result = -EIO;
                goto exit;
        }
        memcpy(&css_general_characteristics, scsc_area->general_char,
               sizeof(css_general_characteristics));
        memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
               sizeof(css_chsc_characteristics));
exit:
        free_page((unsigned long)scsc_area);
        return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);