drivers/s390/cio/css.c
/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2009
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *            Cornelia Huck (cornelia.huck@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
static int need_reprobe = 0;
static int max_ssid = 0;

struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];

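/*
 * Call fn() for each subchannel id on each known subchannel set. A
 * nonzero return value from fn() ends the scan of the current set;
 * the last value returned by fn() is passed back to the caller.
 */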
int
for_each_subchannel(int (*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	ret = -ENODEV;
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}

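/*
 * Callback data for for_each_subchannel_staged(): the idset tracks
 * which subchannel ids have not yet been seen as registered devices.
 */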
struct cb_data {
	void *data;
	struct idset *set;
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);
	struct cb_data *cb = data;
	int rc = 0;

	idset_sch_del(cb->set, sch->schid);
	if (cb->fn_known_sch)
		rc = cb->fn_known_sch(sch, cb->data);
	return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	int rc = 0;

	if (idset_sch_contains(cb->set, schid))
		rc = cb->fn_unknown_sch(schid, cb->data);
	return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch;
	int rc = 0;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		if (cb->fn_known_sch)
			rc = cb->fn_known_sch(sch, cb->data);
		put_device(&sch->dev);
	} else {
		if (cb->fn_unknown_sch)
			rc = cb->fn_unknown_sch(schid, cb->data);
	}

	return rc;
}

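/**
 * for_each_subchannel_staged - call functions for known and unknown subchannels
 * @fn_known: function to call for each registered subchannel
 * @fn_unknown: function to call for each unregistered subchannel id
 * @data: opaque pointer passed through to both callbacks
 *
 * Registered subchannels are processed first via the css bus; the idset
 * then identifies the remaining ids, which are handed to @fn_unknown.
 * If the idset cannot be allocated, fall back to a brute-force scan of
 * all subchannel ids.
 */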
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
			       void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;

	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of oom */
		return for_each_subchannel(call_fn_all_sch, &cb);

	idset_fill(cb.set);

	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}

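/*
 * Allocate a subchannel structure from DMA-capable memory and let
 * cio_validate_subchannel() fill and check it.
 */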
static struct subchannel *
css_alloc_subchannel(struct subchannel_id schid)
{
	struct subchannel *sch;
	int ret;

	sch = kmalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (sch == NULL)
		return ERR_PTR(-ENOMEM);
	ret = cio_validate_subchannel(sch, schid);
	if (ret < 0) {
		kfree(sch);
		return ERR_PTR(ret);
	}
	return sch;
}

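/*
 * Release callback for subchannel devices. The console subchannel is
 * statically allocated and therefore never freed.
 */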
static void
css_subchannel_release(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (!cio_is_console(sch->schid)) {
		/* Reset intparm to zeroes. */
		sch->config.intparm = 0;
		cio_commit_config(sch);
		kfree(sch->lock);
		kfree(sch);
	}
}

static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
		     sch->schid.sch_no);
	ret = device_register(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);

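/*
 * Build subchannel description data from the pmcw when full chsc
 * information is not available (e.g. for the early console subchannel).
 */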
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			if (!chp_is_registered(ssd->chpid[i]))
				chp_new(ssd->chpid[i]);
	}
}

void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	if (cio_is_console(sch->schid)) {
		/* Console is initialized too early for functions requiring
		 * memory allocation. */
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
	} else {
		ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
		if (ret)
			ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
		ssd_register_chpids(&sch->ssd_info);
	}
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR(type, 0444, type_show, NULL);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);

static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};

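/*
 * Initialize the driver core fields of a new subchannel and register
 * it with the css bus.
 */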
static int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.release = &css_subchannel_release;
	sch->dev.groups = default_subch_attr_groups;
	/*
	 * We don't want to generate uevents for I/O subchannels that don't
	 * have a working ccw device behind them since they will be
	 * unregistered before they can be used anyway, so we delay the add
	 * uevent until after device recognition was successful.
	 * Note that we suppress the uevent for all subchannel types;
	 * the subchannel driver can decide itself when it wants to inform
	 * userspace of its existence.
	 */
	dev_set_uevent_suppress(&sch->dev, 1);
	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	if (!sch->driver) {
		/*
		 * No driver matched. Generate the uevent now so that
		 * a fitting driver module may be loaded based on the
		 * modalias.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	return ret;
}

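/* Allocate a new subchannel for the given id and register it. */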
int css_probe_device(struct subchannel_id schid)
{
	int ret;
	struct subchannel *sch;

	sch = css_alloc_subchannel(schid);
	if (IS_ERR(sch))
		return PTR_ERR(sch);
	ret = css_register_subchannel(sch);
	if (ret)
		put_device(&sch->dev);
	return ret;
}

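/*
 * Match helper for bus_find_device(): return nonzero if the device is
 * the subchannel with the given id.
 */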
static int
check_subchannel(struct device *dev, void *data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}

/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

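/*
 * Evaluate a subchannel id that has no device yet: defer to the slow
 * path unless slow evaluation was requested, ignore unusable
 * subchannels and probe the rest.
 */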
static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
		/* Unusable - ignore. */
		return 0;
	}
	CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, "
			 "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER);

	return css_probe_device(schid);
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but "
				"no sch_event handler provided.\n");
	}
	return ret;
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}

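/*
 * "Slow path" evaluation: ids of subchannels that need to be looked at
 * again are collected in slow_subchannel_set and processed from a work
 * queue.
 */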
static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;

static int __init slow_subchannel_init(void)
{
	spin_lock_init(&slow_subchannel_lock);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}

static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
	int eval;
	int rc;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
	}
	return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
			break;
		default:
			rc = 0;
		}
	}
	return rc;
}

static void css_slow_path_func(struct work_struct *unused)
{
	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
}

static DECLARE_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *slow_path_wq;

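/**
 * css_schedule_eval - schedule deferred evaluation of a subchannel
 * @schid: subchannel id to be added to the slow-path set
 *
 * The subchannel is picked up later by the slow-path work function.
 */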
void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_wait_for_slow_path(void)
{
	flush_workqueue(slow_path_wq);
}

/* Reprobe subchannel if unregistered. */
static int reprobe_subchannel(struct subchannel_id schid, void *data)
{
	int ret;

	CIO_MSG_EVENT(6, "cio: reprobe 0.%x.%04x\n",
		      schid.ssid, schid.sch_no);
	if (need_reprobe)
		return -EAGAIN;

	ret = css_probe_device(schid);
	switch (ret) {
	case 0:
		break;
	case -ENXIO:
	case -ENOMEM:
	case -EIO:
		/* These should abort looping */
		break;
	default:
		ret = 0;
	}

	return ret;
}

static void reprobe_after_idle(struct work_struct *unused)
{
	/* Make sure initial subchannel scan is done. */
	wait_event(ccw_device_init_wq,
		   atomic_read(&ccw_device_init_count) == 0);
	if (need_reprobe)
		css_schedule_reprobe();
}

static DECLARE_WORK(reprobe_idle_work, reprobe_after_idle);

/* Work function used to reprobe all unregistered subchannels. */
static void reprobe_all(struct work_struct *unused)
{
	int ret;

	CIO_MSG_EVENT(4, "reprobe start\n");

	/* Make sure initial subchannel scan is done. */
	if (atomic_read(&ccw_device_init_count) != 0) {
		queue_work(ccw_device_work, &reprobe_idle_work);
		return;
	}
	need_reprobe = 0;
	ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL);

	CIO_MSG_EVENT(4, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
		      need_reprobe);
}

static DECLARE_WORK(css_reprobe_work, reprobe_all);

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	need_reprobe = 1;
	queue_work(slow_path_wq, &css_reprobe_work);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 8) & 3;

	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}

static int __init
__init_channel_subsystem(struct subchannel_id schid, void *data)
{
	struct subchannel *sch;
	int ret;

	if (cio_is_console(schid))
		sch = cio_get_console_subchannel();
	else {
		sch = css_alloc_subchannel(schid);
		if (IS_ERR(sch))
			ret = PTR_ERR(sch);
		else
			ret = 0;
		switch (ret) {
		case 0:
			break;
		case -ENOMEM:
			panic("Out of memory in init_channel_subsystem\n");
		/* -ENXIO: no more subchannels. */
		case -ENXIO:
			return ret;
		/* -EIO: this subchannel set not supported. */
		case -EIO:
			return ret;
		default:
			return 0;
		}
	}
	/*
	 * We register ALL valid subchannels in ioinfo, even those
	 * that have been present before init_channel_subsystem.
	 * These subchannels can't have been registered yet (kmalloc
	 * not working) so we do it now. This is true e.g. for the
	 * console subchannel.
	 */
	if (css_register_subchannel(sch)) {
		if (!cio_is_console(schid))
			put_device(&sch->dev);
	}
	return 0;
}

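/*
 * Build the global path group id: use the extended cssid if multiple
 * channel subsystems are supported, the cpu address otherwise, and add
 * cpu id, cpu model and the high word of the TOD clock.
 */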
static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	if (css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
	} else {
#ifdef CONFIG_SMP
		css->global_pgid.pgid_high.cpu_addr = stap();
#else
		css->global_pgid.pgid_high.cpu_addr = 0;
#endif
	}
	css->global_pgid.cpu_id = S390_lowcore.cpu_id.ident;
	css->global_pgid.cpu_model = S390_lowcore.cpu_id.machine;
	css->global_pgid.tod_high = tod_high;
}

static void
channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css;

	css = to_css(dev);
	mutex_destroy(&css->mutex);
	if (css->pseudo_subchannel) {
		/* Implies that it has been generated but never registered. */
		css_subchannel_release(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
	}
	kfree(css);
}

static ssize_t
css_cm_enable_show(struct device *dev, struct device_attribute *attr,
		   char *buf)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	if (!css)
		return 0;
	mutex_lock(&css->mutex);
	ret = sprintf(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
}

static ssize_t
css_cm_enable_store(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 16, &val);
	if (ret)
		return ret;
	mutex_lock(&css->mutex);
	switch (val) {
	case 0:
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case 1:
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&css->mutex);
	return ret < 0 ? ret : count;
}

static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);

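/*
 * Initialize one channel_subsystem structure: create its "defunct"
 * pseudo subchannel and generate the global path group id.
 */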
static int __init setup_css(int nr)
{
	u32 tod_high;
	int ret;
	struct channel_subsystem *css;

	css = channel_subsystems[nr];
	memset(css, 0, sizeof(struct channel_subsystem));
	css->pseudo_subchannel =
		kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
	if (!css->pseudo_subchannel)
		return -ENOMEM;
	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	ret = cio_create_sch_lock(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		return ret;
	}
	mutex_init(&css->mutex);
	css->valid = 1;
	css->cssid = nr;
	dev_set_name(&css->device, "css%x", nr);
	css->device.release = channel_subsystem_release;
	tod_high = (u32) (get_clock() >> 32);
	css_generate_pgid(css, tod_high);
	return 0;
}

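/* Disable channel measurement on all channel subsystems before reboot. */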
static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
	int ret, i;

	ret = NOTIFY_DONE;
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = channel_subsystems[i];
		mutex_lock(&css->mutex);
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
		mutex_unlock(&css->mutex);
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};

/*
 * Since the css devices are not on a bus, have no class and have no
 * special device type, we cannot stop/restart channel path measurements
 * via the normal suspend/resume callbacks, but have to use notifiers.
 */
static int css_power_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	void *secm_area;
	int ret, i;

	switch (event) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		ret = NOTIFY_DONE;
		for (i = 0; i <= __MAX_CSSID; i++) {
			struct channel_subsystem *css;

			css = channel_subsystems[i];
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			secm_area = (void *)get_zeroed_page(GFP_KERNEL |
							    GFP_DMA);
			if (secm_area) {
				if (__chsc_do_secm(css, 0, secm_area))
					ret = NOTIFY_BAD;
				free_page((unsigned long)secm_area);
			} else
				ret = NOTIFY_BAD;

			mutex_unlock(&css->mutex);
		}
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		ret = NOTIFY_DONE;
		for (i = 0; i <= __MAX_CSSID; i++) {
			struct channel_subsystem *css;

			css = channel_subsystems[i];
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			secm_area = (void *)get_zeroed_page(GFP_KERNEL |
							    GFP_DMA);
			if (secm_area) {
				if (__chsc_do_secm(css, 1, secm_area))
					ret = NOTIFY_BAD;
				free_page((unsigned long)secm_area);
			} else
				ret = NOTIFY_BAD;

			mutex_unlock(&css->mutex);
		}
		/* search for subchannels which appeared during hibernation */
		css_schedule_reprobe();
		break;
	default:
		ret = NOTIFY_DONE;
	}
	return ret;
}

static struct notifier_block css_power_notifier = {
	.notifier_call = css_power_event,
};

/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel instances are created during probing (except for
 * the static console subchannel).
 */
static int __init
init_channel_subsystem(void)
{
	int ret, i;

	ret = chsc_determine_css_characteristics();
	if (ret == -ENOMEM)
		goto out; /* No need to continue. */

	ret = chsc_alloc_sei_area();
	if (ret)
		goto out;

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
	if (ret)
		goto out;

	ret = bus_register(&css_bus_type);
	if (ret)
		goto out;

	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	switch (ret) {
	case 0: /* Success. */
		max_ssid = __MAX_SSID;
		break;
	case -ENOMEM:
		goto out_bus;
	default:
		max_ssid = 0;
	}
	/* Setup css structure. */
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
		if (!css) {
			ret = -ENOMEM;
			goto out_unregister;
		}
		channel_subsystems[i] = css;
		ret = setup_css(i);
		if (ret) {
			kfree(channel_subsystems[i]);
			goto out_unregister;
		}
		ret = device_register(&css->device);
		if (ret) {
			put_device(&css->device);
			goto out_unregister;
		}
		if (css_chsc_characteristics.secm) {
			ret = device_create_file(&css->device,
						 &dev_attr_cm_enable);
			if (ret)
				goto out_device;
		}
		ret = device_register(&css->pseudo_subchannel->dev);
		if (ret) {
			put_device(&css->pseudo_subchannel->dev);
			goto out_file;
		}
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_unregister;
	ret = register_pm_notifier(&css_power_notifier);
	if (ret) {
		unregister_reboot_notifier(&css_reboot_notifier);
		goto out_unregister;
	}
	css_init_done = 1;

	/* Enable default isc for I/O subchannels. */
	isc_register(IO_SCH_ISC);

	for_each_subchannel(__init_channel_subsystem, NULL);
	return 0;
out_file:
	if (css_chsc_characteristics.secm)
		device_remove_file(&channel_subsystems[i]->device,
				   &dev_attr_cm_enable);
out_device:
	device_unregister(&channel_subsystems[i]->device);
out_unregister:
	while (i > 0) {
		struct channel_subsystem *css;

		i--;
		css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
		if (css_chsc_characteristics.secm)
			device_remove_file(&css->device,
					   &dev_attr_cm_enable);
		device_unregister(&css->device);
	}
out_bus:
	bus_unregister(&css_bus_type);
out:
	crw_unregister_handler(CRW_RSC_SCH);
	chsc_free_sei_area();
	idset_free(slow_subchannel_set);
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
	return ret;
}

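/* Check whether a subchannel is the "defunct" pseudo subchannel of its css. */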
int sch_is_pseudo_sch(struct subchannel *sch)
{
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

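/*
 * Driver matching: a css driver handles a subchannel if the
 * subchannel's type appears in the driver's id table.
 */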
static int css_bus_match(struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *driver = to_cssdriver(drv);
	struct css_device_id *id;

	for (id = driver->subchannel_type; id->match_flags; id++) {
		if (sch->st == id->type)
			return 1;
	}

	return 0;
}

static int css_probe(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	sch->driver = to_cssdriver(dev->driver);
	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
	if (ret)
		sch->driver = NULL;
	return ret;
}

static int css_remove(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
	sch->driver = NULL;
	return ret;
}

static void css_shutdown(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver && sch->driver->shutdown)
		sch->driver->shutdown(sch);
}

static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = add_uevent_var(env, "ST=%01X", sch->st);
	if (ret)
		return ret;
	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
	return ret;
}

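/*
 * The power management callbacks below forward the transition to the
 * css driver bound to the subchannel, if one is present.
 */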
static int css_pm_prepare(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (mutex_is_locked(&sch->reg_mutex))
		return -EAGAIN;
	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	/* Notify drivers that they may not register children. */
	return drv->prepare ? drv->prepare(sch) : 0;
}

static void css_pm_complete(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return;
	drv = to_cssdriver(sch->dev.driver);
	if (drv->complete)
		drv->complete(sch);
}

static int css_pm_freeze(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->freeze ? drv->freeze(sch) : 0;
}

static int css_pm_thaw(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->thaw ? drv->thaw(sch) : 0;
}

static int css_pm_restore(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->restore ? drv->restore(sch) : 0;
}

static struct dev_pm_ops css_pm_ops = {
	.prepare = css_pm_prepare,
	.complete = css_pm_complete,
	.freeze = css_pm_freeze,
	.thaw = css_pm_thaw,
	.restore = css_pm_restore,
};

struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
	.uevent   = css_uevent,
	.pm = &css_pm_ops,
};

/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.name = cdrv->name;
	cdrv->drv.bus = &css_bus_type;
	cdrv->drv.owner = cdrv->owner;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);

/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);

subsys_initcall(init_channel_subsystem);

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(css_bus_type);