[S390] cio: Introduce modalias for css bus.
[pandora-kernel.git] / drivers / s390 / cio / css.c
1 /*
2  *  drivers/s390/cio/css.c
3  *  driver for channel subsystem
4  *
5  *    Copyright IBM Corp. 2002,2008
6  *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
7  *               Cornelia Huck (cornelia.huck@de.ibm.com)
8  */
9 #include <linux/module.h>
10 #include <linux/init.h>
11 #include <linux/device.h>
12 #include <linux/slab.h>
13 #include <linux/errno.h>
14 #include <linux/list.h>
15 #include <linux/reboot.h>
16
17 #include "css.h"
18 #include "cio.h"
19 #include "cio_debug.h"
20 #include "ioasm.h"
21 #include "chsc.h"
22 #include "device.h"
23 #include "idset.h"
24 #include "chp.h"
25
/* Set once init_channel_subsystem() has completed successfully. */
int css_init_done = 0;
/* Request flag for a full reprobe; polled by the reprobe worker. */
static int need_reprobe = 0;
/* Highest subchannel-set id in use (stays 0 unless MSS is enabled). */
static int max_ssid = 0;

struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];

/* Non-zero once the general css characteristics could be determined. */
int css_characteristics_avail = 0;
33
34 int
35 for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
36 {
37         struct subchannel_id schid;
38         int ret;
39
40         init_subchannel_id(&schid);
41         ret = -ENODEV;
42         do {
43                 do {
44                         ret = fn(schid, data);
45                         if (ret)
46                                 break;
47                 } while (schid.sch_no++ < __MAX_SUBCHANNEL);
48                 schid.sch_no = 0;
49         } while (schid.ssid++ < max_ssid);
50         return ret;
51 }
52
/*
 * Shared state for for_each_subchannel_staged(): @set starts out full
 * and every registered subchannel removes its id, so the ids left in
 * @set afterwards are the "unknown" (unregistered) ones.
 */
struct cb_data {
	void *data;		/* caller cookie handed to both callbacks */
	struct idset *set;	/* candidate set of unregistered ids */
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};
59
60 static int call_fn_known_sch(struct device *dev, void *data)
61 {
62         struct subchannel *sch = to_subchannel(dev);
63         struct cb_data *cb = data;
64         int rc = 0;
65
66         idset_sch_del(cb->set, sch->schid);
67         if (cb->fn_known_sch)
68                 rc = cb->fn_known_sch(sch, cb->data);
69         return rc;
70 }
71
72 static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
73 {
74         struct cb_data *cb = data;
75         int rc = 0;
76
77         if (idset_sch_contains(cb->set, schid))
78                 rc = cb->fn_unknown_sch(schid, cb->data);
79         return rc;
80 }
81
82 int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
83                                int (*fn_unknown)(struct subchannel_id,
84                                void *), void *data)
85 {
86         struct cb_data cb;
87         int rc;
88
89         cb.set = idset_sch_new();
90         if (!cb.set)
91                 return -ENOMEM;
92         idset_fill(cb.set);
93         cb.data = data;
94         cb.fn_known_sch = fn_known;
95         cb.fn_unknown_sch = fn_unknown;
96         /* Process registered subchannels. */
97         rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
98         if (rc)
99                 goto out;
100         /* Process unregistered subchannels. */
101         if (fn_unknown)
102                 rc = for_each_subchannel(call_fn_unknown_sch, &cb);
103 out:
104         idset_free(cb.set);
105
106         return rc;
107 }
108
109 static struct subchannel *
110 css_alloc_subchannel(struct subchannel_id schid)
111 {
112         struct subchannel *sch;
113         int ret;
114
115         sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
116         if (sch == NULL)
117                 return ERR_PTR(-ENOMEM);
118         ret = cio_validate_subchannel (sch, schid);
119         if (ret < 0) {
120                 kfree(sch);
121                 return ERR_PTR(ret);
122         }
123         return sch;
124 }
125
126 static void
127 css_free_subchannel(struct subchannel *sch)
128 {
129         if (sch) {
130                 /* Reset intparm to zeroes. */
131                 sch->schib.pmcw.intparm = 0;
132                 cio_modify(sch);
133                 kfree(sch->lock);
134                 kfree(sch);
135         }
136 }
137
138 static void
139 css_subchannel_release(struct device *dev)
140 {
141         struct subchannel *sch;
142
143         sch = to_subchannel(dev);
144         if (!cio_is_console(sch->schid)) {
145                 kfree(sch->lock);
146                 kfree(sch);
147         }
148 }
149
150 static int css_sch_device_register(struct subchannel *sch)
151 {
152         int ret;
153
154         mutex_lock(&sch->reg_mutex);
155         ret = device_register(&sch->dev);
156         mutex_unlock(&sch->reg_mutex);
157         return ret;
158 }
159
/* Unregister a subchannel device, serialized against registration. */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
166
167 static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
168 {
169         int i;
170         int mask;
171
172         memset(ssd, 0, sizeof(struct chsc_ssd_info));
173         ssd->path_mask = pmcw->pim;
174         for (i = 0; i < 8; i++) {
175                 mask = 0x80 >> i;
176                 if (pmcw->pim & mask) {
177                         chp_id_init(&ssd->chpid[i]);
178                         ssd->chpid[i].id = pmcw->chpid[i];
179                 }
180         }
181 }
182
183 static void ssd_register_chpids(struct chsc_ssd_info *ssd)
184 {
185         int i;
186         int mask;
187
188         for (i = 0; i < 8; i++) {
189                 mask = 0x80 >> i;
190                 if (ssd->path_mask & mask)
191                         if (!chp_is_registered(ssd->chpid[i]))
192                                 chp_new(ssd->chpid[i]);
193         }
194 }
195
196 void css_update_ssd_info(struct subchannel *sch)
197 {
198         int ret;
199
200         if (cio_is_console(sch->schid)) {
201                 /* Console is initialized too early for functions requiring
202                  * memory allocation. */
203                 ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
204         } else {
205                 ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
206                 if (ret)
207                         ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
208                 ssd_register_chpids(&sch->ssd_info);
209         }
210 }
211
/* sysfs "type" attribute: the subchannel type as one hex digit. */
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR(type, 0444, type_show, NULL);
221
/*
 * sysfs "modalias" attribute.  Must stay in sync with the MODALIAS
 * uevent variable emitted by css_uevent() below.
 */
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
231
/* Default sysfs attributes present on every subchannel device. */
static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};
246
/*
 * Wire up the driver-core fields of a freshly validated subchannel and
 * register it on the css bus.  Returns 0 or the device_register() errno.
 */
static int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.release = &css_subchannel_release;
	sch->dev.groups = default_subch_attr_groups;
	/*
	 * We don't want to generate uevents for I/O subchannels that don't
	 * have a working ccw device behind them since they will be
	 * unregistered before they can be used anyway, so we delay the add
	 * uevent until after device recognition was successful.
	 * Note that we suppress the uevent for all subchannel types;
	 * the subchannel driver can decide itself when it wants to inform
	 * userspace of its existence.
	 */
	sch->dev.uevent_suppress = 1;
	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	if (!sch->driver) {
		/*
		 * No driver matched. Generate the uevent now so that
		 * a fitting driver module may be loaded based on the
		 * modalias.
		 */
		sch->dev.uevent_suppress = 0;
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	return ret;
}
285
286 static int css_probe_device(struct subchannel_id schid)
287 {
288         int ret;
289         struct subchannel *sch;
290
291         sch = css_alloc_subchannel(schid);
292         if (IS_ERR(sch))
293                 return PTR_ERR(sch);
294         ret = css_register_subchannel(sch);
295         if (ret)
296                 css_free_subchannel(sch);
297         return ret;
298 }
299
300 static int
301 check_subchannel(struct device * dev, void * data)
302 {
303         struct subchannel *sch;
304         struct subchannel_id *schid = data;
305
306         sch = to_subchannel(dev);
307         return schid_equal(&sch->schid, schid);
308 }
309
310 struct subchannel *
311 get_subchannel_by_schid(struct subchannel_id schid)
312 {
313         struct device *dev;
314
315         dev = bus_find_device(&css_bus_type, NULL,
316                               &schid, check_subchannel);
317
318         return dev ? to_subchannel(dev) : NULL;
319 }
320
321 /**
322  * css_sch_is_valid() - check if a subchannel is valid
323  * @schib: subchannel information block for the subchannel
324  */
325 int css_sch_is_valid(struct schib *schib)
326 {
327         if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
328                 return 0;
329         return 1;
330 }
331 EXPORT_SYMBOL_GPL(css_sch_is_valid);
332
333 static int css_get_subchannel_status(struct subchannel *sch)
334 {
335         struct schib schib;
336
337         if (stsch(sch->schid, &schib))
338                 return CIO_GONE;
339         if (!css_sch_is_valid(&schib))
340                 return CIO_GONE;
341         if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
342                 return CIO_REVALIDATE;
343         if (!sch->lpm)
344                 return CIO_NO_PATH;
345         return CIO_OPER;
346 }
347
/*
 * Re-evaluate an already registered subchannel (after a machine check
 * or from the slow path) and decide whether to keep, reprobe or
 * unregister it.  Returns 0, -EAGAIN to request re-evaluation on the
 * slow path, or the css_probe_device() result for UNREGISTER_PROBE.
 * Note the careful lock handling: sch->lock is dropped around callers
 * that take it themselves (driver notify, device unregister).
 */
static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int event, ret, disc;
	unsigned long flags;
	enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;

	spin_lock_irqsave(sch->lock, flags);
	disc = device_is_disconnected(sch);
	if (disc && slow) {
		/* Disconnected devices are evaluated directly only.*/
		spin_unlock_irqrestore(sch->lock, flags);
		return 0;
	}
	/* No interrupt after machine check - kill pending timers. */
	device_kill_pending_timer(sch);
	if (!disc && !slow) {
		/* Non-disconnected devices are evaluated on the slow path. */
		spin_unlock_irqrestore(sch->lock, flags);
		return -EAGAIN;
	}
	event = css_get_subchannel_status(sch);
	CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
		      sch->schid.ssid, sch->schid.sch_no, event,
		      disc ? "disconnected" : "normal",
		      slow ? "slow" : "fast");
	/* Analyze subchannel status. */
	action = NONE;
	switch (event) {
	case CIO_NO_PATH:
		if (disc) {
			/* Check if paths have become available. */
			action = REPROBE;
			break;
		}
		/* fall through */
	case CIO_GONE:
		/* Prevent unwanted effects when opening lock. */
		cio_disable_subchannel(sch);
		device_set_disconnected(sch);
		/* Ask driver what to do with device. */
		action = UNREGISTER;
		if (sch->driver && sch->driver->notify) {
			/* Drop the lock across the driver callback. */
			spin_unlock_irqrestore(sch->lock, flags);
			ret = sch->driver->notify(sch, event);
			spin_lock_irqsave(sch->lock, flags);
			if (ret)
				/* Driver wants to keep the device. */
				action = NONE;
		}
		break;
	case CIO_REVALIDATE:
		/* Device will be removed, so no notify necessary. */
		if (disc)
			/* Reprobe because immediate unregister might block. */
			action = REPROBE;
		else
			action = UNREGISTER_PROBE;
		break;
	case CIO_OPER:
		if (disc)
			/* Get device operational again. */
			action = REPROBE;
		break;
	}
	/* Perform action. */
	ret = 0;
	switch (action) {
	case UNREGISTER:
	case UNREGISTER_PROBE:
		/* Unregister device (will use subchannel lock). */
		spin_unlock_irqrestore(sch->lock, flags);
		css_sch_device_unregister(sch);
		spin_lock_irqsave(sch->lock, flags);

		/* Reset intparm to zeroes. */
		sch->schib.pmcw.intparm = 0;
		cio_modify(sch);
		break;
	case REPROBE:
		device_trigger_reprobe(sch);
		break;
	default:
		break;
	}
	spin_unlock_irqrestore(sch->lock, flags);
	/* Probe if necessary. */
	if (action == UNREGISTER_PROBE)
		ret = css_probe_device(sch->schid);

	return ret;
}
438
439 static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
440 {
441         struct schib schib;
442
443         if (!slow) {
444                 /* Will be done on the slow path. */
445                 return -EAGAIN;
446         }
447         if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
448                 /* Unusable - ignore. */
449                 return 0;
450         }
451         CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, "
452                          "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER);
453
454         return css_probe_device(schid);
455 }
456
457 static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
458 {
459         struct subchannel *sch;
460         int ret;
461
462         sch = get_subchannel_by_schid(schid);
463         if (sch) {
464                 ret = css_evaluate_known_subchannel(sch, slow);
465                 put_device(&sch->dev);
466         } else
467                 ret = css_evaluate_new_subchannel(schid, slow);
468         if (ret == -EAGAIN)
469                 css_schedule_eval(schid);
470 }
471
/* Ids queued for slow-path evaluation; protected by the lock below. */
static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;

/* Allocate the slow-path id set; called once during css init. */
static int __init slow_subchannel_init(void)
{
	spin_lock_init(&slow_subchannel_lock);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}
485
486 static int slow_eval_known_fn(struct subchannel *sch, void *data)
487 {
488         int eval;
489         int rc;
490
491         spin_lock_irq(&slow_subchannel_lock);
492         eval = idset_sch_contains(slow_subchannel_set, sch->schid);
493         idset_sch_del(slow_subchannel_set, sch->schid);
494         spin_unlock_irq(&slow_subchannel_lock);
495         if (eval) {
496                 rc = css_evaluate_known_subchannel(sch, 1);
497                 if (rc == -EAGAIN)
498                         css_schedule_eval(sch->schid);
499         }
500         return 0;
501 }
502
503 static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
504 {
505         int eval;
506         int rc = 0;
507
508         spin_lock_irq(&slow_subchannel_lock);
509         eval = idset_sch_contains(slow_subchannel_set, schid);
510         idset_sch_del(slow_subchannel_set, schid);
511         spin_unlock_irq(&slow_subchannel_lock);
512         if (eval) {
513                 rc = css_evaluate_new_subchannel(schid, 1);
514                 switch (rc) {
515                 case -EAGAIN:
516                         css_schedule_eval(schid);
517                         rc = 0;
518                         break;
519                 case -ENXIO:
520                 case -ENOMEM:
521                 case -EIO:
522                         /* These should abort looping */
523                         break;
524                 default:
525                         rc = 0;
526                 }
527         }
528         return rc;
529 }
530
/* Worker: evaluate everything currently in the slow subchannel set. */
static void css_slow_path_func(struct work_struct *unused)
{
	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
}

static DECLARE_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *slow_path_wq;
540
/* Queue a single subchannel id for slow-path evaluation. */
void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
550
/* Queue every possible subchannel id for slow-path evaluation. */
void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
560
/* Wait for pending device-notify and slow-path work to complete. */
void css_wait_for_slow_path(void)
{
	flush_workqueue(ccw_device_notify_work);
	flush_workqueue(slow_path_wq);
}
566
567 /* Reprobe subchannel if unregistered. */
568 static int reprobe_subchannel(struct subchannel_id schid, void *data)
569 {
570         int ret;
571
572         CIO_MSG_EVENT(6, "cio: reprobe 0.%x.%04x\n",
573                       schid.ssid, schid.sch_no);
574         if (need_reprobe)
575                 return -EAGAIN;
576
577         ret = css_probe_device(schid);
578         switch (ret) {
579         case 0:
580                 break;
581         case -ENXIO:
582         case -ENOMEM:
583         case -EIO:
584                 /* These should abort looping */
585                 break;
586         default:
587                 ret = 0;
588         }
589
590         return ret;
591 }
592
/* Work function used to reprobe all unregistered subchannels. */
static void reprobe_all(struct work_struct *unused)
{
	int ret;

	CIO_MSG_EVENT(4, "reprobe start\n");

	/* Clear the request flag; a new request restarts the scan. */
	need_reprobe = 0;
	/* Make sure initial subchannel scan is done. */
	wait_event(ccw_device_init_wq,
		   atomic_read(&ccw_device_init_count) == 0);
	ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL);

	CIO_MSG_EVENT(4, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
		      need_reprobe);
}

static DECLARE_WORK(css_reprobe_work, reprobe_all);
611
/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	/* Setting the flag also aborts an already-running scan. */
	need_reprobe = 1;
	queue_work(slow_path_wq, &css_reprobe_work);
}

EXPORT_SYMBOL_GPL(css_schedule_reprobe);
620
/*
 * Called from the machine check handler for subchannel report words.
 * @rsid1 carries the subchannel number; bits of @rsid2, when non-zero,
 * carry the subchannel-set id.
 */
void css_process_crw(int rsid1, int rsid2)
{
	struct subchannel_id mchk_schid;

	CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n",
		      rsid1, rsid2);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = rsid1;
	if (rsid2 != 0)
		mchk_schid.ssid = (rsid2 >> 8) & 3;

	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}
642
643 static int __init
644 __init_channel_subsystem(struct subchannel_id schid, void *data)
645 {
646         struct subchannel *sch;
647         int ret;
648
649         if (cio_is_console(schid))
650                 sch = cio_get_console_subchannel();
651         else {
652                 sch = css_alloc_subchannel(schid);
653                 if (IS_ERR(sch))
654                         ret = PTR_ERR(sch);
655                 else
656                         ret = 0;
657                 switch (ret) {
658                 case 0:
659                         break;
660                 case -ENOMEM:
661                         panic("Out of memory in init_channel_subsystem\n");
662                 /* -ENXIO: no more subchannels. */
663                 case -ENXIO:
664                         return ret;
665                 /* -EIO: this subchannel set not supported. */
666                 case -EIO:
667                         return ret;
668                 default:
669                         return 0;
670                 }
671         }
672         /*
673          * We register ALL valid subchannels in ioinfo, even those
674          * that have been present before init_channel_subsystem.
675          * These subchannels can't have been registered yet (kmalloc
676          * not working) so we do it now. This is true e.g. for the
677          * console subchannel.
678          */
679         css_register_subchannel(sch);
680         return 0;
681 }
682
/* Build the global path-group id for this channel subsystem. */
static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	if (css_characteristics_avail && css_general_characteristics.mcss) {
		/* Multiple-css machine: encode the extended cssid. */
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
	} else {
#ifdef CONFIG_SMP
		css->global_pgid.pgid_high.cpu_addr = hard_smp_processor_id();
#else
		css->global_pgid.pgid_high.cpu_addr = 0;
#endif
	}
	/* CPU id/model come from the lowcore cpuid. */
	css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
	css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
	css->global_pgid.tod_high = tod_high;

}
701
702 static void
703 channel_subsystem_release(struct device *dev)
704 {
705         struct channel_subsystem *css;
706
707         css = to_css(dev);
708         mutex_destroy(&css->mutex);
709         kfree(css);
710 }
711
712 static ssize_t
713 css_cm_enable_show(struct device *dev, struct device_attribute *attr,
714                    char *buf)
715 {
716         struct channel_subsystem *css = to_css(dev);
717         int ret;
718
719         if (!css)
720                 return 0;
721         mutex_lock(&css->mutex);
722         ret = sprintf(buf, "%x\n", css->cm_enabled);
723         mutex_unlock(&css->mutex);
724         return ret;
725 }
726
727 static ssize_t
728 css_cm_enable_store(struct device *dev, struct device_attribute *attr,
729                     const char *buf, size_t count)
730 {
731         struct channel_subsystem *css = to_css(dev);
732         int ret;
733         unsigned long val;
734
735         ret = strict_strtoul(buf, 16, &val);
736         if (ret)
737                 return ret;
738         mutex_lock(&css->mutex);
739         switch (val) {
740         case 0:
741                 ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
742                 break;
743         case 1:
744                 ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
745                 break;
746         default:
747                 ret = -EINVAL;
748         }
749         mutex_unlock(&css->mutex);
750         return ret < 0 ? ret : count;
751 }
752
753 static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);
754
/*
 * Initialize channel_subsystems[nr]: allocate the placeholder
 * ("defunct") subchannel, set ids/names and generate the path-group id.
 * On failure everything allocated here is cleaned up again.
 */
static int __init setup_css(int nr)
{
	u32 tod_high;
	int ret;
	struct channel_subsystem *css;

	css = channel_subsystems[nr];
	memset(css, 0, sizeof(struct channel_subsystem));
	css->pseudo_subchannel =
		kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
	if (!css->pseudo_subchannel)
		return -ENOMEM;
	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	sprintf(css->pseudo_subchannel->dev.bus_id, "defunct");
	ret = cio_create_sch_lock(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		return ret;
	}
	mutex_init(&css->mutex);
	css->valid = 1;
	css->cssid = nr;
	sprintf(css->device.bus_id, "css%x", nr);
	css->device.release = channel_subsystem_release;
	tod_high = (u32) (get_clock() >> 32);
	css_generate_pgid(css, tod_high);
	return 0;
}
784
785 static int css_reboot_event(struct notifier_block *this,
786                             unsigned long event,
787                             void *ptr)
788 {
789         int ret, i;
790
791         ret = NOTIFY_DONE;
792         for (i = 0; i <= __MAX_CSSID; i++) {
793                 struct channel_subsystem *css;
794
795                 css = channel_subsystems[i];
796                 mutex_lock(&css->mutex);
797                 if (css->cm_enabled)
798                         if (chsc_secm(css, 0))
799                                 ret = NOTIFY_BAD;
800                 mutex_unlock(&css->mutex);
801         }
802
803         return ret;
804 }
805
806 static struct notifier_block css_reboot_notifier = {
807         .notifier_call = css_reboot_event,
808 };
809
810 /*
811  * Now that the driver core is running, we can setup our channel subsystem.
812  * The struct subchannel's are created during probing (except for the
813  * static console subchannel).
814  */
815 static int __init
816 init_channel_subsystem (void)
817 {
818         int ret, i;
819
820         ret = chsc_determine_css_characteristics();
821         if (ret == -ENOMEM)
822                 goto out; /* No need to continue. */
823         if (ret == 0)
824                 css_characteristics_avail = 1;
825
826         ret = chsc_alloc_sei_area();
827         if (ret)
828                 goto out;
829
830         ret = slow_subchannel_init();
831         if (ret)
832                 goto out;
833
834         if ((ret = bus_register(&css_bus_type)))
835                 goto out;
836
837         /* Try to enable MSS. */
838         ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
839         switch (ret) {
840         case 0: /* Success. */
841                 max_ssid = __MAX_SSID;
842                 break;
843         case -ENOMEM:
844                 goto out_bus;
845         default:
846                 max_ssid = 0;
847         }
848         /* Setup css structure. */
849         for (i = 0; i <= __MAX_CSSID; i++) {
850                 struct channel_subsystem *css;
851
852                 css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
853                 if (!css) {
854                         ret = -ENOMEM;
855                         goto out_unregister;
856                 }
857                 channel_subsystems[i] = css;
858                 ret = setup_css(i);
859                 if (ret)
860                         goto out_free;
861                 ret = device_register(&css->device);
862                 if (ret)
863                         goto out_free_all;
864                 if (css_characteristics_avail &&
865                     css_chsc_characteristics.secm) {
866                         ret = device_create_file(&css->device,
867                                                  &dev_attr_cm_enable);
868                         if (ret)
869                                 goto out_device;
870                 }
871                 ret = device_register(&css->pseudo_subchannel->dev);
872                 if (ret)
873                         goto out_file;
874         }
875         ret = register_reboot_notifier(&css_reboot_notifier);
876         if (ret)
877                 goto out_pseudo;
878         css_init_done = 1;
879
880         ctl_set_bit(6, 28);
881
882         for_each_subchannel(__init_channel_subsystem, NULL);
883         return 0;
884 out_pseudo:
885         device_unregister(&channel_subsystems[i]->pseudo_subchannel->dev);
886 out_file:
887         device_remove_file(&channel_subsystems[i]->device,
888                            &dev_attr_cm_enable);
889 out_device:
890         device_unregister(&channel_subsystems[i]->device);
891 out_free_all:
892         kfree(channel_subsystems[i]->pseudo_subchannel->lock);
893         kfree(channel_subsystems[i]->pseudo_subchannel);
894 out_free:
895         kfree(channel_subsystems[i]);
896 out_unregister:
897         while (i > 0) {
898                 struct channel_subsystem *css;
899
900                 i--;
901                 css = channel_subsystems[i];
902                 device_unregister(&css->pseudo_subchannel->dev);
903                 if (css_characteristics_avail && css_chsc_characteristics.secm)
904                         device_remove_file(&css->device,
905                                            &dev_attr_cm_enable);
906                 device_unregister(&css->device);
907         }
908 out_bus:
909         bus_unregister(&css_bus_type);
910 out:
911         chsc_free_sei_area();
912         kfree(slow_subchannel_set);
913         printk(KERN_WARNING"cio: failed to initialize css driver (%d)!\n",
914                ret);
915         return ret;
916 }
917
/* Check whether @sch is its css's "defunct" placeholder subchannel. */
int sch_is_pseudo_sch(struct subchannel *sch)
{
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}
922
923 /*
924  * find a driver for a subchannel. They identify by the subchannel
925  * type with the exception that the console subchannel driver has its own
926  * subchannel type although the device is an i/o subchannel
927  */
928 static int
929 css_bus_match (struct device *dev, struct device_driver *drv)
930 {
931         struct subchannel *sch = to_subchannel(dev);
932         struct css_driver *driver = to_cssdriver(drv);
933
934         if (sch->st == driver->subchannel_type)
935                 return 1;
936
937         return 0;
938 }
939
940 static int css_probe(struct device *dev)
941 {
942         struct subchannel *sch;
943         int ret;
944
945         sch = to_subchannel(dev);
946         sch->driver = to_cssdriver(dev->driver);
947         ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
948         if (ret)
949                 sch->driver = NULL;
950         return ret;
951 }
952
953 static int css_remove(struct device *dev)
954 {
955         struct subchannel *sch;
956         int ret;
957
958         sch = to_subchannel(dev);
959         ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
960         sch->driver = NULL;
961         return ret;
962 }
963
964 static void css_shutdown(struct device *dev)
965 {
966         struct subchannel *sch;
967
968         sch = to_subchannel(dev);
969         if (sch->driver && sch->driver->shutdown)
970                 sch->driver->shutdown(sch);
971 }
972
973 static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
974 {
975         struct subchannel *sch = to_subchannel(dev);
976         int ret;
977
978         ret = add_uevent_var(env, "ST=%01X", sch->st);
979         if (ret)
980                 return ret;
981         ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
982         return ret;
983 }
984
/* The css bus: subchannel devices matched to subchannel-type drivers. */
struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
	.uevent   = css_uevent,
};
993
994 /**
995  * css_driver_register - register a css driver
996  * @cdrv: css driver to register
997  *
998  * This is mainly a wrapper around driver_register that sets name
999  * and bus_type in the embedded struct device_driver correctly.
1000  */
1001 int css_driver_register(struct css_driver *cdrv)
1002 {
1003         cdrv->drv.name = cdrv->name;
1004         cdrv->drv.bus = &css_bus_type;
1005         cdrv->drv.owner = cdrv->owner;
1006         return driver_register(&cdrv->drv);
1007 }
1008 EXPORT_SYMBOL_GPL(css_driver_register);
1009
/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.  Must be paired with a
 * successful css_driver_register() of the same driver.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);
1021
1022 subsys_initcall(init_channel_subsystem);
1023
1024 MODULE_LICENSE("GPL");
1025 EXPORT_SYMBOL(css_bus_type);
1026 EXPORT_SYMBOL_GPL(css_characteristics_avail);