1 /* ds.c: Domain Services driver for Logical Domains
3 * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
6 #include <linux/kernel.h>
7 #include <linux/module.h>
8 #include <linux/types.h>
9 #include <linux/module.h>
10 #include <linux/string.h>
11 #include <linux/slab.h>
12 #include <linux/sched.h>
13 #include <linux/delay.h>
14 #include <linux/mutex.h>
15 #include <linux/workqueue.h>
16 #include <linux/cpu.h>
20 #include <asm/power.h>
21 #include <asm/mdesc.h>
24 #include <asm/hvtramp.h>
26 #define DRV_MODULE_NAME "ds"
27 #define PFX DRV_MODULE_NAME ": "
28 #define DRV_MODULE_VERSION "1.0"
29 #define DRV_MODULE_RELDATE "Jul 11, 2007"
31 static char version[] __devinitdata =
32 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
33 MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
34 MODULE_DESCRIPTION("Sun LDOM domain services driver");
35 MODULE_LICENSE("GPL");
36 MODULE_VERSION(DRV_MODULE_VERSION);
40 #define DS_INIT_REQ 0x00
41 #define DS_INIT_ACK 0x01
42 #define DS_INIT_NACK 0x02
43 #define DS_REG_REQ 0x03
44 #define DS_REG_ACK 0x04
45 #define DS_REG_NACK 0x05
46 #define DS_UNREG_REQ 0x06
47 #define DS_UNREG_ACK 0x07
48 #define DS_UNREG_NACK 0x08
57 #define DS_REG_VER_NACK 0x01
58 #define DS_REG_DUP 0x02
59 #define DS_INV_HDL 0x03
60 #define DS_TYPE_UNKNOWN 0x04
68 struct ds_msg_tag tag;
69 struct ds_version ver;
73 struct ds_msg_tag tag;
78 struct ds_msg_tag tag;
83 struct ds_msg_tag tag;
91 struct ds_msg_tag tag;
97 struct ds_msg_tag tag;
102 struct ds_unreg_req {
103 struct ds_msg_tag tag;
107 struct ds_unreg_ack {
108 struct ds_msg_tag tag;
112 struct ds_unreg_nack {
113 struct ds_msg_tag tag;
118 struct ds_msg_tag tag;
122 struct ds_data_nack {
123 struct ds_msg_tag tag;
128 struct ds_cap_state {
131 void (*data)(struct ldc_channel *lp,
132 struct ds_cap_state *cp,
135 const char *service_id;
138 #define CAP_STATE_UNKNOWN 0x00
139 #define CAP_STATE_REG_SENT 0x01
140 #define CAP_STATE_REGISTERED 0x02
143 static void md_update_data(struct ldc_channel *lp, struct ds_cap_state *cp,
145 static void domain_shutdown_data(struct ldc_channel *lp,
146 struct ds_cap_state *cp,
148 static void domain_panic_data(struct ldc_channel *lp,
149 struct ds_cap_state *cp,
151 #ifdef CONFIG_HOTPLUG_CPU
152 static void dr_cpu_data(struct ldc_channel *lp,
153 struct ds_cap_state *cp,
156 static void ds_pri_data(struct ldc_channel *lp,
157 struct ds_cap_state *cp,
159 static void ds_var_data(struct ldc_channel *lp,
160 struct ds_cap_state *cp,
163 struct ds_cap_state ds_states[] = {
165 .service_id = "md-update",
166 .data = md_update_data,
169 .service_id = "domain-shutdown",
170 .data = domain_shutdown_data,
173 .service_id = "domain-panic",
174 .data = domain_panic_data,
176 #ifdef CONFIG_HOTPLUG_CPU
178 .service_id = "dr-cpu",
187 .service_id = "var-config",
191 .service_id = "var-config-backup",
196 static DEFINE_SPINLOCK(ds_lock);
199 struct ldc_channel *lp;
201 #define DS_HS_START 0x01
202 #define DS_HS_DONE 0x02
208 static struct ds_info *ds_info;
210 static struct ds_cap_state *find_cap(u64 handle)
212 unsigned int index = handle >> 32;
214 if (index >= ARRAY_SIZE(ds_states))
216 return &ds_states[index];
219 static struct ds_cap_state *find_cap_by_string(const char *name)
223 for (i = 0; i < ARRAY_SIZE(ds_states); i++) {
224 if (strcmp(ds_states[i].service_id, name))
227 return &ds_states[i];
232 static int ds_send(struct ldc_channel *lp, void *data, int len)
234 int err, limit = 1000;
237 while (limit-- > 0) {
238 err = ldc_write(lp, data, len);
239 if (!err || (err != -EAGAIN))
247 struct ds_md_update_req {
251 struct ds_md_update_res {
256 static void md_update_data(struct ldc_channel *lp,
257 struct ds_cap_state *dp,
260 struct ds_data *dpkt = buf;
261 struct ds_md_update_req *rp;
264 struct ds_md_update_res res;
267 rp = (struct ds_md_update_req *) (dpkt + 1);
269 printk(KERN_INFO PFX "Machine description update.\n");
271 memset(&pkt, 0, sizeof(pkt));
272 pkt.data.tag.type = DS_DATA;
273 pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag);
274 pkt.data.handle = dp->handle;
275 pkt.res.req_num = rp->req_num;
276 pkt.res.result = DS_OK;
278 ds_send(lp, &pkt, sizeof(pkt));
283 struct ds_shutdown_req {
288 struct ds_shutdown_res {
294 static void domain_shutdown_data(struct ldc_channel *lp,
295 struct ds_cap_state *dp,
298 struct ds_data *dpkt = buf;
299 struct ds_shutdown_req *rp;
302 struct ds_shutdown_res res;
305 rp = (struct ds_shutdown_req *) (dpkt + 1);
307 printk(KERN_ALERT PFX "Shutdown request from "
308 "LDOM manager received.\n");
310 memset(&pkt, 0, sizeof(pkt));
311 pkt.data.tag.type = DS_DATA;
312 pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag);
313 pkt.data.handle = dp->handle;
314 pkt.res.req_num = rp->req_num;
315 pkt.res.result = DS_OK;
316 pkt.res.reason[0] = 0;
318 ds_send(lp, &pkt, sizeof(pkt));
323 struct ds_panic_req {
327 struct ds_panic_res {
333 static void domain_panic_data(struct ldc_channel *lp,
334 struct ds_cap_state *dp,
337 struct ds_data *dpkt = buf;
338 struct ds_panic_req *rp;
341 struct ds_panic_res res;
344 rp = (struct ds_panic_req *) (dpkt + 1);
346 printk(KERN_ALERT PFX "Panic request from "
347 "LDOM manager received.\n");
349 memset(&pkt, 0, sizeof(pkt));
350 pkt.data.tag.type = DS_DATA;
351 pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag);
352 pkt.data.handle = dp->handle;
353 pkt.res.req_num = rp->req_num;
354 pkt.res.result = DS_OK;
355 pkt.res.reason[0] = 0;
357 ds_send(lp, &pkt, sizeof(pkt));
359 panic("PANIC requested by LDOM manager.");
362 #ifdef CONFIG_HOTPLUG_CPU
366 #define DR_CPU_CONFIGURE 0x43
367 #define DR_CPU_UNCONFIGURE 0x55
368 #define DR_CPU_FORCE_UNCONFIGURE 0x46
369 #define DR_CPU_STATUS 0x53
372 #define DR_CPU_OK 0x6f
373 #define DR_CPU_ERROR 0x65
378 struct dr_cpu_resp_entry {
381 #define DR_CPU_RES_OK 0x00
382 #define DR_CPU_RES_FAILURE 0x01
383 #define DR_CPU_RES_BLOCKED 0x02
384 #define DR_CPU_RES_CPU_NOT_RESPONDING 0x03
385 #define DR_CPU_RES_NOT_IN_MD 0x04
388 #define DR_CPU_STAT_NOT_PRESENT 0x00
389 #define DR_CPU_STAT_UNCONFIGURED 0x01
390 #define DR_CPU_STAT_CONFIGURED 0x02
395 /* XXX Put this in some common place. XXX */
396 static unsigned long kimage_addr_to_ra(void *p)
398 unsigned long val = (unsigned long) p;
400 return kern_base + (val - KERNBASE);
403 /* DR cpu requests get queued onto the work list by the
404 * dr_cpu_data() callback. The list is protected by
405 * ds_lock, and processed by dr_cpu_process() in order.
407 static LIST_HEAD(dr_cpu_work_list);
409 struct dr_cpu_queue_entry {
410 struct list_head list;
414 static void __dr_cpu_send_error(struct ds_cap_state *cp, struct ds_data *data)
416 struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1);
417 struct ds_info *dp = ds_info;
420 struct dr_cpu_tag tag;
424 memset(&pkt, 0, sizeof(pkt));
425 pkt.data.tag.type = DS_DATA;
426 pkt.data.handle = cp->handle;
427 pkt.tag.req_num = tag->req_num;
428 pkt.tag.type = DR_CPU_ERROR;
429 pkt.tag.num_records = 0;
431 msg_len = (sizeof(struct ds_data) +
432 sizeof(struct dr_cpu_tag));
434 pkt.data.tag.len = msg_len - sizeof(struct ds_msg_tag);
436 ds_send(dp->lp, &pkt, msg_len);
439 static void dr_cpu_send_error(struct ds_cap_state *cp, struct ds_data *data)
443 spin_lock_irqsave(&ds_lock, flags);
444 __dr_cpu_send_error(cp, data);
445 spin_unlock_irqrestore(&ds_lock, flags);
448 #define CPU_SENTINEL 0xffffffff
450 static void purge_dups(u32 *list, u32 num_ents)
454 for (i = 0; i < num_ents; i++) {
458 if (cpu == CPU_SENTINEL)
461 for (j = i + 1; j < num_ents; j++) {
463 list[j] = CPU_SENTINEL;
468 static int dr_cpu_size_response(int ncpus)
470 return (sizeof(struct ds_data) +
471 sizeof(struct dr_cpu_tag) +
472 (sizeof(struct dr_cpu_resp_entry) * ncpus));
475 static void dr_cpu_init_response(struct ds_data *resp, u64 req_num,
476 u64 handle, int resp_len, int ncpus,
477 cpumask_t *mask, u32 default_stat)
479 struct dr_cpu_resp_entry *ent;
480 struct dr_cpu_tag *tag;
483 tag = (struct dr_cpu_tag *) (resp + 1);
484 ent = (struct dr_cpu_resp_entry *) (tag + 1);
486 resp->tag.type = DS_DATA;
487 resp->tag.len = resp_len - sizeof(struct ds_msg_tag);
488 resp->handle = handle;
489 tag->req_num = req_num;
490 tag->type = DR_CPU_OK;
491 tag->num_records = ncpus;
494 for_each_cpu_mask(cpu, *mask) {
496 ent[i].result = DR_CPU_RES_OK;
497 ent[i].stat = default_stat;
503 static void dr_cpu_mark(struct ds_data *resp, int cpu, int ncpus,
506 struct dr_cpu_resp_entry *ent;
507 struct dr_cpu_tag *tag;
510 tag = (struct dr_cpu_tag *) (resp + 1);
511 ent = (struct dr_cpu_resp_entry *) (tag + 1);
513 for (i = 0; i < ncpus; i++) {
514 if (ent[i].cpu != cpu)
522 static int dr_cpu_configure(struct ds_cap_state *cp, u64 req_num,
525 struct ds_data *resp;
526 int resp_len, ncpus, cpu;
529 ncpus = cpus_weight(*mask);
530 resp_len = dr_cpu_size_response(ncpus);
531 resp = kzalloc(resp_len, GFP_KERNEL);
535 dr_cpu_init_response(resp, req_num, cp->handle,
536 resp_len, ncpus, mask,
537 DR_CPU_STAT_CONFIGURED);
539 mdesc_fill_in_cpu_data(*mask);
541 for_each_cpu_mask(cpu, *mask) {
544 printk(KERN_INFO PFX "Starting cpu %d...\n", cpu);
547 dr_cpu_mark(resp, cpu, ncpus,
549 DR_CPU_STAT_UNCONFIGURED);
552 spin_lock_irqsave(&ds_lock, flags);
553 ds_send(ds_info->lp, resp, resp_len);
554 spin_unlock_irqrestore(&ds_lock, flags);
561 static int dr_cpu_unconfigure(struct ds_cap_state *cp, u64 req_num,
564 struct ds_data *resp;
567 ncpus = cpus_weight(*mask);
568 resp_len = dr_cpu_size_response(ncpus);
569 resp = kzalloc(resp_len, GFP_KERNEL);
573 dr_cpu_init_response(resp, req_num, cp->handle,
574 resp_len, ncpus, mask,
575 DR_CPU_STAT_UNCONFIGURED);
582 static void dr_cpu_process(struct work_struct *work)
584 struct dr_cpu_queue_entry *qp, *tmp;
585 struct ds_cap_state *cp;
590 cp = find_cap_by_string("dr-cpu");
592 spin_lock_irqsave(&ds_lock, flags);
593 list_splice(&dr_cpu_work_list, &todo);
594 spin_unlock_irqrestore(&ds_lock, flags);
596 list_for_each_entry_safe(qp, tmp, &todo, list) {
597 struct ds_data *data = (struct ds_data *) qp->req;
598 struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1);
599 u32 *cpu_list = (u32 *) (tag + 1);
600 u64 req_num = tag->req_num;
605 case DR_CPU_CONFIGURE:
606 case DR_CPU_UNCONFIGURE:
607 case DR_CPU_FORCE_UNCONFIGURE:
611 dr_cpu_send_error(cp, data);
615 purge_dups(cpu_list, tag->num_records);
618 for (i = 0; i < tag->num_records; i++) {
619 if (cpu_list[i] == CPU_SENTINEL)
622 if (cpu_list[i] < NR_CPUS)
623 cpu_set(cpu_list[i], mask);
626 if (tag->type == DR_CPU_CONFIGURE)
627 err = dr_cpu_configure(cp, req_num, &mask);
629 err = dr_cpu_unconfigure(cp, req_num, &mask);
632 dr_cpu_send_error(cp, data);
640 static DECLARE_WORK(dr_cpu_work, dr_cpu_process);
642 static void dr_cpu_data(struct ldc_channel *lp,
643 struct ds_cap_state *dp,
646 struct dr_cpu_queue_entry *qp;
647 struct ds_data *dpkt = buf;
648 struct dr_cpu_tag *rp;
650 rp = (struct dr_cpu_tag *) (dpkt + 1);
652 qp = kmalloc(sizeof(struct dr_cpu_queue_entry) + len, GFP_ATOMIC);
654 struct ds_cap_state *cp;
656 cp = find_cap_by_string("dr-cpu");
657 __dr_cpu_send_error(cp, dpkt);
659 memcpy(&qp->req, buf, len);
660 list_add_tail(&qp->list, &dr_cpu_work_list);
661 schedule_work(&dr_cpu_work);
669 #define DS_PRI_REQUEST 0x00
670 #define DS_PRI_DATA 0x01
671 #define DS_PRI_UPDATE 0x02
674 static void ds_pri_data(struct ldc_channel *lp,
675 struct ds_cap_state *dp,
678 struct ds_data *dpkt = buf;
679 struct ds_pri_msg *rp;
681 rp = (struct ds_pri_msg *) (dpkt + 1);
683 printk(KERN_INFO PFX "PRI REQ [%lx:%lx], len=%d\n",
684 rp->req_num, rp->type, len);
689 #define DS_VAR_SET_REQ 0x00
690 #define DS_VAR_DELETE_REQ 0x01
691 #define DS_VAR_SET_RESP 0x02
692 #define DS_VAR_DELETE_RESP 0x03
695 struct ds_var_set_msg {
696 struct ds_var_hdr hdr;
697 char name_and_value[0];
700 struct ds_var_delete_msg {
701 struct ds_var_hdr hdr;
706 struct ds_var_hdr hdr;
708 #define DS_VAR_SUCCESS 0x00
709 #define DS_VAR_NO_SPACE 0x01
710 #define DS_VAR_INVALID_VAR 0x02
711 #define DS_VAR_INVALID_VAL 0x03
712 #define DS_VAR_NOT_PRESENT 0x04
715 static DEFINE_MUTEX(ds_var_mutex);
716 static int ds_var_doorbell;
717 static int ds_var_response;
719 static void ds_var_data(struct ldc_channel *lp,
720 struct ds_cap_state *dp,
723 struct ds_data *dpkt = buf;
724 struct ds_var_resp *rp;
726 rp = (struct ds_var_resp *) (dpkt + 1);
728 if (rp->hdr.type != DS_VAR_SET_RESP &&
729 rp->hdr.type != DS_VAR_DELETE_RESP)
732 ds_var_response = rp->result;
737 void ldom_set_var(const char *var, const char *value)
739 struct ds_info *dp = ds_info;
740 struct ds_cap_state *cp;
742 cp = find_cap_by_string("var-config");
743 if (cp->state != CAP_STATE_REGISTERED)
744 cp = find_cap_by_string("var-config-backup");
746 if (cp->state == CAP_STATE_REGISTERED) {
750 struct ds_var_set_msg msg;
758 memset(&pkt, 0, sizeof(pkt));
759 pkt.header.data.tag.type = DS_DATA;
760 pkt.header.data.handle = cp->handle;
761 pkt.header.msg.hdr.type = DS_VAR_SET_REQ;
762 base = p = &pkt.header.msg.name_and_value[0];
764 p += strlen(var) + 1;
766 p += strlen(value) + 1;
768 msg_len = (sizeof(struct ds_data) +
769 sizeof(struct ds_var_set_msg) +
771 msg_len = (msg_len + 3) & ~3;
772 pkt.header.data.tag.len = msg_len - sizeof(struct ds_msg_tag);
774 mutex_lock(&ds_var_mutex);
776 spin_lock_irqsave(&ds_lock, flags);
778 ds_var_response = -1;
780 ds_send(dp->lp, &pkt, msg_len);
781 spin_unlock_irqrestore(&ds_lock, flags);
784 while (ds_var_doorbell == 0) {
791 mutex_unlock(&ds_var_mutex);
793 if (ds_var_doorbell == 0 ||
794 ds_var_response != DS_VAR_SUCCESS)
795 printk(KERN_ERR PFX "var-config [%s:%s] "
796 "failed, response(%d).\n",
800 printk(KERN_ERR PFX "var-config not registered so "
801 "could not set (%s) variable to (%s).\n",
/* Reboot the domain, optionally recording "boot <boot_command>" as
 * the reboot-command variable first.  snprintf bounds the copy — the
 * original back-to-back strcpy()s could overflow full_boot_str for
 * long boot commands.
 */
void ldom_reboot(const char *boot_command)
{
	/* Don't bother with any of this if the boot_command
	 * is empty.
	 */
	if (boot_command && strlen(boot_command)) {
		char full_boot_str[256];

		snprintf(full_boot_str, sizeof(full_boot_str),
			 "boot %s", boot_command);

		ldom_set_var("reboot-command", full_boot_str);
	}
	/* NOTE(review): reset via hypervisor — confirm the hypervisor
	 * header providing sun4v_mach_sir() is included in this tree.
	 */
	sun4v_mach_sir();
}
822 void ldom_power_off(void)
827 static void ds_conn_reset(struct ds_info *dp)
829 printk(KERN_ERR PFX "ds_conn_reset() from %p\n",
830 __builtin_return_address(0));
833 static int register_services(struct ds_info *dp)
835 struct ldc_channel *lp = dp->lp;
838 for (i = 0; i < ARRAY_SIZE(ds_states); i++) {
840 struct ds_reg_req req;
843 struct ds_cap_state *cp = &ds_states[i];
847 if (cp->state == CAP_STATE_REGISTERED)
850 new_count = sched_clock() & 0xffffffff;
851 cp->handle = ((u64) i << 32) | new_count;
853 msg_len = (sizeof(struct ds_reg_req) +
854 strlen(cp->service_id));
856 memset(&pbuf, 0, sizeof(pbuf));
857 pbuf.req.tag.type = DS_REG_REQ;
858 pbuf.req.tag.len = (msg_len - sizeof(struct ds_msg_tag));
859 pbuf.req.handle = cp->handle;
862 strcpy(pbuf.req.svc_id, cp->service_id);
864 err = ds_send(lp, &pbuf, msg_len);
866 cp->state = CAP_STATE_REG_SENT;
871 static int ds_handshake(struct ds_info *dp, struct ds_msg_tag *pkt)
874 if (dp->hs_state == DS_HS_START) {
875 if (pkt->type != DS_INIT_ACK)
878 dp->hs_state = DS_HS_DONE;
880 return register_services(dp);
883 if (dp->hs_state != DS_HS_DONE)
886 if (pkt->type == DS_REG_ACK) {
887 struct ds_reg_ack *ap = (struct ds_reg_ack *) pkt;
888 struct ds_cap_state *cp = find_cap(ap->handle);
891 printk(KERN_ERR PFX "REG ACK for unknown handle %lx\n",
895 printk(KERN_INFO PFX "Registered %s service.\n",
897 cp->state = CAP_STATE_REGISTERED;
898 } else if (pkt->type == DS_REG_NACK) {
899 struct ds_reg_nack *np = (struct ds_reg_nack *) pkt;
900 struct ds_cap_state *cp = find_cap(np->handle);
903 printk(KERN_ERR PFX "REG NACK for "
904 "unknown handle %lx\n",
908 printk(KERN_INFO PFX "Could not register %s service\n",
910 cp->state = CAP_STATE_UNKNOWN;
920 static int ds_data(struct ds_info *dp, struct ds_msg_tag *pkt, int len)
922 struct ds_data *dpkt = (struct ds_data *) pkt;
923 struct ds_cap_state *cp = find_cap(dpkt->handle);
926 struct ds_data_nack nack = {
929 .len = (sizeof(struct ds_data_nack) -
930 sizeof(struct ds_msg_tag)),
932 .handle = dpkt->handle,
933 .result = DS_INV_HDL,
936 printk(KERN_ERR PFX "Data for unknown handle %lu\n",
938 ds_send(dp->lp, &nack, sizeof(nack));
940 cp->data(dp->lp, cp, dpkt, len);
945 static void ds_up(struct ds_info *dp)
947 struct ldc_channel *lp = dp->lp;
948 struct ds_ver_req req;
951 req.tag.type = DS_INIT_REQ;
952 req.tag.len = sizeof(req) - sizeof(struct ds_msg_tag);
956 err = ds_send(lp, &req, sizeof(req));
958 dp->hs_state = DS_HS_START;
961 static void ds_event(void *arg, int event)
963 struct ds_info *dp = arg;
964 struct ldc_channel *lp = dp->lp;
968 spin_lock_irqsave(&ds_lock, flags);
970 if (event == LDC_EVENT_UP) {
972 spin_unlock_irqrestore(&ds_lock, flags);
976 if (event != LDC_EVENT_DATA_READY) {
977 printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event);
978 spin_unlock_irqrestore(&ds_lock, flags);
984 struct ds_msg_tag *tag;
986 err = ldc_read(lp, dp->rcv_buf, sizeof(*tag));
988 if (unlikely(err < 0)) {
989 if (err == -ECONNRESET)
997 err = ldc_read(lp, tag + 1, tag->len);
999 if (unlikely(err < 0)) {
1000 if (err == -ECONNRESET)
1007 if (tag->type < DS_DATA)
1008 err = ds_handshake(dp, dp->rcv_buf);
1010 err = ds_data(dp, dp->rcv_buf,
1011 sizeof(*tag) + err);
1012 if (err == -ECONNRESET)
1016 spin_unlock_irqrestore(&ds_lock, flags);
1019 static int __devinit ds_probe(struct vio_dev *vdev,
1020 const struct vio_device_id *id)
1022 static int ds_version_printed;
1023 struct ldc_channel_config ds_cfg = {
1026 .mode = LDC_MODE_STREAM,
1028 struct ldc_channel *lp;
1032 if (ds_version_printed++ == 0)
1033 printk(KERN_INFO "%s", version);
1035 dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1040 dp->rcv_buf = kzalloc(4096, GFP_KERNEL);
1044 dp->rcv_buf_len = 4096;
1046 ds_cfg.tx_irq = vdev->tx_irq;
1047 ds_cfg.rx_irq = vdev->rx_irq;
1049 lp = ldc_alloc(vdev->channel_id, &ds_cfg, dp);
1052 goto out_free_rcv_buf;
1056 err = ldc_bind(lp, "DS");
1079 static int ds_remove(struct vio_dev *vdev)
1084 static struct vio_device_id ds_match[] = {
1086 .type = "domain-services-port",
1091 static struct vio_driver ds_driver = {
1092 .id_table = ds_match,
1094 .remove = ds_remove,
1097 .owner = THIS_MODULE,
1101 static int __init ds_init(void)
1105 for (i = 0; i < ARRAY_SIZE(ds_states); i++)
1106 ds_states[i].handle = ((u64)i << 32);
1108 return vio_register_driver(&ds_driver);
1111 subsys_initcall(ds_init);