/*
 *  hosts.c Copyright (C) 1992 Drew Eckhardt
 *          Copyright (C) 1993, 1994, 1995 Eric Youngdale
 *          Copyright (C) 2002-2003 Christoph Hellwig
 *
 *  mid to low-level SCSI driver interface
 *      Initial versions: Drew Eckhardt
 *      Subsequent revisions: Eric Youngdale
 *
 *  <drew@colorado.edu>
 *
 *  Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
 *  Added QLOGIC QLA1280 SCSI controller kernel host support.
 *     August 4, 1999 Fred Lewis, Intel DuPont
 *
 *  Updated to reflect the new initialization scheme for the higher
 *  level of scsi drivers (sd/sr/st)
 *  September 17, 2000 Torben Mathiasen <tmm@image.dk>
 *
 *  Restructured scsi_host lists and associated functions.
 *  September 04, 2002 Mike Anderson (andmike@us.ibm.com)
 */

#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/transport_class.h>

#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


static int scsi_host_next_hn;           /* host_no for next new host */

static void scsi_host_cls_release(struct class_device *class_dev)
{
        put_device(&class_to_shost(class_dev)->shost_gendev);
}

static struct class shost_class = {
        .name           = "scsi_host",
        .release        = scsi_host_cls_release,
};

/**
 * scsi_host_cancel - cancel outstanding IO to this host
 * @shost:      pointer to struct Scsi_Host
 * @recovery:   recovery requested to run.
 **/
static void scsi_host_cancel(struct Scsi_Host *shost, int recovery)
{
        struct scsi_device *sdev;

        set_bit(SHOST_CANCEL, &shost->shost_state);
        shost_for_each_device(sdev, shost) {
                scsi_device_cancel(sdev, recovery);
        }
        wait_event(shost->host_wait, (!test_bit(SHOST_RECOVERY,
                                                &shost->shost_state)));
}

/**
 * scsi_remove_host - remove a scsi host
 * @shost:      a pointer to a scsi host to remove
 **/
void scsi_remove_host(struct Scsi_Host *shost)
{
        scsi_forget_host(shost);
        scsi_host_cancel(shost, 0);
        scsi_proc_host_rm(shost);

        set_bit(SHOST_DEL, &shost->shost_state);

        transport_unregister_device(&shost->shost_gendev);
        class_device_unregister(&shost->shost_classdev);
        device_del(&shost->shost_gendev);
}
EXPORT_SYMBOL(scsi_remove_host);
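
/*
 * Example (illustrative sketch, not part of the original file): a typical
 * low-level driver tears its host down from its remove/detach path by
 * unregistering it from the midlayer and then dropping the reference it
 * obtained from scsi_host_alloc().  The names "my_remove" and
 * "struct my_hba" are hypothetical.
 *
 *      static void my_remove(struct pci_dev *pdev)
 *      {
 *              struct my_hba *hba = pci_get_drvdata(pdev);
 *
 *              scsi_remove_host(hba->shost);   // unbind from the midlayer
 *              scsi_host_put(hba->shost);      // drop the driver's reference
 *      }
 */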

/**
 * scsi_add_host - add a scsi host
 * @shost:      scsi host pointer to add
 * @dev:        a struct device of type scsi class
 *
 * Return value:
 *      0 on success / != 0 for error
 **/
int scsi_add_host(struct Scsi_Host *shost, struct device *dev)
{
        struct scsi_host_template *sht = shost->hostt;
        int error = -EINVAL;

        printk(KERN_INFO "scsi%d : %s\n", shost->host_no,
                        sht->info ? sht->info(shost) : sht->name);

        if (!shost->can_queue) {
                printk(KERN_ERR "%s: can_queue = 0 no longer supported\n",
                                sht->name);
                goto out;
        }

        if (!shost->shost_gendev.parent)
                shost->shost_gendev.parent = dev ? dev : &platform_bus;

        error = device_add(&shost->shost_gendev);
        if (error)
                goto out;

        set_bit(SHOST_ADD, &shost->shost_state);
        get_device(shost->shost_gendev.parent);

        error = class_device_add(&shost->shost_classdev);
        if (error)
                goto out_del_gendev;

        get_device(&shost->shost_gendev);

        if (shost->transportt->host_size) {
                shost->shost_data = kmalloc(shost->transportt->host_size,
                                            GFP_KERNEL);
                if (!shost->shost_data) {
                        error = -ENOMEM;
                        goto out_del_classdev;
                }
        }

        if (shost->transportt->create_work_queue) {
                snprintf(shost->work_q_name, KOBJ_NAME_LEN, "scsi_wq_%d",
                        shost->host_no);
                shost->work_q = create_singlethread_workqueue(
                                        shost->work_q_name);
                if (!shost->work_q) {
                        error = -ENOMEM;
                        goto out_free_shost_data;
                }
        }

        error = scsi_sysfs_add_host(shost);
        if (error)
                goto out_destroy_host;

        scsi_proc_host_add(shost);
        return error;

 out_destroy_host:
        if (shost->work_q)
                destroy_workqueue(shost->work_q);
 out_free_shost_data:
        kfree(shost->shost_data);
 out_del_classdev:
        class_device_del(&shost->shost_classdev);
 out_del_gendev:
        device_del(&shost->shost_gendev);
 out:
        return error;
}
EXPORT_SYMBOL(scsi_add_host);
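
/*
 * Example (illustrative sketch, not part of the original file): the usual
 * driver probe sequence allocates a host with scsi_host_alloc(), publishes
 * it with scsi_add_host() below the driver's own struct device, and then
 * kicks off scanning.  "my_probe", "my_template" and "struct my_hba" are
 * hypothetical names; scsi_scan_host() is provided by scsi_scan.c.
 *
 *      static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *      {
 *              struct Scsi_Host *shost;
 *              int error;
 *
 *              shost = scsi_host_alloc(&my_template, sizeof(struct my_hba));
 *              if (!shost)
 *                      return -ENOMEM;
 *
 *              error = scsi_add_host(shost, &pdev->dev);
 *              if (error) {
 *                      scsi_host_put(shost);
 *                      return error;
 *              }
 *
 *              pci_set_drvdata(pdev, shost);
 *              scsi_scan_host(shost);
 *              return 0;
 *      }
 */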

static void scsi_host_dev_release(struct device *dev)
{
        struct Scsi_Host *shost = dev_to_shost(dev);
        struct device *parent = dev->parent;

        if (shost->ehandler) {
                DECLARE_COMPLETION(sem);
                shost->eh_notify = &sem;
                shost->eh_kill = 1;
                up(shost->eh_wait);
                wait_for_completion(&sem);
                shost->eh_notify = NULL;
        }

        if (shost->work_q)
                destroy_workqueue(shost->work_q);

        scsi_proc_hostdir_rm(shost->hostt);
        scsi_destroy_command_freelist(shost);
        kfree(shost->shost_data);

        if (parent)
                put_device(parent);
        kfree(shost);
}

/**
 * scsi_host_alloc - allocate a scsi host adapter instance.
 * @sht:        pointer to scsi host template
 * @privsize:   extra bytes to allocate for driver
 *
 * Note:
 *      Allocate a new Scsi_Host and perform basic initialization.
 *      The host is not published to the scsi midlayer until scsi_add_host
 *      is called.
 *
 * Return value:
 *      Pointer to a new Scsi_Host
 **/
struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
{
        struct Scsi_Host *shost;
        int gfp_mask = GFP_KERNEL, rval;
        DECLARE_COMPLETION(complete);

        if (sht->unchecked_isa_dma && privsize)
                gfp_mask |= __GFP_DMA;

        /* Check to see if this host has any error handling facilities */
        if (!sht->eh_strategy_handler && !sht->eh_abort_handler &&
            !sht->eh_device_reset_handler && !sht->eh_bus_reset_handler &&
            !sht->eh_host_reset_handler) {
                printk(KERN_ERR "ERROR: SCSI host `%s' has no error handling\n"
                                "ERROR: This is not a safe way to run your "
                                        "SCSI host\n"
                                "ERROR: The error handling must be added to "
                                "this driver\n", sht->proc_name);
                dump_stack();
        }

        shost = kmalloc(sizeof(struct Scsi_Host) + privsize, gfp_mask);
        if (!shost)
                return NULL;
        memset(shost, 0, sizeof(struct Scsi_Host) + privsize);

        spin_lock_init(&shost->default_lock);
        scsi_assign_lock(shost, &shost->default_lock);
        INIT_LIST_HEAD(&shost->__devices);
        INIT_LIST_HEAD(&shost->__targets);
        INIT_LIST_HEAD(&shost->eh_cmd_q);
        INIT_LIST_HEAD(&shost->starved_list);
        init_waitqueue_head(&shost->host_wait);

        init_MUTEX(&shost->scan_mutex);

        shost->host_no = scsi_host_next_hn++; /* XXX(hch): still racy */
        shost->dma_channel = 0xff;

        /* These three are default values which can be overridden */
        shost->max_channel = 0;
        shost->max_id = 8;
        shost->max_lun = 8;

        /* Give each shost a default transportt */
        shost->transportt = &blank_transport_template;

        /*
         * All drivers right now should be able to handle 12 byte
         * commands.  Every so often there are requests for 16 byte
         * commands, but individual low-level drivers need to certify that
         * they actually do something sensible with such commands.
         */
        shost->max_cmd_len = 12;
        shost->hostt = sht;
        shost->this_id = sht->this_id;
        shost->can_queue = sht->can_queue;
        shost->sg_tablesize = sht->sg_tablesize;
        shost->cmd_per_lun = sht->cmd_per_lun;
        shost->unchecked_isa_dma = sht->unchecked_isa_dma;
        shost->use_clustering = sht->use_clustering;
        shost->ordered_flush = sht->ordered_flush;
        shost->ordered_tag = sht->ordered_tag;

        /*
         * Hosts/devices that do queueing must use ordered tags; ordered
         * flushes cannot be used together with a queue depth above one.
         */
        if (shost->can_queue > 1 && shost->ordered_flush) {
                printk(KERN_ERR "scsi: ordered flushes don't support queueing\n");
                shost->ordered_flush = 0;
        }

        if (sht->max_host_blocked)
                shost->max_host_blocked = sht->max_host_blocked;
        else
                shost->max_host_blocked = SCSI_DEFAULT_HOST_BLOCKED;

        /*
         * If the driver imposes no hard sector transfer limit, start at
         * machine infinity initially.
         */
        if (sht->max_sectors)
                shost->max_sectors = sht->max_sectors;
        else
                shost->max_sectors = SCSI_DEFAULT_MAX_SECTORS;

        /*
         * assume a 4GB boundary, if not set
         */
        if (sht->dma_boundary)
                shost->dma_boundary = sht->dma_boundary;
        else
                shost->dma_boundary = 0xffffffff;

        rval = scsi_setup_command_freelist(shost);
        if (rval)
                goto fail_kfree;

        device_initialize(&shost->shost_gendev);
        snprintf(shost->shost_gendev.bus_id, BUS_ID_SIZE, "host%d",
                shost->host_no);
        shost->shost_gendev.release = scsi_host_dev_release;

        class_device_initialize(&shost->shost_classdev);
        shost->shost_classdev.dev = &shost->shost_gendev;
        shost->shost_classdev.class = &shost_class;
        snprintf(shost->shost_classdev.class_id, BUS_ID_SIZE, "host%d",
                  shost->host_no);

        shost->eh_notify = &complete;
        rval = kernel_thread(scsi_error_handler, shost, 0);
        if (rval < 0)
                goto fail_destroy_freelist;
        wait_for_completion(&complete);
        shost->eh_notify = NULL;

        scsi_proc_hostdir_add(shost->hostt);
        return shost;

 fail_destroy_freelist:
        scsi_destroy_command_freelist(shost);
 fail_kfree:
        kfree(shost);
        return NULL;
}
EXPORT_SYMBOL(scsi_host_alloc);
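
/*
 * Example (illustrative sketch, not part of the original file): the
 * @privsize bytes requested from scsi_host_alloc() live in
 * shost->hostdata[] and usually hold the driver's per-adapter state.
 * Defaults such as max_id can still be overridden before scsi_add_host()
 * is called.  "struct my_hba" and "my_template" are hypothetical names.
 *
 *      struct my_hba {
 *              void __iomem *regs;
 *      };
 *
 *      shost = scsi_host_alloc(&my_template, sizeof(struct my_hba));
 *      if (shost) {
 *              struct my_hba *hba = (struct my_hba *)shost->hostdata;
 *
 *              hba->regs = NULL;       // filled in by the driver's probe code
 *              shost->max_id = 16;     // override the default of 8
 *      }
 */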

struct Scsi_Host *scsi_register(struct scsi_host_template *sht, int privsize)
{
        struct Scsi_Host *shost = scsi_host_alloc(sht, privsize);

        if (!sht->detect) {
                printk(KERN_WARNING "scsi_register() called on new-style "
                                    "template for driver %s\n", sht->name);
                dump_stack();
        }

        if (shost)
                list_add_tail(&shost->sht_legacy_list, &sht->legacy_hosts);
        return shost;
}
EXPORT_SYMBOL(scsi_register);

void scsi_unregister(struct Scsi_Host *shost)
{
        list_del(&shost->sht_legacy_list);
        scsi_host_put(shost);
}
EXPORT_SYMBOL(scsi_unregister);

/**
 * scsi_host_lookup - get a reference to a Scsi_Host by host no
 *
 * @hostnum:    host number to locate
 *
 * Return value:
 *      A pointer to the located Scsi_Host with its reference count
 *      incremented, ERR_PTR(-ENXIO) if no host with that number exists,
 *      or NULL if the host is being removed.
 **/
struct Scsi_Host *scsi_host_lookup(unsigned short hostnum)
{
        struct class *class = &shost_class;
        struct class_device *cdev;
        struct Scsi_Host *shost = ERR_PTR(-ENXIO), *p;

        down_read(&class->subsys.rwsem);
        list_for_each_entry(cdev, &class->children, node) {
                p = class_to_shost(cdev);
                if (p->host_no == hostnum) {
                        shost = scsi_host_get(p);
                        break;
                }
        }
        up_read(&class->subsys.rwsem);

        return shost;
}
EXPORT_SYMBOL(scsi_host_lookup);
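
/*
 * Example (illustrative sketch, not part of the original file): callers
 * must check for an ERR_PTR() (host number not found) and for NULL (host
 * going away) before using the result, and drop the reference when done.
 *
 *      struct Scsi_Host *shost = scsi_host_lookup(hostnum);
 *
 *      if (IS_ERR(shost) || !shost)
 *              return -ENXIO;
 *      // ... use shost ...
 *      scsi_host_put(shost);
 */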

/**
 * scsi_host_get - inc a Scsi_Host ref count
 * @shost:      Pointer to Scsi_Host to inc.
 **/
struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
{
        if (test_bit(SHOST_DEL, &shost->shost_state) ||
                !get_device(&shost->shost_gendev))
                return NULL;
        return shost;
}
EXPORT_SYMBOL(scsi_host_get);

/**
 * scsi_host_put - dec a Scsi_Host ref count
 * @shost:      Pointer to Scsi_Host to dec.
 **/
void scsi_host_put(struct Scsi_Host *shost)
{
        put_device(&shost->shost_gendev);
}
EXPORT_SYMBOL(scsi_host_put);
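
/*
 * Example (illustrative sketch, not part of the original file): code that
 * hands a host off to an asynchronous context takes its own reference
 * first and drops it when the work is finished, so the host cannot be
 * freed underneath it.  scsi_host_get() returns NULL once the host has
 * entered the SHOST_DEL state.
 *
 *      if (!scsi_host_get(shost))
 *              return;                 // host is being torn down
 *      // ... hand shost to a timer, thread or work item ...
 *      scsi_host_put(shost);           // done with the extra reference
 */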

int scsi_init_hosts(void)
{
        return class_register(&shost_class);
}

void scsi_exit_hosts(void)
{
        class_unregister(&shost_class);
}

int scsi_is_host_device(const struct device *dev)
{
        return dev->release == scsi_host_dev_release;
}
EXPORT_SYMBOL(scsi_is_host_device);

/**
 * scsi_queue_work - Queue work to the Scsi_Host workqueue.
 * @shost:      Pointer to Scsi_Host.
 * @work:       Work to queue for execution.
 *
 * Return value:
 *      1 - work queued for execution
 *      0 - work is already queued
 *      -EINVAL - work queue doesn't exist
 **/
int scsi_queue_work(struct Scsi_Host *shost, struct work_struct *work)
{
        if (unlikely(!shost->work_q)) {
                printk(KERN_ERR
                        "ERROR: Scsi host '%s' attempted to queue scsi-work "
                        "when no workqueue was created.\n", shost->hostt->name);
                dump_stack();

                return -EINVAL;
        }

        return queue_work(shost->work_q, work);
}
EXPORT_SYMBOL_GPL(scsi_queue_work);
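
/*
 * Example (illustrative sketch, not part of the original file): a driver
 * whose transport class sets create_work_queue can defer slow operations
 * to the per-host workqueue.  "my_reset_fn" and "struct my_hba" are
 * hypothetical; INIT_WORK() is shown in the three-argument form used by
 * this kernel generation.
 *
 *      static void my_reset_fn(void *data)
 *      {
 *              struct my_hba *hba = data;
 *              // ... perform the slow reset outside interrupt context ...
 *      }
 *
 *      INIT_WORK(&hba->reset_work, my_reset_fn, hba);
 *      scsi_queue_work(hba->shost, &hba->reset_work);
 */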

/**
 * scsi_flush_work - Flush a Scsi_Host's workqueue.
 * @shost:      Pointer to Scsi_Host.
 **/
void scsi_flush_work(struct Scsi_Host *shost)
{
        if (!shost->work_q) {
                printk(KERN_ERR
                        "ERROR: Scsi host '%s' attempted to flush scsi-work "
                        "when no workqueue was created.\n", shost->hostt->name);
                dump_stack();
                return;
        }

        flush_workqueue(shost->work_q);
}
EXPORT_SYMBOL_GPL(scsi_flush_work);
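
/*
 * Example (illustrative sketch, not part of the original file): before a
 * host is torn down, work previously submitted with scsi_queue_work()
 * should be drained so it does not run against a half-removed host.
 *
 *      scsi_flush_work(hba->shost);    // wait for pending scsi-work to finish
 *      scsi_remove_host(hba->shost);
 *      scsi_host_put(hba->shost);
 */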