1 /*
2  * File...........: linux/drivers/s390/block/dasd.c
3  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4  *                  Horst Hummel <Horst.Hummel@de.ibm.com>
5  *                  Carsten Otte <Cotte@de.ibm.com>
6  *                  Martin Schwidefsky <schwidefsky@de.ibm.com>
7  * Bugreports.to..: <Linux390@de.ibm.com>
8  * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
9  *
10  */
11
12 #include <linux/kmod.h>
13 #include <linux/init.h>
14 #include <linux/interrupt.h>
15 #include <linux/ctype.h>
16 #include <linux/major.h>
17 #include <linux/slab.h>
18 #include <linux/buffer_head.h>
19 #include <linux/hdreg.h>
20
21 #include <asm/ccwdev.h>
22 #include <asm/ebcdic.h>
23 #include <asm/idals.h>
24 #include <asm/todclk.h>
25
26 /* This is ugly... */
27 #define PRINTK_HEADER "dasd:"
28
29 #include "dasd_int.h"
30 /*
31  * SECTION: Constant definitions to be used within this file
32  */
33 #define DASD_CHANQ_MAX_SIZE 4
34
35 /*
36  * SECTION: exported variables of dasd.c
37  */
38 debug_info_t *dasd_debug_area;
39 struct dasd_discipline *dasd_diag_discipline_pointer;
40 void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
41
42 MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
43 MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
44                    " Copyright 2000 IBM Corporation");
45 MODULE_SUPPORTED_DEVICE("dasd");
46 MODULE_LICENSE("GPL");
47
48 /*
49  * SECTION: prototypes for static functions of dasd.c
50  */
51 static int  dasd_alloc_queue(struct dasd_block *);
52 static void dasd_setup_queue(struct dasd_block *);
53 static void dasd_free_queue(struct dasd_block *);
54 static void dasd_flush_request_queue(struct dasd_block *);
55 static int dasd_flush_block_queue(struct dasd_block *);
56 static void dasd_device_tasklet(struct dasd_device *);
57 static void dasd_block_tasklet(struct dasd_block *);
58 static void do_kick_device(struct work_struct *);
59 static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
60
61 /*
62  * SECTION: Operations on the device structure.
63  */
64 static wait_queue_head_t dasd_init_waitq;
65 static wait_queue_head_t dasd_flush_wq;
66
67 /*
68  * Allocate memory for a new device structure.
69  */
70 struct dasd_device *dasd_alloc_device(void)
71 {
72         struct dasd_device *device;
73
74         device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
75         if (!device)
76                 return ERR_PTR(-ENOMEM);
77
78         /* Get two pages for normal block device operations. */
79         device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
80         if (!device->ccw_mem) {
81                 kfree(device);
82                 return ERR_PTR(-ENOMEM);
83         }
84         /* Get one page for error recovery. */
85         device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
86         if (!device->erp_mem) {
87                 free_pages((unsigned long) device->ccw_mem, 1);
88                 kfree(device);
89                 return ERR_PTR(-ENOMEM);
90         }
91
92         dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
93         dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
94         spin_lock_init(&device->mem_lock);
95         atomic_set(&device->tasklet_scheduled, 0);
96         tasklet_init(&device->tasklet,
97                      (void (*)(unsigned long)) dasd_device_tasklet,
98                      (unsigned long) device);
99         INIT_LIST_HEAD(&device->ccw_queue);
100         init_timer(&device->timer);
101         INIT_WORK(&device->kick_work, do_kick_device);
102         device->state = DASD_STATE_NEW;
103         device->target = DASD_STATE_NEW;
104
105         return device;
106 }
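/*
 * Editorial note (inferred from the allocation code above, not part of the
 * original comments): ccw_chunks spans two DMA pages and feeds
 * dasd_smalloc_request for normal channel programs, while erp_chunks is a
 * separate page kept aside for error recovery, so an ERP action can still
 * build a request when the normal pool is exhausted.
 */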
107
108 /*
109  * Free memory of a device structure.
110  */
111 void dasd_free_device(struct dasd_device *device)
112 {
113         kfree(device->private);
114         free_page((unsigned long) device->erp_mem);
115         free_pages((unsigned long) device->ccw_mem, 1);
116         kfree(device);
117 }
118
119 /*
120  * Allocate memory for a new block structure.
121  */
122 struct dasd_block *dasd_alloc_block(void)
123 {
124         struct dasd_block *block;
125
126         block = kzalloc(sizeof(*block), GFP_ATOMIC);
127         if (!block)
128                 return ERR_PTR(-ENOMEM);
129         /* open_count = 0 means device online but not in use */
130         atomic_set(&block->open_count, -1);
131
132         spin_lock_init(&block->request_queue_lock);
133         atomic_set(&block->tasklet_scheduled, 0);
134         tasklet_init(&block->tasklet,
135                      (void (*)(unsigned long)) dasd_block_tasklet,
136                      (unsigned long) block);
137         INIT_LIST_HEAD(&block->ccw_queue);
138         spin_lock_init(&block->queue_lock);
139         init_timer(&block->timer);
140
141         return block;
142 }
143
144 /*
145  * Free memory of a block structure.
146  */
147 void dasd_free_block(struct dasd_block *block)
148 {
149         kfree(block);
150 }
151
152 /*
153  * Make a new device known to the system.
154  */
155 static int dasd_state_new_to_known(struct dasd_device *device)
156 {
157         int rc;
158
159         /*
160          * As long as the device is not in state DASD_STATE_NEW we want to
161          * keep the reference count > 0.
162          */
163         dasd_get_device(device);
164
165         if (device->block) {
166                 rc = dasd_alloc_queue(device->block);
167                 if (rc) {
168                         dasd_put_device(device);
169                         return rc;
170                 }
171         }
172         device->state = DASD_STATE_KNOWN;
173         return 0;
174 }
175
176 /*
177  * Let the system forget about a device.
178  */
179 static int dasd_state_known_to_new(struct dasd_device *device)
180 {
181         /* Disable extended error reporting for this device. */
182         dasd_eer_disable(device);
183         /* Forget the discipline information. */
184         if (device->discipline) {
185                 if (device->discipline->uncheck_device)
186                         device->discipline->uncheck_device(device);
187                 module_put(device->discipline->owner);
188         }
189         device->discipline = NULL;
190         if (device->base_discipline)
191                 module_put(device->base_discipline->owner);
192         device->base_discipline = NULL;
193         device->state = DASD_STATE_NEW;
194
195         if (device->block)
196                 dasd_free_queue(device->block);
197
198         /* Give up reference we took in dasd_state_new_to_known. */
199         dasd_put_device(device);
200         return 0;
201 }
202
203 /*
204  * Allocate the gendisk and register the debug area for the device.
205  */
206 static int dasd_state_known_to_basic(struct dasd_device *device)
207 {
208         int rc;
209
210         /* Allocate and register gendisk structure. */
211         if (device->block) {
212                 rc = dasd_gendisk_alloc(device->block);
213                 if (rc)
214                         return rc;
215         }
216         /* register 'device' debug area, used for all DBF_DEV_XXX calls */
217         device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 1,
218                                             8 * sizeof(long));
219         debug_register_view(device->debug_area, &debug_sprintf_view);
220         debug_set_level(device->debug_area, DBF_WARNING);
221         DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
222
223         device->state = DASD_STATE_BASIC;
224         return 0;
225 }
226
227 /*
228  * Free the gendisk and the debug area. Terminate any running i/o.
229  */
230 static int dasd_state_basic_to_known(struct dasd_device *device)
231 {
232         int rc;
233         if (device->block) {
234                 dasd_gendisk_free(device->block);
235                 dasd_block_clear_timer(device->block);
236         }
237         rc = dasd_flush_device_queue(device);
238         if (rc)
239                 return rc;
240         dasd_device_clear_timer(device);
241
242         DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
243         if (device->debug_area != NULL) {
244                 debug_unregister(device->debug_area);
245                 device->debug_area = NULL;
246         }
247         device->state = DASD_STATE_KNOWN;
248         return 0;
249 }
250
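/*
 * Editorial sketch of the state ladder driven by the transition functions
 * in this file (derived from the DASD_STATE_* handling below, not part of
 * the original comments):
 *
 *   NEW <-> KNOWN <-> BASIC <-> READY <-> ONLINE
 *                       \-> UNFMT (analysis failed, volume not formatted)
 *
 * dasd_increase_state() walks towards device->target, dasd_decrease_state()
 * walks back down; UNFMT can only drop back to BASIC.
 */
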
251 /*
252  * Do the initial analysis. The do_analysis function may return
253  * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
254  * until the discipline decides to continue the startup sequence
255  * by calling the function dasd_change_state. The eckd discipline
256  * uses this to start a ccw that detects the format. The completion
257  * interrupt for this detection ccw uses the kernel event daemon to
258  * trigger the call to dasd_change_state. All this is done in the
259  * discipline code, see dasd_eckd.c.
260  * After the analysis ccw is done (do_analysis returned 0) the block
261  * device is set up.
262  * In case the analysis returns an error, the device setup is stopped
263  * (a fake disk was already added to allow formatting).
264  */
265 static int dasd_state_basic_to_ready(struct dasd_device *device)
266 {
267         int rc;
268         struct dasd_block *block;
269
270         rc = 0;
271         block = device->block;
272         /* make disk known with correct capacity */
273         if (block) {
274                 if (block->base->discipline->do_analysis != NULL)
275                         rc = block->base->discipline->do_analysis(block);
276                 if (rc) {
277                         if (rc != -EAGAIN)
278                                 device->state = DASD_STATE_UNFMT;
279                         return rc;
280                 }
281                 dasd_setup_queue(block);
282                 set_capacity(block->gdp,
283                              block->blocks << block->s2b_shift);
284                 device->state = DASD_STATE_READY;
285                 rc = dasd_scan_partitions(block);
286                 if (rc)
287                         device->state = DASD_STATE_BASIC;
288         } else {
289                 device->state = DASD_STATE_READY;
290         }
291         return rc;
292 }
293
294 /*
295  * Remove device from block device layer. Destroy dirty buffers.
296  * Forget format information. Check if the target level is basic
297  * and if it is create fake disk for formatting.
298  */
299 static int dasd_state_ready_to_basic(struct dasd_device *device)
300 {
301         int rc;
302
303         device->state = DASD_STATE_BASIC;
304         if (device->block) {
305                 struct dasd_block *block = device->block;
306                 rc = dasd_flush_block_queue(block);
307                 if (rc) {
308                         device->state = DASD_STATE_READY;
309                         return rc;
310                 }
311                 dasd_destroy_partitions(block);
312                 dasd_flush_request_queue(block);
313                 block->blocks = 0;
314                 block->bp_block = 0;
315                 block->s2b_shift = 0;
316         }
317         return 0;
318 }
319
320 /*
321  * Back to basic.
322  */
323 static int dasd_state_unfmt_to_basic(struct dasd_device *device)
324 {
325         device->state = DASD_STATE_BASIC;
326         return 0;
327 }
328
329 /*
330  * Make the device online and schedule the bottom half to start
331  * the requeueing of requests from the linux request queue to the
332  * ccw queue.
333  */
334 static int
335 dasd_state_ready_to_online(struct dasd_device * device)
336 {
337         int rc;
338
339         if (device->discipline->ready_to_online) {
340                 rc = device->discipline->ready_to_online(device);
341                 if (rc)
342                         return rc;
343         }
344         device->state = DASD_STATE_ONLINE;
345         if (device->block)
346                 dasd_schedule_block_bh(device->block);
347         return 0;
348 }
349
350 /*
351  * Stop the requeueing of requests.
352  */
353 static int dasd_state_online_to_ready(struct dasd_device *device)
354 {
355         int rc;
356
357         if (device->discipline->online_to_ready) {
358                 rc = device->discipline->online_to_ready(device);
359                 if (rc)
360                         return rc;
361         }
362         device->state = DASD_STATE_READY;
363         return 0;
364 }
365
366 /*
367  * Device startup state changes.
368  */
369 static int dasd_increase_state(struct dasd_device *device)
370 {
371         int rc;
372
373         rc = 0;
374         if (device->state == DASD_STATE_NEW &&
375             device->target >= DASD_STATE_KNOWN)
376                 rc = dasd_state_new_to_known(device);
377
378         if (!rc &&
379             device->state == DASD_STATE_KNOWN &&
380             device->target >= DASD_STATE_BASIC)
381                 rc = dasd_state_known_to_basic(device);
382
383         if (!rc &&
384             device->state == DASD_STATE_BASIC &&
385             device->target >= DASD_STATE_READY)
386                 rc = dasd_state_basic_to_ready(device);
387
388         if (!rc &&
389             device->state == DASD_STATE_UNFMT &&
390             device->target > DASD_STATE_UNFMT)
391                 rc = -EPERM;
392
393         if (!rc &&
394             device->state == DASD_STATE_READY &&
395             device->target >= DASD_STATE_ONLINE)
396                 rc = dasd_state_ready_to_online(device);
397
398         return rc;
399 }
400
401 /*
402  * Device shutdown state changes.
403  */
404 static int dasd_decrease_state(struct dasd_device *device)
405 {
406         int rc;
407
408         rc = 0;
409         if (device->state == DASD_STATE_ONLINE &&
410             device->target <= DASD_STATE_READY)
411                 rc = dasd_state_online_to_ready(device);
412
413         if (!rc &&
414             device->state == DASD_STATE_READY &&
415             device->target <= DASD_STATE_BASIC)
416                 rc = dasd_state_ready_to_basic(device);
417
418         if (!rc &&
419             device->state == DASD_STATE_UNFMT &&
420             device->target <= DASD_STATE_BASIC)
421                 rc = dasd_state_unfmt_to_basic(device);
422
423         if (!rc &&
424             device->state == DASD_STATE_BASIC &&
425             device->target <= DASD_STATE_KNOWN)
426                 rc = dasd_state_basic_to_known(device);
427
428         if (!rc &&
429             device->state == DASD_STATE_KNOWN &&
430             device->target <= DASD_STATE_NEW)
431                 rc = dasd_state_known_to_new(device);
432
433         return rc;
434 }
435
436 /*
437  * This is the main startup/shutdown routine.
438  */
439 static void dasd_change_state(struct dasd_device *device)
440 {
441         int rc;
442
443         if (device->state == device->target)
444                 /* Already where we want to go today... */
445                 return;
446         if (device->state < device->target)
447                 rc = dasd_increase_state(device);
448         else
449                 rc = dasd_decrease_state(device);
450         if (rc && rc != -EAGAIN)
451                 device->target = device->state;
452
453         if (device->state == device->target)
454                 wake_up(&dasd_init_waitq);
455
456         /* let user-space know that the device status changed */
457         kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
458 }
459
460 /*
461  * Kick starter for devices that did not complete the startup/shutdown
462  * procedure or were sleeping because of a pending state.
463  * dasd_kick_device will schedule a call to do_kick_device via the kernel
464  * event daemon.
465  */
466 static void do_kick_device(struct work_struct *work)
467 {
468         struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
469         dasd_change_state(device);
470         dasd_schedule_device_bh(device);
471         dasd_put_device(device);
472 }
473
474 void dasd_kick_device(struct dasd_device *device)
475 {
476         dasd_get_device(device);
477         /* queue call to dasd_kick_device to the kernel event daemon. */
478         schedule_work(&device->kick_work);
479 }
480
481 /*
482  * Set the target state for a device and start the state change.
483  */
484 void dasd_set_target_state(struct dasd_device *device, int target)
485 {
486         /* If we are in probeonly mode stop at DASD_STATE_READY. */
487         if (dasd_probeonly && target > DASD_STATE_READY)
488                 target = DASD_STATE_READY;
489         if (device->target != target) {
490                 if (device->state == target)
491                         wake_up(&dasd_init_waitq);
492                 device->target = target;
493         }
494         if (device->state != device->target)
495                 dasd_change_state(device);
496 }
497
498 /*
499  * Enable a device and wait for it to reach its target state.
500  */
501 static inline int _wait_for_device(struct dasd_device *device)
502 {
503         return (device->state == device->target);
504 }
505
506 void dasd_enable_device(struct dasd_device *device)
507 {
508         dasd_set_target_state(device, DASD_STATE_ONLINE);
509         if (device->state <= DASD_STATE_KNOWN)
510                 /* No discipline for device found. */
511                 dasd_set_target_state(device, DASD_STATE_NEW);
512         /* Now wait for the devices to come up. */
513         wait_event(dasd_init_waitq, _wait_for_device(device));
514 }
515
516 /*
517  * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
518  */
519 #ifdef CONFIG_DASD_PROFILE
520
521 struct dasd_profile_info_t dasd_global_profile;
522 unsigned int dasd_profile_level = DASD_PROFILE_OFF;
523
524 /*
525  * Increments counter in global and local profiling structures.
526  */
527 #define dasd_profile_counter(value, counter, block) \
528 { \
529         int index; \
530         for (index = 0; index < 31 && value >> (2+index); index++); \
531         dasd_global_profile.counter[index]++; \
532         block->profile.counter[index]++; \
533 }
534
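/*
 * Worked example for the bucket computation below (editorial, not from the
 * original source): the loop stops at the first index for which
 * value >> (2 + index) is zero, i.e. index is roughly floor(log2(value)) - 1,
 * capped at 31.  For value = 100 the shifts 100>>2 .. 100>>6 are non-zero
 * and 100>>7 is zero, so counter[5] is incremented; values below 4 land in
 * counter[0].
 */
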
535 /*
536  * Add profiling information for cqr before execution.
537  */
538 static void dasd_profile_start(struct dasd_block *block,
539                                struct dasd_ccw_req *cqr,
540                                struct request *req)
541 {
542         struct list_head *l;
543         unsigned int counter;
544
545         if (dasd_profile_level != DASD_PROFILE_ON)
546                 return;
547
548         /* count the length of the chanq for statistics */
549         counter = 0;
550         list_for_each(l, &block->ccw_queue)
551                 if (++counter >= 31)
552                         break;
553         dasd_global_profile.dasd_io_nr_req[counter]++;
554         block->profile.dasd_io_nr_req[counter]++;
555 }
556
557 /*
558  * Add profiling information for cqr after execution.
559  */
560 static void dasd_profile_end(struct dasd_block *block,
561                              struct dasd_ccw_req *cqr,
562                              struct request *req)
563 {
564         long strtime, irqtime, endtime, tottime;        /* in microseconds */
565         long tottimeps, sectors;
566
567         if (dasd_profile_level != DASD_PROFILE_ON)
568                 return;
569
570         sectors = req->nr_sectors;
571         if (!cqr->buildclk || !cqr->startclk ||
572             !cqr->stopclk || !cqr->endclk ||
573             !sectors)
574                 return;
575
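        /*
         * Editorial note: the cqr clock stamps are S/390 TOD clock values;
         * bit 51 of the TOD clock ticks once per microsecond, so shifting a
         * difference right by 12 converts it to microseconds.
         */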
576         strtime = ((cqr->startclk - cqr->buildclk) >> 12);
577         irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
578         endtime = ((cqr->endclk - cqr->stopclk) >> 12);
579         tottime = ((cqr->endclk - cqr->buildclk) >> 12);
580         tottimeps = tottime / sectors;
581
582         if (!dasd_global_profile.dasd_io_reqs)
583                 memset(&dasd_global_profile, 0,
584                        sizeof(struct dasd_profile_info_t));
585         dasd_global_profile.dasd_io_reqs++;
586         dasd_global_profile.dasd_io_sects += sectors;
587
588         if (!block->profile.dasd_io_reqs)
589                 memset(&block->profile, 0,
590                        sizeof(struct dasd_profile_info_t));
591         block->profile.dasd_io_reqs++;
592         block->profile.dasd_io_sects += sectors;
593
594         dasd_profile_counter(sectors, dasd_io_secs, block);
595         dasd_profile_counter(tottime, dasd_io_times, block);
596         dasd_profile_counter(tottimeps, dasd_io_timps, block);
597         dasd_profile_counter(strtime, dasd_io_time1, block);
598         dasd_profile_counter(irqtime, dasd_io_time2, block);
599         dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, block);
600         dasd_profile_counter(endtime, dasd_io_time3, block);
601 }
602 #else
603 #define dasd_profile_start(block, cqr, req) do {} while (0)
604 #define dasd_profile_end(block, cqr, req) do {} while (0)
605 #endif                          /* CONFIG_DASD_PROFILE */
606
607 /*
608  * Allocate memory for a channel program with 'cplength' channel
609  * command words and 'datasize' additional space. There are two
610  * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
611  * memory and 2) dasd_smalloc_request uses the static ccw memory
612  * that gets allocated for each device.
613  */
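/*
 * Editorial note (inferred from the two allocators below, not original
 * text): dasd_kmalloc_request takes fresh GFP_ATOMIC/GFP_DMA memory from
 * the slab allocator, while dasd_smalloc_request carves the request out of
 * the per-device ccw_chunks pool preallocated in dasd_alloc_device, which
 * makes it suitable for paths that must not depend on the slab allocator
 * succeeding.
 */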
614 struct dasd_ccw_req *dasd_kmalloc_request(char *magic, int cplength,
615                                           int datasize,
616                                           struct dasd_device *device)
617 {
618         struct dasd_ccw_req *cqr;
619
620         /* Sanity checks */
621         BUG_ON( magic == NULL || datasize > PAGE_SIZE ||
622              (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
623
624         cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
625         if (cqr == NULL)
626                 return ERR_PTR(-ENOMEM);
627         cqr->cpaddr = NULL;
628         if (cplength > 0) {
629                 cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
630                                       GFP_ATOMIC | GFP_DMA);
631                 if (cqr->cpaddr == NULL) {
632                         kfree(cqr);
633                         return ERR_PTR(-ENOMEM);
634                 }
635         }
636         cqr->data = NULL;
637         if (datasize > 0) {
638                 cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
639                 if (cqr->data == NULL) {
640                         kfree(cqr->cpaddr);
641                         kfree(cqr);
642                         return ERR_PTR(-ENOMEM);
643                 }
644         }
645         strncpy((char *) &cqr->magic, magic, 4);
646         ASCEBC((char *) &cqr->magic, 4);
647         set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
648         dasd_get_device(device);
649         return cqr;
650 }
651
652 struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength,
653                                           int datasize,
654                                           struct dasd_device *device)
655 {
656         unsigned long flags;
657         struct dasd_ccw_req *cqr;
658         char *data;
659         int size;
660
661         /* Sanity checks */
662         BUG_ON( magic == NULL || datasize > PAGE_SIZE ||
663              (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
664
665         size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
666         if (cplength > 0)
667                 size += cplength * sizeof(struct ccw1);
668         if (datasize > 0)
669                 size += datasize;
670         spin_lock_irqsave(&device->mem_lock, flags);
671         cqr = (struct dasd_ccw_req *)
672                 dasd_alloc_chunk(&device->ccw_chunks, size);
673         spin_unlock_irqrestore(&device->mem_lock, flags);
674         if (cqr == NULL)
675                 return ERR_PTR(-ENOMEM);
676         memset(cqr, 0, sizeof(struct dasd_ccw_req));
677         data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
678         cqr->cpaddr = NULL;
679         if (cplength > 0) {
680                 cqr->cpaddr = (struct ccw1 *) data;
681                 data += cplength*sizeof(struct ccw1);
682                 memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
683         }
684         cqr->data = NULL;
685         if (datasize > 0) {
686                 cqr->data = data;
687                 memset(cqr->data, 0, datasize);
688         }
689         strncpy((char *) &cqr->magic, magic, 4);
690         ASCEBC((char *) &cqr->magic, 4);
691         set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
692         dasd_get_device(device);
693         return cqr;
694 }
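
/*
 * Layout sketch for a request built by dasd_smalloc_request (editorial,
 * derived from the code above): one chunk from device->ccw_chunks holds the
 * struct dasd_ccw_req rounded up to an 8-byte boundary ((x + 7L) & -8L),
 * followed by cplength struct ccw1 entries (cqr->cpaddr) and then datasize
 * bytes of payload (cqr->data); dasd_sfree_request returns the whole chunk
 * to the pool in one piece.
 */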
695
696 /*
697  * Free memory of a channel program. This function needs to free all the
698  * idal lists that might have been created by dasd_set_cda and the
699  * struct dasd_ccw_req itself.
700  */
701 void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
702 {
703 #ifdef CONFIG_64BIT
704         struct ccw1 *ccw;
705
706         /* Clear any idals used for the request. */
707         ccw = cqr->cpaddr;
708         do {
709                 clear_normalized_cda(ccw);
710         } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
711 #endif
712         kfree(cqr->cpaddr);
713         kfree(cqr->data);
714         kfree(cqr);
715         dasd_put_device(device);
716 }
717
718 void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
719 {
720         unsigned long flags;
721
722         spin_lock_irqsave(&device->mem_lock, flags);
723         dasd_free_chunk(&device->ccw_chunks, cqr);
724         spin_unlock_irqrestore(&device->mem_lock, flags);
725         dasd_put_device(device);
726 }
727
728 /*
729  * Check discipline magic in cqr.
730  */
731 static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
732 {
733         struct dasd_device *device;
734
735         if (cqr == NULL)
736                 return -EINVAL;
737         device = cqr->startdev;
738         if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
739                 DEV_MESSAGE(KERN_WARNING, device,
740                             " dasd_ccw_req 0x%08x magic doesn't match"
741                             " discipline 0x%08x",
742                             cqr->magic,
743                             *(unsigned int *) device->discipline->name);
744                 return -EINVAL;
745         }
746         return 0;
747 }
748
749 /*
750  * Terminate the current i/o and set the request to clear_pending.
751  * Timer keeps device running.
752  * ccw_device_clear can fail if the i/o subsystem
753  * is in a bad mood.
754  */
755 int dasd_term_IO(struct dasd_ccw_req *cqr)
756 {
757         struct dasd_device *device;
758         int retries, rc;
759
760         /* Check the cqr */
761         rc = dasd_check_cqr(cqr);
762         if (rc)
763                 return rc;
764         retries = 0;
765         device = (struct dasd_device *) cqr->startdev;
766         while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
767                 rc = ccw_device_clear(device->cdev, (long) cqr);
768                 switch (rc) {
769                 case 0: /* termination successful */
770                         cqr->retries--;
771                         cqr->status = DASD_CQR_CLEAR_PENDING;
772                         cqr->stopclk = get_clock();
773                         cqr->starttime = 0;
774                         DBF_DEV_EVENT(DBF_DEBUG, device,
775                                       "terminate cqr %p successful",
776                                       cqr);
777                         break;
778                 case -ENODEV:
779                         DBF_DEV_EVENT(DBF_ERR, device, "%s",
780                                       "device gone, retry");
781                         break;
782                 case -EIO:
783                         DBF_DEV_EVENT(DBF_ERR, device, "%s",
784                                       "I/O error, retry");
785                         break;
786                 case -EINVAL:
787                 case -EBUSY:
788                         DBF_DEV_EVENT(DBF_ERR, device, "%s",
789                                       "device busy, retry later");
790                         break;
791                 default:
792                         DEV_MESSAGE(KERN_ERR, device,
793                                     "line %d unknown RC=%d, please "
794                                     "report to linux390@de.ibm.com",
795                                     __LINE__, rc);
796                         BUG();
797                         break;
798                 }
799                 retries++;
800         }
801         dasd_schedule_device_bh(device);
802         return rc;
803 }
804
805 /*
806  * Start the i/o. This start_IO can fail if the channel is really busy.
807  * In that case set up a timer to start the request later.
808  */
809 int dasd_start_IO(struct dasd_ccw_req *cqr)
810 {
811         struct dasd_device *device;
812         int rc;
813
814         /* Check the cqr */
815         rc = dasd_check_cqr(cqr);
816         if (rc)
817                 return rc;
818         device = (struct dasd_device *) cqr->startdev;
819         if (cqr->retries < 0) {
820                 DEV_MESSAGE(KERN_DEBUG, device,
821                             "start_IO: request %p (%02x/%i) - no retry left.",
822                             cqr, cqr->status, cqr->retries);
823                 cqr->status = DASD_CQR_ERROR;
824                 return -EIO;
825         }
826         cqr->startclk = get_clock();
827         cqr->starttime = jiffies;
828         cqr->retries--;
829         rc = ccw_device_start(device->cdev, cqr->cpaddr, (long) cqr,
830                               cqr->lpm, 0);
831         switch (rc) {
832         case 0:
833                 cqr->status = DASD_CQR_IN_IO;
834                 DBF_DEV_EVENT(DBF_DEBUG, device,
835                               "start_IO: request %p started successful",
836                               cqr);
837                 break;
838         case -EBUSY:
839                 DBF_DEV_EVENT(DBF_ERR, device, "%s",
840                               "start_IO: device busy, retry later");
841                 break;
842         case -ETIMEDOUT:
843                 DBF_DEV_EVENT(DBF_ERR, device, "%s",
844                               "start_IO: request timeout, retry later");
845                 break;
846         case -EACCES:
847                 /* -EACCES indicates that the request used only a
848                  * subset of the available paths and all these
849                  * paths are gone.
850                  * Do a retry with all available paths.
851                  */
852                 cqr->lpm = LPM_ANYPATH;
853                 DBF_DEV_EVENT(DBF_ERR, device, "%s",
854                               "start_IO: selected paths gone,"
855                               " retry on all paths");
856                 break;
857         case -ENODEV:
858         case -EIO:
859                 DBF_DEV_EVENT(DBF_ERR, device, "%s",
860                               "start_IO: device gone, retry");
861                 break;
862         default:
863                 DEV_MESSAGE(KERN_ERR, device,
864                             "line %d unknown RC=%d, please report"
865                             " to linux390@de.ibm.com", __LINE__, rc);
866                 BUG();
867                 break;
868         }
869         return rc;
870 }
871
872 /*
873  * Timeout function for dasd devices. This is used for different purposes
874  *  1) missing interrupt handler for normal operation
875  *  2) delayed start of request where start_IO failed with -EBUSY
876  *  3) timeout for missing state change interrupts
877  * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
878  * DASD_CQR_QUEUED for 2) and 3).
879  */
880 static void dasd_device_timeout(unsigned long ptr)
881 {
882         unsigned long flags;
883         struct dasd_device *device;
884
885         device = (struct dasd_device *) ptr;
886         spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
887         /* re-activate request queue */
888         device->stopped &= ~DASD_STOPPED_PENDING;
889         spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
890         dasd_schedule_device_bh(device);
891 }
892
893 /*
894  * Setup timeout for a device in jiffies.
895  */
896 void dasd_device_set_timer(struct dasd_device *device, int expires)
897 {
898         if (expires == 0) {
899                 if (timer_pending(&device->timer))
900                         del_timer(&device->timer);
901                 return;
902         }
903         if (timer_pending(&device->timer)) {
904                 if (mod_timer(&device->timer, jiffies + expires))
905                         return;
906         }
907         device->timer.function = dasd_device_timeout;
908         device->timer.data = (unsigned long) device;
909         device->timer.expires = jiffies + expires;
910         add_timer(&device->timer);
911 }
912
913 /*
914  * Clear timeout for a device.
915  */
916 void dasd_device_clear_timer(struct dasd_device *device)
917 {
918         if (timer_pending(&device->timer))
919                 del_timer(&device->timer);
920 }
921
922 static void dasd_handle_killed_request(struct ccw_device *cdev,
923                                        unsigned long intparm)
924 {
925         struct dasd_ccw_req *cqr;
926         struct dasd_device *device;
927
928         if (!intparm)
929                 return;
930         cqr = (struct dasd_ccw_req *) intparm;
931         if (cqr->status != DASD_CQR_IN_IO) {
932                 MESSAGE(KERN_DEBUG,
933                         "invalid status in handle_killed_request: "
934                         "bus_id %s, status %02x",
935                         cdev->dev.bus_id, cqr->status);
936                 return;
937         }
938
939         device = (struct dasd_device *) cqr->startdev;
940         if (device == NULL ||
941             device != dasd_device_from_cdev_locked(cdev) ||
942             strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
943                 MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
944                         cdev->dev.bus_id);
945                 return;
946         }
947
948         /* Schedule request to be retried. */
949         cqr->status = DASD_CQR_QUEUED;
950
951         dasd_device_clear_timer(device);
952         dasd_schedule_device_bh(device);
953         dasd_put_device(device);
954 }
955
956 void dasd_generic_handle_state_change(struct dasd_device *device)
957 {
958         /* First of all start sense subsystem status request. */
959         dasd_eer_snss(device);
960
961         device->stopped &= ~DASD_STOPPED_PENDING;
962         dasd_schedule_device_bh(device);
963         if (device->block)
964                 dasd_schedule_block_bh(device->block);
965 }
966
967 /*
968  * Interrupt handler for "normal" ssch-io based dasd devices.
969  */
970 void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
971                       struct irb *irb)
972 {
973         struct dasd_ccw_req *cqr, *next;
974         struct dasd_device *device;
975         unsigned long long now;
976         int expires;
977
978         if (IS_ERR(irb)) {
979                 switch (PTR_ERR(irb)) {
980                 case -EIO:
981                         break;
982                 case -ETIMEDOUT:
983                         printk(KERN_WARNING"%s(%s): request timed out\n",
984                                __func__, cdev->dev.bus_id);
985                         break;
986                 default:
987                         printk(KERN_WARNING"%s(%s): unknown error %ld\n",
988                                __func__, cdev->dev.bus_id, PTR_ERR(irb));
989                 }
990                 dasd_handle_killed_request(cdev, intparm);
991                 return;
992         }
993
994         now = get_clock();
995
996         DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x",
997                   cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat),
998                   (unsigned int) intparm);
999
1000         /* check for unsolicited interrupts */
1001         cqr = (struct dasd_ccw_req *) intparm;
1002         if (!cqr || ((irb->scsw.cc == 1) &&
1003                      (irb->scsw.fctl & SCSW_FCTL_START_FUNC) &&
1004                      (irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) ) {
1005                 if (cqr && cqr->status == DASD_CQR_IN_IO)
1006                         cqr->status = DASD_CQR_QUEUED;
1007                 device = dasd_device_from_cdev_locked(cdev);
1008                 if (!IS_ERR(device)) {
1009                         dasd_device_clear_timer(device);
1010                         device->discipline->handle_unsolicited_interrupt(device,
1011                                                                          irb);
1012                         dasd_put_device(device);
1013                 }
1014                 return;
1015         }
1016
1017         device = (struct dasd_device *) cqr->startdev;
1018         if (!device ||
1019             strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
1020                 MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
1021                         cdev->dev.bus_id);
1022                 return;
1023         }
1024
1025         /* Check for clear pending */
1026         if (cqr->status == DASD_CQR_CLEAR_PENDING &&
1027             irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
1028                 cqr->status = DASD_CQR_CLEARED;
1029                 dasd_device_clear_timer(device);
1030                 wake_up(&dasd_flush_wq);
1031                 dasd_schedule_device_bh(device);
1032                 return;
1033         }
1034
1035         /* check status - the request might have been killed by dyn detach */
1036         if (cqr->status != DASD_CQR_IN_IO) {
1037                 MESSAGE(KERN_DEBUG,
1038                         "invalid status: bus_id %s, status %02x",
1039                         cdev->dev.bus_id, cqr->status);
1040                 return;
1041         }
1042         DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p",
1043                       ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr);
1044         next = NULL;
1045         expires = 0;
1046         if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
1047             irb->scsw.cstat == 0 && !irb->esw.esw0.erw.cons) {
1048                 /* request was completed successfully */
1049                 cqr->status = DASD_CQR_SUCCESS;
1050                 cqr->stopclk = now;
1051                 /* Start first request on queue if possible -> fast_io. */
1052                 if (cqr->devlist.next != &device->ccw_queue) {
1053                         next = list_entry(cqr->devlist.next,
1054                                           struct dasd_ccw_req, devlist);
1055                 }
1056         } else {  /* error */
1057                 memcpy(&cqr->irb, irb, sizeof(struct irb));
1058                 if (device->features & DASD_FEATURE_ERPLOG) {
1059                         dasd_log_sense(cqr, irb);
1060                 }
1061                 /*
1062                  * If we don't want complex ERP for this request, then just
1063                  * reset this and retry it in the fastpath
1064                  */
1065                 if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
1066                     cqr->retries > 0) {
1067                         DEV_MESSAGE(KERN_DEBUG, device,
1068                                     "default ERP in fastpath (%i retries left)",
1069                                     cqr->retries);
1070                         cqr->lpm    = LPM_ANYPATH;
1071                         cqr->status = DASD_CQR_QUEUED;
1072                         next = cqr;
1073                 } else
1074                         cqr->status = DASD_CQR_ERROR;
1075         }
1076         if (next && (next->status == DASD_CQR_QUEUED) &&
1077             (!device->stopped)) {
1078                 if (device->discipline->start_IO(next) == 0)
1079                         expires = next->expires;
1080                 else
1081                         DEV_MESSAGE(KERN_DEBUG, device, "%s",
1082                                     "Interrupt fastpath "
1083                                     "failed!");
1084         }
1085         if (expires != 0)
1086                 dasd_device_set_timer(device, expires);
1087         else
1088                 dasd_device_clear_timer(device);
1089         dasd_schedule_device_bh(device);
1090 }
1091
1092 /*
1093  * If we have an error on a dasd_block layer request then we cancel
1094  * and return all further requests from the same dasd_block as well.
1095  */
1096 static void __dasd_device_recovery(struct dasd_device *device,
1097                                    struct dasd_ccw_req *ref_cqr)
1098 {
1099         struct list_head *l, *n;
1100         struct dasd_ccw_req *cqr;
1101
1102         /*
1103          * only requeue requests that came from the dasd_block layer
1104          */
1105         if (!ref_cqr->block)
1106                 return;
1107
1108         list_for_each_safe(l, n, &device->ccw_queue) {
1109                 cqr = list_entry(l, struct dasd_ccw_req, devlist);
1110                 if (cqr->status == DASD_CQR_QUEUED &&
1111                     ref_cqr->block == cqr->block) {
1112                         cqr->status = DASD_CQR_CLEARED;
1113                 }
1114         }
1115 }
1116
1117 /*
1118  * Remove those ccw requests from the queue that need to be returned
1119  * to the upper layer.
1120  */
1121 static void __dasd_device_process_ccw_queue(struct dasd_device *device,
1122                                             struct list_head *final_queue)
1123 {
1124         struct list_head *l, *n;
1125         struct dasd_ccw_req *cqr;
1126
1127         /* Process request with final status. */
1128         list_for_each_safe(l, n, &device->ccw_queue) {
1129                 cqr = list_entry(l, struct dasd_ccw_req, devlist);
1130
1131                 /* Stop list processing at the first non-final request. */
1132                 if (cqr->status == DASD_CQR_QUEUED ||
1133                     cqr->status == DASD_CQR_IN_IO ||
1134                     cqr->status == DASD_CQR_CLEAR_PENDING)
1135                         break;
1136                 if (cqr->status == DASD_CQR_ERROR) {
1137                         __dasd_device_recovery(device, cqr);
1138                 }
1139                 /* Rechain finished requests to final queue */
1140                 list_move_tail(&cqr->devlist, final_queue);
1141         }
1142 }
1143
1144 /*
1145  * the cqrs from the final queue are returned to the upper layer
1146  * by setting the final cqr status and calling the callback function
1147  */
1148 static void __dasd_device_process_final_queue(struct dasd_device *device,
1149                                               struct list_head *final_queue)
1150 {
1151         struct list_head *l, *n;
1152         struct dasd_ccw_req *cqr;
1153         struct dasd_block *block;
1154
1155         list_for_each_safe(l, n, final_queue) {
1156                 cqr = list_entry(l, struct dasd_ccw_req, devlist);
1157                 list_del_init(&cqr->devlist);
1158                 block = cqr->block;
1159                 if (block)
1160                         spin_lock_bh(&block->queue_lock);
1161                 switch (cqr->status) {
1162                 case DASD_CQR_SUCCESS:
1163                         cqr->status = DASD_CQR_DONE;
1164                         break;
1165                 case DASD_CQR_ERROR:
1166                         cqr->status = DASD_CQR_NEED_ERP;
1167                         break;
1168                 case DASD_CQR_CLEARED:
1169                         cqr->status = DASD_CQR_TERMINATED;
1170                         break;
1171                 default:
1172                         DEV_MESSAGE(KERN_ERR, device,
1173                     "wrong cqr status in __dasd_device_process_final_queue "
1174                                     "for cqr %p, status %x",
1175                                     cqr, cqr->status);
1176                         BUG();
1177                 }
1178                 if (cqr->callback != NULL)
1179                         (cqr->callback)(cqr, cqr->callback_data);
1180                 if (block)
1181                         spin_unlock_bh(&block->queue_lock);
1182         }
1183 }
1184
1185 /*
1186  * Take a look at the first request on the ccw queue and check
1187  * if it reached its expire time. If so, terminate the IO.
1188  */
1189 static void __dasd_device_check_expire(struct dasd_device *device)
1190 {
1191         struct dasd_ccw_req *cqr;
1192
1193         if (list_empty(&device->ccw_queue))
1194                 return;
1195         cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1196         if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
1197             (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
1198                 if (device->discipline->term_IO(cqr) != 0) {
1199                         /* Hmpf, try again in 5 sec */
1200                         DEV_MESSAGE(KERN_ERR, device,
1201                                     "internal error - timeout (%is) expired "
1202                                     "for cqr %p, termination failed, "
1203                                     "retrying in 5s",
1204                                     (cqr->expires/HZ), cqr);
1205                         cqr->expires += 5*HZ;
1206                         dasd_device_set_timer(device, 5*HZ);
1207                 } else {
1208                         DEV_MESSAGE(KERN_ERR, device,
1209                                     "internal error - timeout (%is) expired "
1210                                     "for cqr %p (%i retries left)",
1211                                     (cqr->expires/HZ), cqr, cqr->retries);
1212                 }
1213         }
1214 }
1215
1216 /*
1217  * Take a look at the first request on the ccw queue and check
1218  * if it needs to be started.
1219  */
1220 static void __dasd_device_start_head(struct dasd_device *device)
1221 {
1222         struct dasd_ccw_req *cqr;
1223         int rc;
1224
1225         if (list_empty(&device->ccw_queue))
1226                 return;
1227         cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1228         if (cqr->status != DASD_CQR_QUEUED)
1229                 return;
1230         /* when device is stopped, return request to previous layer */
1231         if (device->stopped) {
1232                 cqr->status = DASD_CQR_CLEARED;
1233                 dasd_schedule_device_bh(device);
1234                 return;
1235         }
1236
1237         rc = device->discipline->start_IO(cqr);
1238         if (rc == 0)
1239                 dasd_device_set_timer(device, cqr->expires);
1240         else if (rc == -EACCES) {
1241                 dasd_schedule_device_bh(device);
1242         } else
1243                 /* Hmpf, try again in 1/2 sec */
1244                 dasd_device_set_timer(device, 50);
1245 }
1246
1247 /*
1248  * Go through all requests on the dasd_device request queue,
1249  * terminate them on the cdev if necessary, and return them to the
1250  * submitting layer via callback.
1251  * Note:
1252  * Make sure that all 'submitting layers' still exist when
1253  * this function is called! In other words, when 'device' is a base
1254  * device then all block layer requests must already have been removed
1255  * via dasd_flush_block_queue.
1256  */
1257 int dasd_flush_device_queue(struct dasd_device *device)
1258 {
1259         struct dasd_ccw_req *cqr, *n;
1260         int rc;
1261         struct list_head flush_queue;
1262
1263         INIT_LIST_HEAD(&flush_queue);
1264         spin_lock_irq(get_ccwdev_lock(device->cdev));
1265         rc = 0;
1266         list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
1267                 /* Check status and move request to flush_queue */
1268                 switch (cqr->status) {
1269                 case DASD_CQR_IN_IO:
1270                         rc = device->discipline->term_IO(cqr);
1271                         if (rc) {
1272                                 /* unable to terminate request */
1273                                 DEV_MESSAGE(KERN_ERR, device,
1274                                             "dasd flush ccw_queue is unable "
1275                                             " to terminate request %p",
1276                                             cqr);
1277                                 /* stop flush processing */
1278                                 goto finished;
1279                         }
1280                         break;
1281                 case DASD_CQR_QUEUED:
1282                         cqr->stopclk = get_clock();
1283                         cqr->status = DASD_CQR_CLEARED;
1284                         break;
1285                 default: /* no need to modify the others */
1286                         break;
1287                 }
1288                 list_move_tail(&cqr->devlist, &flush_queue);
1289         }
1290 finished:
1291         spin_unlock_irq(get_ccwdev_lock(device->cdev));
1292         /*
1293          * After this point all requests must be in state CLEAR_PENDING,
1294          * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
1295          * one of the others.
1296          */
1297         list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
1298                 wait_event(dasd_flush_wq,
1299                            (cqr->status != DASD_CQR_CLEAR_PENDING));
1300         /*
1301          * Now set each request back to TERMINATED, DONE or NEED_ERP
1302          * and call the callback function of flushed requests
1303          */
1304         __dasd_device_process_final_queue(device, &flush_queue);
1305         return rc;
1306 }
1307
1308 /*
1309  * Acquire the device lock and process queues for the device.
1310  */
1311 static void dasd_device_tasklet(struct dasd_device *device)
1312 {
1313         struct list_head final_queue;
1314
1315         atomic_set (&device->tasklet_scheduled, 0);
1316         INIT_LIST_HEAD(&final_queue);
1317         spin_lock_irq(get_ccwdev_lock(device->cdev));
1318         /* Check expire time of first request on the ccw queue. */
1319         __dasd_device_check_expire(device);
1320         /* find final requests on ccw queue */
1321         __dasd_device_process_ccw_queue(device, &final_queue);
1322         spin_unlock_irq(get_ccwdev_lock(device->cdev));
1323         /* Now call the callback function of requests with final status */
1324         __dasd_device_process_final_queue(device, &final_queue);
1325         spin_lock_irq(get_ccwdev_lock(device->cdev));
1326         /* Now check if the head of the ccw queue needs to be started. */
1327         __dasd_device_start_head(device);
1328         spin_unlock_irq(get_ccwdev_lock(device->cdev));
1329         dasd_put_device(device);
1330 }
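
/*
 * Editorial note on the locking pattern above: the ccw queue is scanned and
 * pruned under the ccwdev lock, but finished requests are first moved to a
 * private final_queue so that their callbacks run without the ccwdev lock
 * held; __dasd_device_process_final_queue takes block->queue_lock instead.
 */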
1331
1332 /*
1333  * Schedule a call to dasd_device_tasklet over the device tasklet.
1334  */
1335 void dasd_schedule_device_bh(struct dasd_device *device)
1336 {
1337         /* Protect against rescheduling. */
1338         if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
1339                 return;
1340         dasd_get_device(device);
1341         tasklet_hi_schedule(&device->tasklet);
1342 }
1343
1344 /*
1345  * Queue a request to the head of the device ccw_queue.
1346  * Start the I/O if possible.
1347  */
1348 void dasd_add_request_head(struct dasd_ccw_req *cqr)
1349 {
1350         struct dasd_device *device;
1351         unsigned long flags;
1352
1353         device = cqr->startdev;
1354         spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1355         cqr->status = DASD_CQR_QUEUED;
1356         list_add(&cqr->devlist, &device->ccw_queue);
1357         /* let the bh start the request to keep them in order */
1358         dasd_schedule_device_bh(device);
1359         spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1360 }
1361
1362 /*
1363  * Queue a request to the tail of the device ccw_queue.
1364  * Start the I/O if possible.
1365  */
1366 void dasd_add_request_tail(struct dasd_ccw_req *cqr)
1367 {
1368         struct dasd_device *device;
1369         unsigned long flags;
1370
1371         device = cqr->startdev;
1372         spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1373         cqr->status = DASD_CQR_QUEUED;
1374         list_add_tail(&cqr->devlist, &device->ccw_queue);
1375         /* let the bh start the request to keep them in order */
1376         dasd_schedule_device_bh(device);
1377         spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1378 }
1379
1380 /*
1381  * Wakeup helper for the 'sleep_on' functions.
1382  */
1383 static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
1384 {
1385         wake_up((wait_queue_head_t *) data);
1386 }
1387
1388 static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
1389 {
1390         struct dasd_device *device;
1391         int rc;
1392
1393         device = cqr->startdev;
1394         spin_lock_irq(get_ccwdev_lock(device->cdev));
1395         rc = ((cqr->status == DASD_CQR_DONE ||
1396                cqr->status == DASD_CQR_NEED_ERP ||
1397                cqr->status == DASD_CQR_TERMINATED) &&
1398               list_empty(&cqr->devlist));
1399         spin_unlock_irq(get_ccwdev_lock(device->cdev));
1400         return rc;
1401 }
1402
1403 /*
1404  * Queue a request to the tail of the device ccw_queue and wait for
1405  * its completion.
1406  */
1407 int dasd_sleep_on(struct dasd_ccw_req *cqr)
1408 {
1409         wait_queue_head_t wait_q;
1410         struct dasd_device *device;
1411         int rc;
1412
1413         device = cqr->startdev;
1414
1415         init_waitqueue_head (&wait_q);
1416         cqr->callback = dasd_wakeup_cb;
1417         cqr->callback_data = (void *) &wait_q;
1418         dasd_add_request_tail(cqr);
1419         wait_event(wait_q, _wait_for_wakeup(cqr));
1420
1421         /* Request status is either done or failed. */
1422         rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
1423         return rc;
1424 }
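
/*
 * Illustrative caller pattern for the synchronous interface (editorial
 * sketch, assumed from typical discipline usage; the concrete channel
 * program setup is omitted):
 *
 *      cqr = dasd_smalloc_request(magic, cplength, datasize, device);
 *      if (IS_ERR(cqr))
 *              return PTR_ERR(cqr);
 *      ... fill cqr->cpaddr and cqr->data ...
 *      cqr->startdev = device;
 *      cqr->retries = 2;
 *      cqr->expires = 10 * HZ;
 *      cqr->buildclk = get_clock();
 *      cqr->status = DASD_CQR_FILLED;
 *      rc = dasd_sleep_on(cqr);
 *      dasd_sfree_request(cqr, device);
 */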
1425
1426 /*
1427  * Queue a request to the tail of the device ccw_queue and wait
1428  * interruptibly for its completion.
1429  */
1430 int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
1431 {
1432         wait_queue_head_t wait_q;
1433         struct dasd_device *device;
1434         int rc;
1435
1436         device = cqr->startdev;
1437         init_waitqueue_head (&wait_q);
1438         cqr->callback = dasd_wakeup_cb;
1439         cqr->callback_data = (void *) &wait_q;
1440         dasd_add_request_tail(cqr);
1441         rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr));
1442         if (rc == -ERESTARTSYS) {
1443                 dasd_cancel_req(cqr);
1444                 /* wait (non-interruptible) for final status */
1445                 wait_event(wait_q, _wait_for_wakeup(cqr));
1446         }
1447         rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
1448         return rc;
1449 }
1450
1451 /*
1452  * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
1453  * for eckd devices) the currently running request has to be terminated
1454  * and be put back to status queued, before the special request is added
1455  * to the head of the queue. Then the special request is waited on normally.
1456  */
1457 static inline int _dasd_term_running_cqr(struct dasd_device *device)
1458 {
1459         struct dasd_ccw_req *cqr;
1460
1461         if (list_empty(&device->ccw_queue))
1462                 return 0;
1463         cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1464         return device->discipline->term_IO(cqr);
1465 }
1466
1467 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
1468 {
1469         wait_queue_head_t wait_q;
1470         struct dasd_device *device;
1471         int rc;
1472
1473         device = cqr->startdev;
1474         spin_lock_irq(get_ccwdev_lock(device->cdev));
1475         rc = _dasd_term_running_cqr(device);
1476         if (rc) {
1477                 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1478                 return rc;
1479         }
1480
1481         init_waitqueue_head(&wait_q);
1482         cqr->callback = dasd_wakeup_cb;
1483         cqr->callback_data = (void *) &wait_q;
1484         cqr->status = DASD_CQR_QUEUED;
1485         list_add(&cqr->devlist, &device->ccw_queue);
1486
1487         /* let the bh start the requests to keep them in order */
1488         dasd_schedule_device_bh(device);
1489
1490         spin_unlock_irq(get_ccwdev_lock(device->cdev));
1491
1492         wait_event(wait_q, _wait_for_wakeup(cqr));
1493
1494         /* Request status is either done or failed. */
1495         rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
1496         return rc;
1497 }
1498
1499 /*
1500  * Cancels a request that was started with dasd_sleep_on.
1501  * This is useful for timing out requests. The request will be
1502  * terminated if it is currently in i/o.
1503  * Returns 1 if the request has been terminated.
1504  *         0 if there was no need to terminate the request (not started yet)
1505  *         negative error code if termination failed
1506  * Cancellation of a request is an asynchronous operation! The calling
1507  * function has to wait until the request is properly returned via callback.
1508  */
1509 int dasd_cancel_req(struct dasd_ccw_req *cqr)
1510 {
1511         struct dasd_device *device = cqr->startdev;
1512         unsigned long flags;
1513         int rc;
1514
1515         rc = 0;
1516         spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1517         switch (cqr->status) {
1518         case DASD_CQR_QUEUED:
1519                 /* request was not started - just set to cleared */
1520                 cqr->status = DASD_CQR_CLEARED;
1521                 break;
1522         case DASD_CQR_IN_IO:
1523                 /* request in IO - terminate IO and release again */
1524                 rc = device->discipline->term_IO(cqr);
1525                 if (rc) {
1526                         DEV_MESSAGE(KERN_ERR, device,
1527                                     "dasd_cancel_req is unable "
1528                                     "to terminate request %p, rc = %d",
1529                                     cqr, rc);
1530                 } else {
1531                         cqr->stopclk = get_clock();
1532                         rc = 1;
1533                 }
1534                 break;
1535         default: /* already finished or clear pending - do nothing */
1536                 break;
1537         }
1538         spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1539         dasd_schedule_device_bh(device);
1540         return rc;
1541 }
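
/*
 * Example of the asynchronous cancel protocol (mirrors the interruptible
 * sleep_on path above): cancel the request, keep waiting for the wakeup
 * callback, and only then inspect the final status:
 *
 *	dasd_cancel_req(cqr);
 *	wait_event(wait_q, _wait_for_wakeup(cqr));
 *	rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
 */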
1542
1543
1544 /*
1545  * SECTION: Operations of the dasd_block layer.
1546  */
1547
1548 /*
1549  * Timeout function for dasd_block. This is used when the block layer
1550  * is waiting for something that may not come reliably (e.g. a state
1551  * change interrupt).
1552  */
1553 static void dasd_block_timeout(unsigned long ptr)
1554 {
1555         unsigned long flags;
1556         struct dasd_block *block;
1557
1558         block = (struct dasd_block *) ptr;
1559         spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
1560         /* re-activate request queue */
1561         block->base->stopped &= ~DASD_STOPPED_PENDING;
1562         spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
1563         dasd_schedule_block_bh(block);
1564 }
1565
1566 /*
1567  * Setup timeout for a dasd_block in jiffies.
1568  */
1569 void dasd_block_set_timer(struct dasd_block *block, int expires)
1570 {
1571         if (expires == 0) {
1572                 if (timer_pending(&block->timer))
1573                         del_timer(&block->timer);
1574                 return;
1575         }
1576         if (timer_pending(&block->timer)) {
1577                 if (mod_timer(&block->timer, jiffies + expires))
1578                         return;
1579         }
1580         block->timer.function = dasd_block_timeout;
1581         block->timer.data = (unsigned long) block;
1582         block->timer.expires = jiffies + expires;
1583         add_timer(&block->timer);
1584 }
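
/*
 * Usage note (illustrative): callers pass a relative timeout in jiffies,
 * e.g. dasd_block_set_timer(block, HZ/2) to re-trigger the block bh after
 * half a second, or dasd_block_set_timer(block, 0) to cancel a pending
 * timer (see __dasd_process_request_queue below).
 */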
1585
1586 /*
1587  * Clear timeout for a dasd_block.
1588  */
1589 void dasd_block_clear_timer(struct dasd_block *block)
1590 {
1591         if (timer_pending(&block->timer))
1592                 del_timer(&block->timer);
1593 }
1594
1595 /*
1596  * notify the block layer about a finished request
1597  */
1598 static inline void dasd_end_request(struct request *req, int error)
1599 {
1600         if (__blk_end_request(req, error, blk_rq_bytes(req)))
1601                 BUG();
1602 }
1603
1604 /*
1605  * Process finished error recovery ccw.
1606  */
1607 static inline void __dasd_block_process_erp(struct dasd_block *block,
1608                                             struct dasd_ccw_req *cqr)
1609 {
1610         dasd_erp_fn_t erp_fn;
1611         struct dasd_device *device = block->base;
1612
1613         if (cqr->status == DASD_CQR_DONE)
1614                 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
1615         else
1616                 DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful");
1617         erp_fn = device->discipline->erp_postaction(cqr);
1618         erp_fn(cqr);
1619 }
1620
1621 /*
1622  * Fetch requests from the block device queue.
1623  */
1624 static void __dasd_process_request_queue(struct dasd_block *block)
1625 {
1626         struct request_queue *queue;
1627         struct request *req;
1628         struct dasd_ccw_req *cqr;
1629         struct dasd_device *basedev;
1630         unsigned long flags;
1631         queue = block->request_queue;
1632         basedev = block->base;
1633         /* No queue ? Then there is nothing to do. */
1634         if (queue == NULL)
1635                 return;
1636
1637         /*
1638          * We requeue requests from the block device queue to the ccw
1639          * queue only in two states. In state DASD_STATE_READY the
1640          * partition detection is done and we need to requeue requests
1641          * for that. State DASD_STATE_ONLINE is normal block device
1642          * operation.
1643          */
1644         if (basedev->state < DASD_STATE_READY)
1645                 return;
1646         /* Now we try to fetch requests from the request queue */
1647         while (!blk_queue_plugged(queue) &&
1648                elv_next_request(queue)) {
1649
1650                 req = elv_next_request(queue);
1651
1652                 if (basedev->features & DASD_FEATURE_READONLY &&
1653                     rq_data_dir(req) == WRITE) {
1654                         DBF_DEV_EVENT(DBF_ERR, basedev,
1655                                       "Rejecting write request %p",
1656                                       req);
1657                         blkdev_dequeue_request(req);
1658                         dasd_end_request(req, -EIO);
1659                         continue;
1660                 }
1661                 cqr = basedev->discipline->build_cp(basedev, block, req);
1662                 if (IS_ERR(cqr)) {
1663                         if (PTR_ERR(cqr) == -EBUSY)
1664                                 break;  /* normal end condition */
1665                         if (PTR_ERR(cqr) == -ENOMEM)
1666                                 break;  /* terminate request queue loop */
1667                         if (PTR_ERR(cqr) == -EAGAIN) {
1668                                 /*
1669                                  * The current request cannot be built right
1670                                  * now; we have to try again later. If this request
1671                                  * is the head-of-queue we stop the device
1672                                  * for 1/2 second.
1673                                  */
1674                                 if (!list_empty(&block->ccw_queue))
1675                                         break;
1676                                 spin_lock_irqsave(get_ccwdev_lock(basedev->cdev), flags);
1677                                 basedev->stopped |= DASD_STOPPED_PENDING;
1678                                 spin_unlock_irqrestore(get_ccwdev_lock(basedev->cdev), flags);
1679                                 dasd_block_set_timer(block, HZ/2);
1680                                 break;
1681                         }
1682                         DBF_DEV_EVENT(DBF_ERR, basedev,
1683                                       "CCW creation failed (rc=%ld) "
1684                                       "on request %p",
1685                                       PTR_ERR(cqr), req);
1686                         blkdev_dequeue_request(req);
1687                         dasd_end_request(req, -EIO);
1688                         continue;
1689                 }
1690                 /*
1691                  * Note: the callback is set to dasd_return_cqr_cb in
1692                  * __dasd_block_start_head to cover erp requests as well.
1693                  */
1694                 cqr->callback_data = (void *) req;
1695                 cqr->status = DASD_CQR_FILLED;
1696                 blkdev_dequeue_request(req);
1697                 list_add_tail(&cqr->blocklist, &block->ccw_queue);
1698                 dasd_profile_start(block, cqr, req);
1699         }
1700 }
1701
1702 static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
1703 {
1704         struct request *req;
1705         int status;
1706         int error = 0;
1707
1708         req = (struct request *) cqr->callback_data;
1709         dasd_profile_end(cqr->block, cqr, req);
1710         status = cqr->block->base->discipline->free_cp(cqr, req);
1711         if (status <= 0)
1712                 error = status ? status : -EIO;
1713         dasd_end_request(req, error);
1714 }
1715
1716 /*
1717  * Process ccw request queue.
1718  */
1719 static void __dasd_process_block_ccw_queue(struct dasd_block *block,
1720                                            struct list_head *final_queue)
1721 {
1722         struct list_head *l, *n;
1723         struct dasd_ccw_req *cqr;
1724         dasd_erp_fn_t erp_fn;
1725         unsigned long flags;
1726         struct dasd_device *base = block->base;
1727
1728 restart:
1729         /* Process requests with final status. */
1730         list_for_each_safe(l, n, &block->ccw_queue) {
1731                 cqr = list_entry(l, struct dasd_ccw_req, blocklist);
1732                 if (cqr->status != DASD_CQR_DONE &&
1733                     cqr->status != DASD_CQR_FAILED &&
1734                     cqr->status != DASD_CQR_NEED_ERP &&
1735                     cqr->status != DASD_CQR_TERMINATED)
1736                         continue;
1737
1738                 if (cqr->status == DASD_CQR_TERMINATED) {
1739                         base->discipline->handle_terminated_request(cqr);
1740                         goto restart;
1741                 }
1742
1743                 /*  Process requests that may be recovered */
1744                 if (cqr->status == DASD_CQR_NEED_ERP) {
1745                         erp_fn = base->discipline->erp_action(cqr);
1746                         erp_fn(cqr);
1747                         goto restart;
1748                 }
1749
1750                 /* First of all call extended error reporting. */
1751                 if (dasd_eer_enabled(base) &&
1752                     cqr->status == DASD_CQR_FAILED) {
1753                         dasd_eer_write(base, cqr, DASD_EER_FATALERROR);
1754
1755                         /* restart request  */
1756                         cqr->status = DASD_CQR_FILLED;
1757                         cqr->retries = 255;
1758                         spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
1759                         base->stopped |= DASD_STOPPED_QUIESCE;
1760                         spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
1761                                                flags);
1762                         goto restart;
1763                 }
1764
1765                 /* Process finished ERP request. */
1766                 if (cqr->refers) {
1767                         __dasd_block_process_erp(block, cqr);
1768                         goto restart;
1769                 }
1770
1771                 /* Rechain finished requests to final queue */
1772                 cqr->endclk = get_clock();
1773                 list_move_tail(&cqr->blocklist, final_queue);
1774         }
1775 }
1776
1777 static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
1778 {
1779         dasd_schedule_block_bh(cqr->block);
1780 }
1781
1782 static void __dasd_block_start_head(struct dasd_block *block)
1783 {
1784         struct dasd_ccw_req *cqr;
1785
1786         if (list_empty(&block->ccw_queue))
1787                 return;
1788         /* We always begin with the first requests on the queue, as some
1789          * of the previously started requests have to be enqueued on a
1790          * dasd_device again for error recovery.
1791          */
1792         list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
1793                 if (cqr->status != DASD_CQR_FILLED)
1794                         continue;
1795                 /* Non-temporary stop condition will trigger fail fast */
1796                 if (block->base->stopped & ~DASD_STOPPED_PENDING &&
1797                     test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
1798                     (!dasd_eer_enabled(block->base))) {
1799                         cqr->status = DASD_CQR_FAILED;
1800                         dasd_schedule_block_bh(block);
1801                         continue;
1802                 }
1803                 /* Don't try to start requests if device is stopped */
1804                 if (block->base->stopped)
1805                         return;
1806
1807                 /* just a fail safe check, should not happen */
1808                 if (!cqr->startdev)
1809                         cqr->startdev = block->base;
1810
1811                 /* make sure that the requests we submit find their way back */
1812                 cqr->callback = dasd_return_cqr_cb;
1813
1814                 dasd_add_request_tail(cqr);
1815         }
1816 }
1817
1818 /*
1819  * Central dasd_block layer routine. Takes requests from the generic
1820  * block layer request queue, creates ccw requests, enqueues them on
1821  * a dasd_device and processes ccw requests that have been returned.
1822  */
1823 static void dasd_block_tasklet(struct dasd_block *block)
1824 {
1825         struct list_head final_queue;
1826         struct list_head *l, *n;
1827         struct dasd_ccw_req *cqr;
1828
1829         atomic_set(&block->tasklet_scheduled, 0);
1830         INIT_LIST_HEAD(&final_queue);
1831         spin_lock(&block->queue_lock);
1832         /* Finish off requests on ccw queue */
1833         __dasd_process_block_ccw_queue(block, &final_queue);
1834         spin_unlock(&block->queue_lock);
1835         /* Now call the callback function of requests with final status */
1836         spin_lock_irq(&block->request_queue_lock);
1837         list_for_each_safe(l, n, &final_queue) {
1838                 cqr = list_entry(l, struct dasd_ccw_req, blocklist);
1839                 list_del_init(&cqr->blocklist);
1840                 __dasd_cleanup_cqr(cqr);
1841         }
1842         spin_lock(&block->queue_lock);
1843         /* Get new request from the block device request queue */
1844         __dasd_process_request_queue(block);
1845         /* Now check if the head of the ccw queue needs to be started. */
1846         __dasd_block_start_head(block);
1847         spin_unlock(&block->queue_lock);
1848         spin_unlock_irq(&block->request_queue_lock);
1849         dasd_put_device(block->base);
1850 }
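
/*
 * Lock ordering note (derived from the tasklet above): request_queue_lock
 * is taken with interrupts disabled and queue_lock is nested inside it;
 * do_dasd_request() below is entered by the block layer with
 * request_queue_lock already held.
 */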
1851
1852 static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
1853 {
1854         wake_up(&dasd_flush_wq);
1855 }
1856
1857 /*
1858  * Go through all requests on the dasd_block request queue, cancel them
1859  * on the respective dasd_device, and return them to the generic
1860  * block layer.
1861  */
1862 static int dasd_flush_block_queue(struct dasd_block *block)
1863 {
1864         struct dasd_ccw_req *cqr, *n;
1865         int rc, i;
1866         struct list_head flush_queue;
1867
1868         INIT_LIST_HEAD(&flush_queue);
1869         spin_lock_bh(&block->queue_lock);
1870         rc = 0;
1871 restart:
1872         list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
1873                 /* if this request is currently owned by a dasd_device, cancel it */
1874                 if (cqr->status >= DASD_CQR_QUEUED)
1875                         rc = dasd_cancel_req(cqr);
1876                 if (rc < 0)
1877                         break;
1878                 /* Rechain request (including erp chain) so it won't be
1879                  * touched by the dasd_block_tasklet anymore.
1880                  * Replace the callback so we notice when the request
1881                  * is returned from the dasd_device layer.
1882                  */
1883                 cqr->callback = _dasd_wake_block_flush_cb;
1884                 for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
1885                         list_move_tail(&cqr->blocklist, &flush_queue);
1886                 if (i > 1)
1887                         /* moved more than one request - need to restart */
1888                         goto restart;
1889         }
1890         spin_unlock_bh(&block->queue_lock);
1891         /* Now call the callback function of flushed requests */
1892 restart_cb:
1893         list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
1894                 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
1895                 /* Process finished ERP request. */
1896                 if (cqr->refers) {
1897                         __dasd_block_process_erp(block, cqr);
1898                         /* restart list_for_xx loop since dasd_process_erp
1899                          * might remove multiple elements */
1900                         goto restart_cb;
1901                 }
1902                 /* call the callback function */
1903                 cqr->endclk = get_clock();
1904                 list_del_init(&cqr->blocklist);
1905                 __dasd_cleanup_cqr(cqr);
1906         }
1907         return rc;
1908 }
1909
1910 /*
1911  * Schedules a call to dasd_block_tasklet over the block's tasklet.
1912  */
1913 void dasd_schedule_block_bh(struct dasd_block *block)
1914 {
1915         /* Protect against rescheduling. */
1916         if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
1917                 return;
1918         /* life cycle of block is bound to its base device */
1919         dasd_get_device(block->base);
1920         tasklet_hi_schedule(&block->tasklet);
1921 }
1922
1923
1924 /*
1925  * SECTION: external block device operations
1926  * (request queue handling, open, release, etc.)
1927  */
1928
1929 /*
1930  * Dasd request queue function. Called from ll_rw_blk.c
1931  */
1932 static void do_dasd_request(struct request_queue *queue)
1933 {
1934         struct dasd_block *block;
1935
1936         block = queue->queuedata;
1937         spin_lock(&block->queue_lock);
1938         /* Get new request from the block device request queue */
1939         __dasd_process_request_queue(block);
1940         /* Now check if the head of the ccw queue needs to be started. */
1941         __dasd_block_start_head(block);
1942         spin_unlock(&block->queue_lock);
1943 }
1944
1945 /*
1946  * Allocate and initialize request queue and default I/O scheduler.
1947  */
1948 static int dasd_alloc_queue(struct dasd_block *block)
1949 {
1950         int rc;
1951
1952         block->request_queue = blk_init_queue(do_dasd_request,
1953                                                &block->request_queue_lock);
1954         if (block->request_queue == NULL)
1955                 return -ENOMEM;
1956
1957         block->request_queue->queuedata = block;
1958
1959         elevator_exit(block->request_queue->elevator);
1960         block->request_queue->elevator = NULL;
1961         rc = elevator_init(block->request_queue, "deadline");
1962         if (rc) {
1963                 blk_cleanup_queue(block->request_queue);
1964                 return rc;
1965         }
1966         return 0;
1967 }
1968
1969 /*
1970  * Configure limits and ordering of the request queue.
1971  */
1972 static void dasd_setup_queue(struct dasd_block *block)
1973 {
1974         int max;
1975
1976         blk_queue_hardsect_size(block->request_queue, block->bp_block);
1977         max = block->base->discipline->max_blocks << block->s2b_shift;
1978         blk_queue_max_sectors(block->request_queue, max);
1979         blk_queue_max_phys_segments(block->request_queue, -1L);
1980         blk_queue_max_hw_segments(block->request_queue, -1L);
1981         blk_queue_max_segment_size(block->request_queue, -1L);
1982         blk_queue_segment_boundary(block->request_queue, -1L);
1983         blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN, NULL);
1984 }
1985
1986 /*
1987  * Deactivate and free request queue.
1988  */
1989 static void dasd_free_queue(struct dasd_block *block)
1990 {
1991         if (block->request_queue) {
1992                 blk_cleanup_queue(block->request_queue);
1993                 block->request_queue = NULL;
1994         }
1995 }
1996
1997 /*
1998  * Flush request on the request queue.
1999  */
2000 static void dasd_flush_request_queue(struct dasd_block *block)
2001 {
2002         struct request *req;
2003
2004         if (!block->request_queue)
2005                 return;
2006
2007         spin_lock_irq(&block->request_queue_lock);
2008         while ((req = elv_next_request(block->request_queue))) {
2009                 blkdev_dequeue_request(req);
2010                 dasd_end_request(req, -EIO);
2011         }
2012         spin_unlock_irq(&block->request_queue_lock);
2013 }
2014
2015 static int dasd_open(struct inode *inp, struct file *filp)
2016 {
2017         struct gendisk *disk = inp->i_bdev->bd_disk;
2018         struct dasd_block *block = disk->private_data;
2019         struct dasd_device *base = block->base;
2020         int rc;
2021
2022         atomic_inc(&block->open_count);
2023         if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
2024                 rc = -ENODEV;
2025                 goto unlock;
2026         }
2027
2028         if (!try_module_get(base->discipline->owner)) {
2029                 rc = -EINVAL;
2030                 goto unlock;
2031         }
2032
2033         if (dasd_probeonly) {
2034                 DEV_MESSAGE(KERN_INFO, base, "%s",
2035                             "No access to device due to probeonly mode");
2036                 rc = -EPERM;
2037                 goto out;
2038         }
2039
2040         if (base->state <= DASD_STATE_BASIC) {
2041                 DBF_DEV_EVENT(DBF_ERR, base, " %s",
2042                               " Cannot open unrecognized device");
2043                 rc = -ENODEV;
2044                 goto out;
2045         }
2046
2047         return 0;
2048
2049 out:
2050         module_put(base->discipline->owner);
2051 unlock:
2052         atomic_dec(&block->open_count);
2053         return rc;
2054 }
2055
2056 static int dasd_release(struct inode *inp, struct file *filp)
2057 {
2058         struct gendisk *disk = inp->i_bdev->bd_disk;
2059         struct dasd_block *block = disk->private_data;
2060
2061         atomic_dec(&block->open_count);
2062         module_put(block->base->discipline->owner);
2063         return 0;
2064 }
2065
2066 /*
2067  * Return disk geometry.
2068  */
2069 static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
2070 {
2071         struct dasd_block *block;
2072         struct dasd_device *base;
2073
2074         block = bdev->bd_disk->private_data;
2075         if (!block)
2076                 return -ENODEV;
2077         base = block->base;
2078
2079         if (!base->discipline ||
2080             !base->discipline->fill_geometry)
2081                 return -EINVAL;
2082
2083         base->discipline->fill_geometry(block, geo);
2084         geo->start = get_start_sect(bdev) >> block->s2b_shift;
2085         return 0;
2086 }
2087
2088 struct block_device_operations
2089 dasd_device_operations = {
2090         .owner          = THIS_MODULE,
2091         .open           = dasd_open,
2092         .release        = dasd_release,
2093         .ioctl          = dasd_ioctl,
2094         .compat_ioctl   = dasd_compat_ioctl,
2095         .getgeo         = dasd_getgeo,
2096 };
2097
2098 /*******************************************************************************
2099  * end of block device operations
2100  */
2101
2102 static void
2103 dasd_exit(void)
2104 {
2105 #ifdef CONFIG_PROC_FS
2106         dasd_proc_exit();
2107 #endif
2108         dasd_eer_exit();
2109         if (dasd_page_cache != NULL) {
2110                 kmem_cache_destroy(dasd_page_cache);
2111                 dasd_page_cache = NULL;
2112         }
2113         dasd_gendisk_exit();
2114         dasd_devmap_exit();
2115         if (dasd_debug_area != NULL) {
2116                 debug_unregister(dasd_debug_area);
2117                 dasd_debug_area = NULL;
2118         }
2119 }
2120
2121 /*
2122  * SECTION: common functions for ccw_driver use
2123  */
2124
2125 /*
2126  * Initial attempt at a probe function. This can be simplified once
2127  * the other detection code is gone.
2128  */
2129 int dasd_generic_probe(struct ccw_device *cdev,
2130                        struct dasd_discipline *discipline)
2131 {
2132         int ret;
2133
2134         ret = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
2135         if (ret) {
2136                 printk(KERN_WARNING
2137                        "dasd_generic_probe: could not set ccw-device options "
2138                        "for %s\n", cdev->dev.bus_id);
2139                 return ret;
2140         }
2141         ret = dasd_add_sysfs_files(cdev);
2142         if (ret) {
2143                 printk(KERN_WARNING
2144                        "dasd_generic_probe: could not add sysfs entries "
2145                        "for %s\n", cdev->dev.bus_id);
2146                 return ret;
2147         }
2148         cdev->handler = &dasd_int_handler;
2149
2150         /*
2151          * Automatically online either all dasd devices (dasd_autodetect)
2152          * or all devices specified with dasd= parameters during
2153          * initial probe.
2154          */
2155         if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0) ||
2156             (dasd_autodetect && dasd_busid_known(cdev->dev.bus_id) != 0))
2157                 ret = ccw_device_set_online(cdev);
2158         if (ret)
2159                 printk(KERN_WARNING
2160                        "dasd_generic_probe: could not initially "
2161                        "online ccw-device %s; return code: %d\n",
2162                        cdev->dev.bus_id, ret);
2163         return 0;
2164 }
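
/*
 * Illustrative sketch (assumed naming, not taken from this file): a
 * discipline driver's ccw probe callback would forward to this helper,
 * for example:
 *
 *	static int dasd_eckd_probe(struct ccw_device *cdev)
 *	{
 *		return dasd_generic_probe(cdev, &dasd_eckd_discipline);
 *	}
 */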
2165
2166 /*
2167  * This will one day be called from a global not_oper handler.
2168  * It is also used by driver_unregister during module unload.
2169  */
2170 void dasd_generic_remove(struct ccw_device *cdev)
2171 {
2172         struct dasd_device *device;
2173         struct dasd_block *block;
2174
2175         cdev->handler = NULL;
2176
2177         dasd_remove_sysfs_files(cdev);
2178         device = dasd_device_from_cdev(cdev);
2179         if (IS_ERR(device))
2180                 return;
2181         if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
2182                 /* Already doing offline processing */
2183                 dasd_put_device(device);
2184                 return;
2185         }
2186         /*
2187          * This device is removed unconditionally. Set the offline
2188          * flag to prevent dasd_open from opening it while it is
2189          * not quite down yet.
2190          */
2191         dasd_set_target_state(device, DASD_STATE_NEW);
2192         /* dasd_delete_device destroys the device reference. */
2193         block = device->block;
2194         device->block = NULL;
2195         dasd_delete_device(device);
2196         /*
2197          * life cycle of block is bound to device, so delete it after
2198          * device was safely removed
2199          */
2200         if (block)
2201                 dasd_free_block(block);
2202 }
2203
2204 /*
2205  * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
2206  * the device is detected for the first time and is supposed to be used
2207  * or the user has started activation through sysfs.
2208  */
2209 int dasd_generic_set_online(struct ccw_device *cdev,
2210                             struct dasd_discipline *base_discipline)
2211 {
2212         struct dasd_discipline *discipline;
2213         struct dasd_device *device;
2214         int rc;
2215
2216         /* first online clears initial online feature flag */
2217         dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
2218         device = dasd_create_device(cdev);
2219         if (IS_ERR(device))
2220                 return PTR_ERR(device);
2221
2222         discipline = base_discipline;
2223         if (device->features & DASD_FEATURE_USEDIAG) {
2224                 if (!dasd_diag_discipline_pointer) {
2225                         printk (KERN_WARNING
2226                                 "dasd_generic couldn't online device %s "
2227                                 "- discipline DIAG not available\n",
2228                                 cdev->dev.bus_id);
2229                         dasd_delete_device(device);
2230                         return -ENODEV;
2231                 }
2232                 discipline = dasd_diag_discipline_pointer;
2233         }
2234         if (!try_module_get(base_discipline->owner)) {
2235                 dasd_delete_device(device);
2236                 return -EINVAL;
2237         }
2238         if (!try_module_get(discipline->owner)) {
2239                 module_put(base_discipline->owner);
2240                 dasd_delete_device(device);
2241                 return -EINVAL;
2242         }
2243         device->base_discipline = base_discipline;
2244         device->discipline = discipline;
2245
2246         /* check_device will allocate block device if necessary */
2247         rc = discipline->check_device(device);
2248         if (rc) {
2249                 printk (KERN_WARNING
2250                         "dasd_generic couldn't online device %s "
2251                         "with discipline %s rc=%i\n",
2252                         cdev->dev.bus_id, discipline->name, rc);
2253                 module_put(discipline->owner);
2254                 module_put(base_discipline->owner);
2255                 dasd_delete_device(device);
2256                 return rc;
2257         }
2258
2259         dasd_set_target_state(device, DASD_STATE_ONLINE);
2260         if (device->state <= DASD_STATE_KNOWN) {
2261                 printk (KERN_WARNING
2262                         "dasd_generic discipline not found for %s\n",
2263                         cdev->dev.bus_id);
2264                 rc = -ENODEV;
2265                 dasd_set_target_state(device, DASD_STATE_NEW);
2266                 if (device->block)
2267                         dasd_free_block(device->block);
2268                 dasd_delete_device(device);
2269         } else
2270                 pr_debug("dasd_generic device %s found\n",
2271                                 cdev->dev.bus_id);
2272
2273         /* FIXME: we have to wait for the root device but we don't want
2274          * to wait for each single device but for all at once. */
2275         wait_event(dasd_init_waitq, _wait_for_device(device));
2276
2277         dasd_put_device(device);
2278
2279         return rc;
2280 }
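
/*
 * Sketch of the corresponding set_online hook in a discipline driver
 * (assumed naming, for illustration only):
 *
 *	static int dasd_eckd_set_online(struct ccw_device *cdev)
 *	{
 *		return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
 *	}
 */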
2281
2282 int dasd_generic_set_offline(struct ccw_device *cdev)
2283 {
2284         struct dasd_device *device;
2285         struct dasd_block *block;
2286         int max_count, open_count;
2287
2288         device = dasd_device_from_cdev(cdev);
2289         if (IS_ERR(device))
2290                 return PTR_ERR(device);
2291         if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
2292                 /* Already doing offline processing */
2293                 dasd_put_device(device);
2294                 return 0;
2295         }
2296         /*
2297          * We must make sure that this device is currently not in use.
2298  * The open_count is increased for every opener; that includes
2299          * the blkdev_get in dasd_scan_partitions. We are only interested
2300          * in the other openers.
2301          */
2302         if (device->block) {
2303                 max_count = device->block->bdev ? 0 : -1;
2304                 open_count = atomic_read(&device->block->open_count);
2305                 if (open_count > max_count) {
2306                         if (open_count > 0)
2307                                 printk(KERN_WARNING "Can't offline dasd "
2308                                        "device with open count = %i.\n",
2309                                        open_count);
2310                         else
2311                                 printk(KERN_WARNING "%s",
2312                                        "Can't offline dasd device due "
2313                                        "to internal use\n");
2314                         clear_bit(DASD_FLAG_OFFLINE, &device->flags);
2315                         dasd_put_device(device);
2316                         return -EBUSY;
2317                 }
2318         }
2319         dasd_set_target_state(device, DASD_STATE_NEW);
2320         /* dasd_delete_device destroys the device reference. */
2321         block = device->block;
2322         device->block = NULL;
2323         dasd_delete_device(device);
2324         /*
2325          * life cycle of block is bound to device, so delete it after
2326          * device was safely removed
2327          */
2328         if (block)
2329                 dasd_free_block(block);
2330         return 0;
2331 }
2332
2333 int dasd_generic_notify(struct ccw_device *cdev, int event)
2334 {
2335         struct dasd_device *device;
2336         struct dasd_ccw_req *cqr;
2337         unsigned long flags;
2338         int ret;
2339
2340         device = dasd_device_from_cdev(cdev);
2341         if (IS_ERR(device))
2342                 return 0;
2343         spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
2344         ret = 0;
2345         switch (event) {
2346         case CIO_GONE:
2347         case CIO_NO_PATH:
2348                 /* First of all call extended error reporting. */
2349                 dasd_eer_write(device, NULL, DASD_EER_NOPATH);
2350
2351                 if (device->state < DASD_STATE_BASIC)
2352                         break;
2353                 /* Device is active. We want to keep it. */
2354                 list_for_each_entry(cqr, &device->ccw_queue, devlist)
2355                         if (cqr->status == DASD_CQR_IN_IO) {
2356                                 cqr->status = DASD_CQR_QUEUED;
2357                                 cqr->retries++;
2358                         }
2359                 device->stopped |= DASD_STOPPED_DC_WAIT;
2360                 dasd_device_clear_timer(device);
2361                 dasd_schedule_device_bh(device);
2362                 ret = 1;
2363                 break;
2364         case CIO_OPER:
2365                 /* FIXME: add a sanity check. */
2366                 device->stopped &= ~DASD_STOPPED_DC_WAIT;
2367                 dasd_schedule_device_bh(device);
2368                 if (device->block)
2369                         dasd_schedule_block_bh(device->block);
2370                 ret = 1;
2371                 break;
2372         }
2373         spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
2374         dasd_put_device(device);
2375         return ret;
2376 }
2377
2378 static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
2379                                                    void *rdc_buffer,
2380                                                    int rdc_buffer_size,
2381                                                    char *magic)
2382 {
2383         struct dasd_ccw_req *cqr;
2384         struct ccw1 *ccw;
2385
2386         cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);
2387
2388         if (IS_ERR(cqr)) {
2389                 DEV_MESSAGE(KERN_WARNING, device, "%s",
2390                             "Could not allocate RDC request");
2391                 return cqr;
2392         }
2393
2394         ccw = cqr->cpaddr;
2395         ccw->cmd_code = CCW_CMD_RDC;
2396         ccw->cda = (__u32)(addr_t)rdc_buffer;
2397         ccw->count = rdc_buffer_size;
2398
2399         cqr->startdev = device;
2400         cqr->memdev = device;
2401         cqr->expires = 10*HZ;
2402         clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
2403         cqr->retries = 2;
2404         cqr->buildclk = get_clock();
2405         cqr->status = DASD_CQR_FILLED;
2406         return cqr;
2407 }
2408
2409
2410 int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic,
2411                                 void **rdc_buffer, int rdc_buffer_size)
2412 {
2413         int ret;
2414         struct dasd_ccw_req *cqr;
2415
2416         cqr = dasd_generic_build_rdc(device, *rdc_buffer, rdc_buffer_size,
2417                                      magic);
2418         if (IS_ERR(cqr))
2419                 return PTR_ERR(cqr);
2420
2421         ret = dasd_sleep_on(cqr);
2422         dasd_sfree_request(cqr, cqr->memdev);
2423         return ret;
2424 }
2425 EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
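
/*
 * Illustrative usage (buffer name and size are assumptions): a discipline
 * reads the device characteristics into a per-device buffer during device
 * setup, for example:
 *
 *	void *rdc = &private->rdc_data;
 *	rc = dasd_generic_read_dev_chars(device, "ECKD", &rdc, 64);
 */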
2426
2427 static int __init dasd_init(void)
2428 {
2429         int rc;
2430
2431         init_waitqueue_head(&dasd_init_waitq);
2432         init_waitqueue_head(&dasd_flush_wq);
2433
2434         /* register 'common' DASD debug area, used for all DBF_XXX calls */
2435         dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
2436         if (dasd_debug_area == NULL) {
2437                 rc = -ENOMEM;
2438                 goto failed;
2439         }
2440         debug_register_view(dasd_debug_area, &debug_sprintf_view);
2441         debug_set_level(dasd_debug_area, DBF_WARNING);
2442
2443         DBF_EVENT(DBF_EMERG, "%s", "debug area created");
2444
2445         dasd_diag_discipline_pointer = NULL;
2446
2447         rc = dasd_devmap_init();
2448         if (rc)
2449                 goto failed;
2450         rc = dasd_gendisk_init();
2451         if (rc)
2452                 goto failed;
2453         rc = dasd_parse();
2454         if (rc)
2455                 goto failed;
2456         rc = dasd_eer_init();
2457         if (rc)
2458                 goto failed;
2459 #ifdef CONFIG_PROC_FS
2460         rc = dasd_proc_init();
2461         if (rc)
2462                 goto failed;
2463 #endif
2464
2465         return 0;
2466 failed:
2467         MESSAGE(KERN_INFO, "%s", "initialization not performed due to errors");
2468         dasd_exit();
2469         return rc;
2470 }
2471
2472 module_init(dasd_init);
2473 module_exit(dasd_exit);
2474
2475 EXPORT_SYMBOL(dasd_debug_area);
2476 EXPORT_SYMBOL(dasd_diag_discipline_pointer);
2477
2478 EXPORT_SYMBOL(dasd_add_request_head);
2479 EXPORT_SYMBOL(dasd_add_request_tail);
2480 EXPORT_SYMBOL(dasd_cancel_req);
2481 EXPORT_SYMBOL(dasd_device_clear_timer);
2482 EXPORT_SYMBOL(dasd_block_clear_timer);
2483 EXPORT_SYMBOL(dasd_enable_device);
2484 EXPORT_SYMBOL(dasd_int_handler);
2485 EXPORT_SYMBOL(dasd_kfree_request);
2486 EXPORT_SYMBOL(dasd_kick_device);
2487 EXPORT_SYMBOL(dasd_kmalloc_request);
2488 EXPORT_SYMBOL(dasd_schedule_device_bh);
2489 EXPORT_SYMBOL(dasd_schedule_block_bh);
2490 EXPORT_SYMBOL(dasd_set_target_state);
2491 EXPORT_SYMBOL(dasd_device_set_timer);
2492 EXPORT_SYMBOL(dasd_block_set_timer);
2493 EXPORT_SYMBOL(dasd_sfree_request);
2494 EXPORT_SYMBOL(dasd_sleep_on);
2495 EXPORT_SYMBOL(dasd_sleep_on_immediatly);
2496 EXPORT_SYMBOL(dasd_sleep_on_interruptible);
2497 EXPORT_SYMBOL(dasd_smalloc_request);
2498 EXPORT_SYMBOL(dasd_start_IO);
2499 EXPORT_SYMBOL(dasd_term_IO);
2500
2501 EXPORT_SYMBOL_GPL(dasd_generic_probe);
2502 EXPORT_SYMBOL_GPL(dasd_generic_remove);
2503 EXPORT_SYMBOL_GPL(dasd_generic_notify);
2504 EXPORT_SYMBOL_GPL(dasd_generic_set_online);
2505 EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
2506 EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
2507 EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
2508 EXPORT_SYMBOL_GPL(dasd_alloc_block);
2509 EXPORT_SYMBOL_GPL(dasd_free_block);