/*
 *  drivers/s390/cio/device_ops.c
 *
 *   $Revision: 1.61 $
 *
 *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
 *                       IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *               Cornelia Huck (cornelia.huck@de.ibm.com)
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/delay.h>

#include <asm/ccwdev.h>
#include <asm/idals.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "chsc.h"
#include "device.h"

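/*
 * Set the behaviour options of @cdev from the CCWDEV_* bits in @flags.
 * CCWDEV_EARLY_NOTIFICATION and CCWDEV_REPORT_ALL are mutually exclusive;
 * combining them returns -EINVAL.
 */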
int
ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
{
        /*
         * The flag usage is mutually exclusive ...
         */
        if ((flags & CCWDEV_EARLY_NOTIFICATION) &&
            (flags & CCWDEV_REPORT_ALL))
                return -EINVAL;
        cdev->private->options.fast = (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
        cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0;
        cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0;
        cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0;
        return 0;
}

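/*
 * Terminate the current I/O on @cdev with a clear subchannel operation.
 * On success @intparm is stored and later presented to the driver's
 * interrupt handler.
 */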
int
ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
{
        struct subchannel *sch;
        int ret;

        if (!cdev)
                return -ENODEV;
        if (cdev->private->state == DEV_STATE_NOT_OPER)
                return -ENODEV;
        if (cdev->private->state != DEV_STATE_ONLINE &&
            cdev->private->state != DEV_STATE_WAIT4IO &&
            cdev->private->state != DEV_STATE_W4SENSE)
                return -EINVAL;
        sch = to_subchannel(cdev->dev.parent);
        if (!sch)
                return -ENODEV;
        ret = cio_clear(sch);
        if (ret == 0)
                cdev->private->intparm = intparm;
        return ret;
}

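/*
 * Start the channel program at @cpa on @cdev using storage key @key.
 * @lpm is the logical path mask, @flags are passed on to cio_set_options()
 * and @intparm is returned to the driver's interrupt handler. While path
 * verification is pending the I/O is not started; instead a fake irb will
 * be delivered to the driver once verification has finished.
 */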
int
ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
                     unsigned long intparm, __u8 lpm, __u8 key,
                     unsigned long flags)
{
        struct subchannel *sch;
        int ret;

        if (!cdev)
                return -ENODEV;
        sch = to_subchannel(cdev->dev.parent);
        if (!sch)
                return -ENODEV;
        if (cdev->private->state == DEV_STATE_NOT_OPER)
                return -ENODEV;
        if (cdev->private->state == DEV_STATE_VERIFY) {
                /* Remember to fake irb when finished. */
                if (!cdev->private->flags.fake_irb) {
                        cdev->private->flags.fake_irb = 1;
                        cdev->private->intparm = intparm;
                        return 0;
                } else
                        /* There's already a fake I/O around. */
                        return -EBUSY;
        }
        if (cdev->private->state != DEV_STATE_ONLINE ||
            ((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
             !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
            cdev->private->flags.doverify)
                return -EBUSY;
        ret = cio_set_options (sch, flags);
        if (ret)
                return ret;
        ret = cio_start_key (sch, cpa, lpm, key);
        if (ret == 0)
                cdev->private->intparm = intparm;
        return ret;
}


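/*
 * Like ccw_device_start_key(), but additionally arm the device timeout
 * with @expires; the timeout is cancelled again if starting the request
 * fails.
 */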
int
ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
                             unsigned long intparm, __u8 lpm, __u8 key,
                             unsigned long flags, int expires)
{
        int ret;

        if (!cdev)
                return -ENODEV;
        ccw_device_set_timeout(cdev, expires);
        ret = ccw_device_start_key(cdev, cpa, intparm, lpm, key, flags);
        if (ret != 0)
                ccw_device_set_timeout(cdev, 0);
        return ret;
}

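/*
 * Convenience wrapper around ccw_device_start_key() using the default
 * storage key.
 */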
int
ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
                 unsigned long intparm, __u8 lpm, unsigned long flags)
{
        return ccw_device_start_key(cdev, cpa, intparm, lpm,
                                    PAGE_DEFAULT_KEY, flags);
}

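/*
 * Convenience wrapper around ccw_device_start_timeout_key() using the
 * default storage key.
 */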
int
ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
                         unsigned long intparm, __u8 lpm, unsigned long flags,
                         int expires)
{
        return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm,
                                            PAGE_DEFAULT_KEY, flags,
                                            expires);
}


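/*
 * Terminate the current I/O on @cdev with a halt subchannel operation.
 * On success @intparm is stored and later presented to the driver's
 * interrupt handler.
 */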
int
ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
{
        struct subchannel *sch;
        int ret;

        if (!cdev)
                return -ENODEV;
        if (cdev->private->state == DEV_STATE_NOT_OPER)
                return -ENODEV;
        if (cdev->private->state != DEV_STATE_ONLINE &&
            cdev->private->state != DEV_STATE_WAIT4IO &&
            cdev->private->state != DEV_STATE_W4SENSE)
                return -EINVAL;
        sch = to_subchannel(cdev->dev.parent);
        if (!sch)
                return -ENODEV;
        ret = cio_halt(sch);
        if (ret == 0)
                cdev->private->intparm = intparm;
        return ret;
}

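/*
 * Resume channel program execution on a device whose subchannel is
 * currently suspended (SCSW_ACTL_SUSPENDED).
 */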
int
ccw_device_resume(struct ccw_device *cdev)
{
        struct subchannel *sch;

        if (!cdev)
                return -ENODEV;
        sch = to_subchannel(cdev->dev.parent);
        if (!sch)
                return -ENODEV;
        if (cdev->private->state == DEV_STATE_NOT_OPER)
                return -ENODEV;
        if (cdev->private->state != DEV_STATE_ONLINE ||
            !(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED))
                return -EINVAL;
        return cio_resume(sch);
}

/*
 * Pass interrupt to device driver.
 */
int
ccw_device_call_handler(struct ccw_device *cdev)
{
        struct subchannel *sch;
        unsigned int stctl;
        int ending_status;

        sch = to_subchannel(cdev->dev.parent);

        /*
         * We call the device driver's interrupt handler if one of the
         * following applies:
         *  - we received ending status
         *  - the action handler requested to see all interrupts
         *  - we received an intermediate status
         *  - fast notification was requested (primary status)
         *  - unsolicited interrupts
         */
        stctl = cdev->private->irb.scsw.stctl;
        ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
                (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
                (stctl == SCSW_STCTL_STATUS_PEND);
        if (!ending_status &&
            !cdev->private->options.repall &&
            !(stctl & SCSW_STCTL_INTER_STATUS) &&
            !(cdev->private->options.fast &&
              (stctl & SCSW_STCTL_PRIM_STATUS)))
                return 0;

        /*
         * Now we are ready to call the device driver interrupt handler.
         */
        if (cdev->handler)
                cdev->handler(cdev, cdev->private->intparm,
                              &cdev->private->irb);

        /*
         * Clear the old and now useless interrupt response block.
         */
        memset(&cdev->private->irb, 0, sizeof(struct irb));

        return 1;
}

/*
 * Search for CIW command in extended sense data.
 */
struct ciw *
ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
{
        int ciw_cnt;

        if (cdev->private->flags.esid == 0)
                return NULL;
        for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
                if (cdev->private->senseid.ciw[ciw_cnt].ct == ct)
                        return cdev->private->senseid.ciw + ciw_cnt;
        return NULL;
}

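/*
 * Return the mask of channel paths usable for I/O (the subchannel's vpm),
 * or 0 if no subchannel is found.
 */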
__u8
ccw_device_get_path_mask(struct ccw_device *cdev)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        if (!sch)
                return 0;
        else
                return sch->vpm;
}

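/*
 * Interrupt handler that is installed temporarily by the synchronous
 * helpers below (read_dev_chars, read_conf_data_lpm). It maps the irb
 * status to 0 / -EAGAIN / -EIO in intparm and wakes up the waiter in
 * __ccw_device_retry_loop().
 */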
static void
ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb)
{
        if (!ip)
                /* unsolicited interrupt */
                return;

        /* Abuse intparm for error reporting. */
        if (IS_ERR(irb))
                cdev->private->intparm = -EIO;
        else if ((irb->scsw.dstat !=
                  (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
                 (irb->scsw.cstat != 0)) {
                /*
                 * We didn't get channel end / device end. Check if path
                 * verification has been started; we can retry after it has
                 * finished. We also retry unit checks except for command reject
                 * or intervention required.
                 */
                if (cdev->private->flags.doverify ||
                    cdev->private->state == DEV_STATE_VERIFY)
                        cdev->private->intparm = -EAGAIN;
                else if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
                         !(irb->ecw[0] &
                           (SNS0_CMD_REJECT | SNS0_INTERVENTION_REQ)))
                        cdev->private->intparm = -EAGAIN;
                else
                        cdev->private->intparm = -EIO;
        } else
                cdev->private->intparm = 0;
        wake_up(&cdev->private->wait_q);
}

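/*
 * Start @ccw on the device's subchannel and wait for it to complete,
 * retrying on busy conditions. Must be called with the subchannel lock
 * held; the lock is dropped while sleeping.
 */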
static inline int
__ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, __u8 lpm)
{
        int ret;
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        do {
                ret = cio_start (sch, ccw, lpm);
                if ((ret == -EBUSY) || (ret == -EACCES)) {
                        /* Try again later. */
                        spin_unlock_irq(&sch->lock);
                        msleep(10);
                        spin_lock_irq(&sch->lock);
                        continue;
                }
                if (ret != 0)
                        /* Non-retryable error. */
                        break;
                /* Wait for end of request. */
                cdev->private->intparm = magic;
                spin_unlock_irq(&sch->lock);
                wait_event(cdev->private->wait_q,
                           (cdev->private->intparm == -EIO) ||
                           (cdev->private->intparm == -EAGAIN) ||
                           (cdev->private->intparm == 0));
                spin_lock_irq(&sch->lock);
                /* Check at least for channel end / device end */
                if (cdev->private->intparm == -EIO) {
                        /* Non-retryable error. */
                        ret = -EIO;
                        break;
                }
                if (cdev->private->intparm == 0)
                        /* Success. */
                        break;
                /* Try again later. */
                spin_unlock_irq(&sch->lock);
                msleep(10);
                spin_lock_irq(&sch->lock);
        } while (1);

        return ret;
}

/**
 * read_dev_chars() - read device characteristics
 * @cdev: target ccw device
 * @buffer: pointer to buffer for rdc data
 * @length: size of rdc data
 *
 * Returns 0 for success, negative error value on failure.
 *
 * Context:
 *   called for online device, lock not held
 */
int
read_dev_chars (struct ccw_device *cdev, void **buffer, int length)
{
        void (*handler)(struct ccw_device *, unsigned long, struct irb *);
        struct subchannel *sch;
        int ret;
        struct ccw1 *rdc_ccw;

        if (!cdev)
                return -ENODEV;
        if (!buffer || !length)
                return -EINVAL;
        sch = to_subchannel(cdev->dev.parent);

        CIO_TRACE_EVENT (4, "rddevch");
        CIO_TRACE_EVENT (4, sch->dev.bus_id);

        rdc_ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
        if (!rdc_ccw)
                return -ENOMEM;
        memset(rdc_ccw, 0, sizeof(struct ccw1));
        rdc_ccw->cmd_code = CCW_CMD_RDC;
        rdc_ccw->count = length;
        rdc_ccw->flags = CCW_FLAG_SLI;
        ret = set_normalized_cda (rdc_ccw, (*buffer));
        if (ret != 0) {
                kfree(rdc_ccw);
                return ret;
        }

        spin_lock_irq(&sch->lock);
        /* Save interrupt handler. */
        handler = cdev->handler;
        /* Temporarily install own handler. */
        cdev->handler = ccw_device_wake_up;
        if (cdev->private->state != DEV_STATE_ONLINE)
                ret = -ENODEV;
        else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
                  !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
                 cdev->private->flags.doverify)
                ret = -EBUSY;
        else
                /* 0x00D9C4C3 == ebcdic "RDC" */
                ret = __ccw_device_retry_loop(cdev, rdc_ccw, 0x00D9C4C3, 0);

        /* Restore interrupt handler. */
        cdev->handler = handler;
        spin_unlock_irq(&sch->lock);

        clear_normalized_cda (rdc_ccw);
        kfree(rdc_ccw);

        return ret;
}

/*
 *  Read Configuration data using path mask
 */
int
read_conf_data_lpm (struct ccw_device *cdev, void **buffer, int *length, __u8 lpm)
{
        void (*handler)(struct ccw_device *, unsigned long, struct irb *);
        struct subchannel *sch;
        struct ciw *ciw;
        char *rcd_buf;
        int ret;
        struct ccw1 *rcd_ccw;

        if (!cdev)
                return -ENODEV;
        if (!buffer || !length)
                return -EINVAL;
        sch = to_subchannel(cdev->dev.parent);

        CIO_TRACE_EVENT (4, "rdconf");
        CIO_TRACE_EVENT (4, sch->dev.bus_id);

        /*
         * scan for RCD command in extended SenseID data
         */
        ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD);
        if (!ciw || ciw->cmd == 0)
                return -EOPNOTSUPP;

        rcd_ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
        if (!rcd_ccw)
                return -ENOMEM;
        memset(rcd_ccw, 0, sizeof(struct ccw1));
        rcd_buf = kmalloc(ciw->count, GFP_KERNEL | GFP_DMA);
        if (!rcd_buf) {
                kfree(rcd_ccw);
                return -ENOMEM;
        }
        memset (rcd_buf, 0, ciw->count);
        rcd_ccw->cmd_code = ciw->cmd;
        rcd_ccw->cda = (__u32) __pa (rcd_buf);
        rcd_ccw->count = ciw->count;
        rcd_ccw->flags = CCW_FLAG_SLI;

        spin_lock_irq(&sch->lock);
        /* Save interrupt handler. */
        handler = cdev->handler;
        /* Temporarily install own handler. */
        cdev->handler = ccw_device_wake_up;
        if (cdev->private->state != DEV_STATE_ONLINE)
                ret = -ENODEV;
        else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
                  !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
                 cdev->private->flags.doverify)
                ret = -EBUSY;
        else
                /* 0x00D9C3C4 == ebcdic "RCD" */
                ret = __ccw_device_retry_loop(cdev, rcd_ccw, 0x00D9C3C4, lpm);

        /* Restore interrupt handler. */
        cdev->handler = handler;
        spin_unlock_irq(&sch->lock);

        /*
         * on success we update the user input parms
         */
        if (ret) {
                kfree (rcd_buf);
                *buffer = NULL;
                *length = 0;
        } else {
                *length = ciw->count;
                *buffer = rcd_buf;
        }
        kfree(rcd_ccw);

        return ret;
}

/*
 *  Read Configuration data
 */
int
read_conf_data (struct ccw_device *cdev, void **buffer, int *length)
{
        return read_conf_data_lpm (cdev, buffer, length, 0);
}

/*
 * Try to break the lock on a boxed device.
 */
int
ccw_device_stlck(struct ccw_device *cdev)
{
        void *buf, *buf2;
        unsigned long flags;
        struct subchannel *sch;
        int ret;

        if (!cdev)
                return -ENODEV;

        if (cdev->drv && !cdev->private->options.force)
                return -EINVAL;

        sch = to_subchannel(cdev->dev.parent);

        CIO_TRACE_EVENT(2, "stl lock");
        CIO_TRACE_EVENT(2, cdev->dev.bus_id);

        buf = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
        buf2 = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
        if (!buf2) {
                kfree(buf);
                return -ENOMEM;
        }
        spin_lock_irqsave(&sch->lock, flags);
        ret = cio_enable_subchannel(sch, 3);
        if (ret)
                goto out_unlock;
        /*
         * Setup ccw. We chain an unconditional reserve and a release so we
         * only break the lock.
         */
        cdev->private->iccws[0].cmd_code = CCW_CMD_STLCK;
        cdev->private->iccws[0].cda = (__u32) __pa(buf);
        cdev->private->iccws[0].count = 32;
        cdev->private->iccws[0].flags = CCW_FLAG_CC;
        cdev->private->iccws[1].cmd_code = CCW_CMD_RELEASE;
        cdev->private->iccws[1].cda = (__u32) __pa(buf2);
        cdev->private->iccws[1].count = 32;
        cdev->private->iccws[1].flags = 0;
        ret = cio_start(sch, cdev->private->iccws, 0);
        if (ret) {
                cio_disable_subchannel(sch); //FIXME: return code?
                goto out_unlock;
        }
        cdev->private->irb.scsw.actl |= SCSW_ACTL_START_PEND;
        spin_unlock_irqrestore(&sch->lock, flags);
        wait_event(cdev->private->wait_q, cdev->private->irb.scsw.actl == 0);
        spin_lock_irqsave(&sch->lock, flags);
        cio_disable_subchannel(sch); //FIXME: return code?
        if ((cdev->private->irb.scsw.dstat !=
             (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
            (cdev->private->irb.scsw.cstat != 0))
                ret = -EIO;
        /* Clear irb. */
        memset(&cdev->private->irb, 0, sizeof(struct irb));
out_unlock:
        kfree(buf);
        kfree(buf2);
        spin_unlock_irqrestore(&sch->lock, flags);
        return ret;
}

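/*
 * Return the channel path description of the device's channel path with
 * index @chp_no, as obtained from chsc_get_chp_desc().
 */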
void *
ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        return chsc_get_chp_desc(sch, chp_no);
}

// FIXME: these have to go:

int
_ccw_device_get_subchannel_number(struct ccw_device *cdev)
{
        return cdev->private->sch_no;
}

int
_ccw_device_get_device_number(struct ccw_device *cdev)
{
        return cdev->private->devno;
}


MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_options);
EXPORT_SYMBOL(ccw_device_clear);
EXPORT_SYMBOL(ccw_device_halt);
EXPORT_SYMBOL(ccw_device_resume);
EXPORT_SYMBOL(ccw_device_start_timeout);
EXPORT_SYMBOL(ccw_device_start);
EXPORT_SYMBOL(ccw_device_start_timeout_key);
EXPORT_SYMBOL(ccw_device_start_key);
EXPORT_SYMBOL(ccw_device_get_ciw);
EXPORT_SYMBOL(ccw_device_get_path_mask);
EXPORT_SYMBOL(read_conf_data);
EXPORT_SYMBOL(read_dev_chars);
EXPORT_SYMBOL(_ccw_device_get_subchannel_number);
EXPORT_SYMBOL(_ccw_device_get_device_number);
EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc);
EXPORT_SYMBOL_GPL(read_conf_data_lpm);