/* Copyright (c) 2007 Coraid, Inc.  See COPYING for GPL terms. */
/*
 * aoecmd.c
 * Filesystem request handling methods
 */

#include <linux/ata.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/genhd.h>
#include <linux/moduleparam.h>
#include <net/net_namespace.h>
#include <asm/unaligned.h>
#include "aoe.h"
static int aoe_deadsecs = 60 * 3;
module_param(aoe_deadsecs, int, 0644);
MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev.");

static int aoe_maxout = 16;
module_param(aoe_maxout, int, 0644);
MODULE_PARM_DESC(aoe_maxout,
        "Only aoe_maxout outstanding packets for every MAC on eX.Y.");
static struct sk_buff *
new_skb(ulong len)
{
        struct sk_buff *skb;

        skb = alloc_skb(len + MAX_HEADER, GFP_ATOMIC);
        if (skb) {
                skb_reserve(skb, MAX_HEADER);
                skb_reset_mac_header(skb);
                skb_reset_network_header(skb);
                skb->protocol = __constant_htons(ETH_P_AOE);
                skb_checksum_none_assert(skb);
        }
        return skb;
}
static struct frame *
getframe(struct aoetgt *t, int tag)
{
        struct frame *f, *e;

        f = t->frames;
        e = f + t->nframes;
        for (; f < e; f++)
                if (f->tag == tag)
                        return f;
        return NULL;
}

/*
 * Leave the top bit clear so we have tagspace for userland.
 * The bottom 16 bits are the xmit tick for rexmit/rttavg processing.
 * This driver reserves tag -1 to mean "unused frame."
 */
static int
newtag(struct aoetgt *t)
{
        register ulong n;

        n = jiffies & 0xffff;
        return n |= (++t->lasttag & 0x7fff) << 16;
}
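/*
 * Illustration of the layout built above: with lasttag 0x0012 at
 * jiffies tick 0x3456, newtag() returns 0x00123456 -- bit 31 clear for
 * userland tagspace, bits 30-16 the frame sequence number, and bits
 * 15-0 the send tick that tsince() subtracts to age the frame.
 */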
static int
aoehdr_atainit(struct aoedev *d, struct aoetgt *t, struct aoe_hdr *h)
{
        u32 host_tag = newtag(t);

        memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);
        memcpy(h->dst, t->addr, sizeof h->dst);
        h->type = __constant_cpu_to_be16(ETH_P_AOE);
        h->verfl = AOE_HVER;
        h->major = cpu_to_be16(d->aoemajor);
        h->minor = d->aoeminor;
        h->cmd = AOECMD_ATA;
        h->tag = cpu_to_be32(host_tag);

        return host_tag;
}
static void
put_lba(struct aoe_atahdr *ah, sector_t lba)
{
        ah->lba0 = lba;
        ah->lba1 = lba >>= 8;
        ah->lba2 = lba >>= 8;
        ah->lba3 = lba >>= 8;
        ah->lba4 = lba >>= 8;
        ah->lba5 = lba >>= 8;
}
static void
ifrotate(struct aoetgt *t)
{
        t->ifp++;
        if (t->ifp >= &t->ifs[NAOEIFS] || t->ifp->nd == NULL)
                t->ifp = t->ifs;
        if (t->ifp->nd == NULL) {
                printk(KERN_INFO "aoe: no interface to rotate to\n");
                BUG();
        }
}
static void
skb_pool_put(struct aoedev *d, struct sk_buff *skb)
{
        __skb_queue_tail(&d->skbpool, skb);
}
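/*
 * An skb in the pool may still be held by the network layer after a
 * transmit; it is only safe to reuse once we hold the sole reference
 * (skb_shinfo dataref of 1).  skb_pool_get prefers such an skb and
 * otherwise tops the pool up with a freshly allocated one.
 */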
static struct sk_buff *
skb_pool_get(struct aoedev *d)
{
        struct sk_buff *skb = skb_peek(&d->skbpool);

        if (skb && atomic_read(&skb_shinfo(skb)->dataref) == 1) {
                __skb_unlink(skb, &d->skbpool);
                return skb;
        }
        if (skb_queue_len(&d->skbpool) < NSKBPOOLMAX &&
            (skb = new_skb(ETH_ZLEN)))
                return skb;

        return NULL;
}
/* freeframe is where we do our load balancing so it's a little hairy. */
static struct frame *
freeframe(struct aoedev *d)
{
        struct frame *f, *e, *rf;
        struct aoetgt **t;
        struct sk_buff *skb;

        if (d->targets[0] == NULL) {    /* shouldn't happen, but I'm paranoid */
                printk(KERN_ERR "aoe: NULL TARGETS!\n");
                return NULL;
        }
        t = d->tgt;     /* last used target */
        t++;
        if (t >= &d->targets[NTARGETS] || !*t)
                t = d->targets;
        for (;;) {
                if ((*t)->nout < (*t)->maxout
                && t != d->htgt
                && (*t)->ifp->nd) {
                        rf = NULL;
                        f = (*t)->frames;
                        e = f + (*t)->nframes;
                        for (; f < e; f++) {
                                if (f->tag != FREETAG)
                                        continue;
                                skb = f->skb;
                                if (!skb
                                && !(f->skb = skb = new_skb(ETH_ZLEN)))
                                        continue;
                                if (atomic_read(&skb_shinfo(skb)->dataref)
                                        != 1) {
                                        if (!rf)
                                                rf = f;
                                        continue;
                                }
gotone:                         skb_shinfo(skb)->nr_frags = skb->data_len = 0;
                                skb_trim(skb, 0);
                                d->tgt = t;
                                ifrotate(*t);
                                return f;
                        }
                        /* Work can be done, but the network layer is
                           holding our precious packets.  Try to grab
                           one from the pool. */
                        f = rf;
                        if (f == NULL) {        /* more paranoia */
                                printk(KERN_ERR
                                        "aoe: freeframe: %s.\n",
                                        "unexpected null rf");
                                d->flags |= DEVFL_KICKME;
                                return NULL;
                        }
                        skb = skb_pool_get(d);
                        if (skb) {
                                skb_pool_put(d, f->skb);
                                f->skb = skb;
                                goto gotone;
                        }
                        (*t)->dataref++;
                        if ((*t)->nframes == 0)
                                d->flags |= DEVFL_KICKME;
                }
                if (t == d->tgt)        /* we've looped and found nada */
                        break;
                t++;
                if (t >= &d->targets[NTARGETS] || !*t)
                        t = d->targets;
        }
        return NULL;
}
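/*
 * Summary of the walk above: starting at the target after d->tgt, take
 * the first target that is under its window (nout < maxout), is not
 * currently being helped (t != d->htgt), and still has an interface.
 * Within that target, prefer a FREETAG frame whose skb the network
 * layer no longer references; failing that, swap in an skb from the
 * device pool before giving up and setting DEVFL_KICKME.
 */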
static int
aoecmd_ata_rw(struct aoedev *d)
{
        struct frame *f;
        struct aoe_hdr *h;
        struct aoe_atahdr *ah;
        struct buf *buf;
        struct bio_vec *bv;
        struct aoetgt *t;
        struct sk_buff *skb;
        ulong bcnt;
        char writebit, extbit;

        writebit = 0x10;
        extbit = 0x4;

        f = freeframe(d);
        if (f == NULL)
                return 0;
        t = *d->tgt;
        buf = d->inprocess;
        bv = buf->bv;
        bcnt = t->ifp->maxbcnt;
        if (bcnt == 0)
                bcnt = DEFAULTBCNT;
        if (bcnt > buf->bv_resid)
                bcnt = buf->bv_resid;
        /* initialize the headers & frame */
        skb = f->skb;
        h = (struct aoe_hdr *) skb_mac_header(skb);
        ah = (struct aoe_atahdr *) (h+1);
        skb_put(skb, sizeof *h + sizeof *ah);
        memset(h, 0, skb->len);
        f->tag = aoehdr_atainit(d, t, h);
        t->nout++;
        f->waited = 0;
        f->buf = buf;
        f->bufaddr = page_address(bv->bv_page) + buf->bv_off;
        f->bcnt = bcnt;
        f->lba = buf->sector;

        /* set up ata header */
        ah->scnt = bcnt >> 9;
        put_lba(ah, buf->sector);
        if (d->flags & DEVFL_EXT) {
                ah->aflags |= AOEAFL_EXT;
        } else {
                extbit = 0;
                ah->lba3 &= 0x0f;
                ah->lba3 |= 0xe0;       /* LBA bit + obsolete 0xa0 */
        }
        if (bio_data_dir(buf->bio) == WRITE) {
                skb_fill_page_desc(skb, 0, bv->bv_page, buf->bv_off, bcnt);
                ah->aflags |= AOEAFL_WRITE;
                skb->len += bcnt;
                skb->data_len = bcnt;
                t->wpkts++;
        } else {
                t->rpkts++;
                writebit = 0;
        }

        /* 0x20 is PIO READ; +0x10 selects WRITE, +0x04 the EXT (lba48) form */
        ah->cmdstat = ATA_CMD_PIO_READ | writebit | extbit;

        /* mark all tracking fields and load out */
        buf->nframesout += 1;
        buf->bv_off += bcnt;
        buf->bv_resid -= bcnt;
        buf->resid -= bcnt;
        buf->sector += bcnt >> 9;
        if (buf->resid == 0) {
                d->inprocess = NULL;
        } else if (buf->bv_resid == 0) {
                buf->bv = ++bv;
                buf->bv_resid = bv->bv_len;
                WARN_ON(buf->bv_resid == 0);
                buf->bv_off = bv->bv_offset;
        }

        skb->dev = t->ifp->nd;
        skb = skb_clone(skb, GFP_ATOMIC);
        if (skb)
                __skb_queue_tail(&d->sendq, skb);
        return 1;
}
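/*
 * Note the skb_clone() above: the frame keeps the original skb for
 * retransmission, and only the clone is handed to the network layer,
 * so resend() can resend a lost packet without rebuilding the command.
 */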
/* some callers cannot sleep, and they can call this function,
 * transmitting the packets later, when interrupts are on
 */
static void
aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *queue)
{
        struct aoe_hdr *h;
        struct aoe_cfghdr *ch;
        struct sk_buff *skb;
        struct net_device *ifp;

        rcu_read_lock();
        for_each_netdev_rcu(&init_net, ifp) {
                dev_hold(ifp);
                if (!is_aoe_netif(ifp))
                        goto cont;

                skb = new_skb(sizeof *h + sizeof *ch);
                if (skb == NULL) {
                        printk(KERN_INFO "aoe: skb alloc failure\n");
                        goto cont;
                }
                skb_put(skb, sizeof *h + sizeof *ch);
                skb->dev = ifp;
                __skb_queue_tail(queue, skb);
                h = (struct aoe_hdr *) skb_mac_header(skb);
                memset(h, 0, sizeof *h + sizeof *ch);

                memset(h->dst, 0xff, sizeof h->dst);
                memcpy(h->src, ifp->dev_addr, sizeof h->src);
                h->type = __constant_cpu_to_be16(ETH_P_AOE);
                h->verfl = AOE_HVER;
                h->major = cpu_to_be16(aoemajor);
                h->minor = aoeminor;
                h->cmd = AOECMD_CFG;
cont:
                dev_put(ifp);
        }
        rcu_read_unlock();
}
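/*
 * The config packets built above go out to the Ethernet broadcast
 * address (dst of all ones) on every AoE-eligible interface, so a
 * single sweep discovers any shelf answering to the given AoE
 * major/minor address.
 */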
static void
resend(struct aoedev *d, struct aoetgt *t, struct frame *f)
{
        struct sk_buff *skb;
        struct aoe_hdr *h;
        struct aoe_atahdr *ah;
        char buf[128];
        u32 n;

        ifrotate(t);
        n = newtag(t);
        skb = f->skb;
        h = (struct aoe_hdr *) skb_mac_header(skb);
        ah = (struct aoe_atahdr *) (h+1);

        snprintf(buf, sizeof buf,
                "%15s e%ld.%d oldtag=%08x@%08lx newtag=%08x s=%pm d=%pm nout=%d\n",
                "retransmit", d->aoemajor, d->aoeminor, f->tag, jiffies, n,
                h->src, h->dst, t->nout);
        aoechr_error(buf);

        f->tag = n;
        h->tag = cpu_to_be32(n);
        memcpy(h->dst, t->addr, sizeof h->dst);
        memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);

        switch (ah->cmdstat) {
        default:
                break;
        case ATA_CMD_PIO_READ:
        case ATA_CMD_PIO_READ_EXT:
        case ATA_CMD_PIO_WRITE:
        case ATA_CMD_PIO_WRITE_EXT:
                put_lba(ah, f->lba);

                n = f->bcnt;
                if (n > DEFAULTBCNT)
                        n = DEFAULTBCNT;
                ah->scnt = n >> 9;
                if (ah->aflags & AOEAFL_WRITE) {
                        skb_fill_page_desc(skb, 0, virt_to_page(f->bufaddr),
                                offset_in_page(f->bufaddr), n);
                        skb->len = sizeof *h + sizeof *ah + n;
                        skb->data_len = n;
                }
        }
        skb->dev = t->ifp->nd;
        skb = skb_clone(skb, GFP_ATOMIC);
        if (skb == NULL)
                return;
        __skb_queue_tail(&d->sendq, skb);
}
static int
tsince(int tag)
{
        int n;

        n = jiffies & 0xffff;
        n -= tag & 0xffff;
        if (n < 0)
                n += 1<<16;
        return n;
}
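/*
 * Example: a frame stamped at tick 0xfff0 and checked at tick 0x0010
 * gives n = 0x0010 - 0xfff0 = -0xffe0, corrected to 0x0020 ticks
 * elapsed, so the 16-bit tick counter wraps safely as long as no frame
 * stays outstanding for more than 65536 ticks.
 */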
static struct aoeif *
getif(struct aoetgt *t, struct net_device *nd)
{
        struct aoeif *p, *e;

        p = t->ifs;
        e = p + NAOEIFS;
        for (; p < e; p++)
                if (p->nd == nd)
                        return p;
        return NULL;
}
static struct aoeif *
addif(struct aoetgt *t, struct net_device *nd)
{
        struct aoeif *p;

        p = getif(t, NULL);     /* first empty slot */
        if (!p)
                return NULL;
        p->nd = nd;
        p->maxbcnt = DEFAULTBCNT;
        p->lost = 0;
        p->lostjumbo = 0;
        return p;
}
static void
ejectif(struct aoetgt *t, struct aoeif *ifp)
{
        struct aoeif *e;
        ulong n;

        e = t->ifs + NAOEIFS - 1;
        n = (e - ifp) * sizeof *ifp;
        memmove(ifp, ifp+1, n);
        e->nd = NULL;
}
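/*
 * sthtith migrates every outstanding frame from the target being
 * helped (d->htgt) onto the currently selected target, retransmitting
 * each one, then strips the helped target of its interfaces.
 */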
static int
sthtith(struct aoedev *d)
{
        struct frame *f, *e, *nf;
        struct sk_buff *skb;
        struct aoetgt *ht = *d->htgt;

        f = ht->frames;
        e = f + ht->nframes;
        for (; f < e; f++) {
                if (f->tag == FREETAG)
                        continue;
                nf = freeframe(d);
                if (!nf)
                        return 0;
                skb = nf->skb;
                *nf = *f;
                f->skb = skb;
                f->tag = FREETAG;
                nf->waited = 0;
                ht->nout--;
                (*d->tgt)->nout++;
                resend(d, *d->tgt, nf);
        }
        /* he's clean, he's useless.  take away his interfaces */
        memset(ht->ifs, 0, sizeof ht->ifs);
        d->htgt = NULL;
        return 1;
}
static inline unsigned char
ata_scnt(unsigned char *packet)
{
        struct aoe_hdr *h;
        struct aoe_atahdr *ah;

        h = (struct aoe_hdr *) packet;
        ah = (struct aoe_atahdr *) (h+1);
        return ah->scnt;
}
static void
rexmit_timer(ulong vp)
{
        struct sk_buff_head queue;
        struct aoedev *d;
        struct aoetgt *t, **tt, **te;
        struct aoeif *ifp;
        struct frame *f, *e;
        register long timeout;
        ulong flags, n;

        d = (struct aoedev *) vp;

        /* timeout is always ~150% of the moving average */
        timeout = d->rttavg;
        timeout += timeout >> 1;

        spin_lock_irqsave(&d->lock, flags);

        if (d->flags & DEVFL_TKILL) {
                spin_unlock_irqrestore(&d->lock, flags);
                return;
        }
        tt = d->targets;
        te = tt + NTARGETS;
        for (; tt < te && *tt; tt++) {
                t = *tt;
                f = t->frames;
                e = f + t->nframes;
                for (; f < e; f++) {
                        if (f->tag == FREETAG
                        || tsince(f->tag) < timeout)
                                continue;
                        n = f->waited += timeout;
                        n /= HZ;
                        if (n > aoe_deadsecs) {
                                /* waited too long.  device failure. */
                                aoedev_downdev(d);
                                break;
                        }

                        if (n > HELPWAIT /* see if another target can help */
                        && (tt != d->targets || d->targets[1]))
                                d->htgt = tt;

                        if (t->nout == t->maxout) {
                                if (t->maxout > 1)
                                        t->maxout--;
                                t->lastwadj = jiffies;
                        }

                        ifp = getif(t, f->skb->dev);
                        if (ifp && ++ifp->lost > (t->nframes << 1)
                        && (ifp != t->ifs || t->ifs[1].nd)) {
                                ejectif(t, ifp);
                                ifp = NULL;
                        }

                        if (ata_scnt(skb_mac_header(f->skb)) > DEFAULTBCNT / 512
                        && ifp && ++ifp->lostjumbo > (t->nframes << 1)
                        && ifp->maxbcnt != DEFAULTBCNT) {
                                printk(KERN_INFO
                                        "aoe: e%ld.%d: "
                                        "too many lost jumbo on "
                                        "%s:%pm - "
                                        "falling back to %d frames.\n",
                                        d->aoemajor, d->aoeminor,
                                        ifp->nd->name, t->addr,
                                        DEFAULTBCNT);
                                ifp->maxbcnt = 0;
                        }
                        resend(d, t, f);
                }

                /* window check */
                if (t->nout == t->maxout
                && t->maxout < t->nframes
                && (jiffies - t->lastwadj)/HZ > 10) {
                        t->maxout++;
                        t->lastwadj = jiffies;
                }
        }

        if (!skb_queue_empty(&d->sendq)) {
                n = d->rttavg <<= 1;
                if (n > MAXTIMER)
                        d->rttavg = MAXTIMER;
        }

        if (d->flags & DEVFL_KICKME || d->htgt) {
                d->flags &= ~DEVFL_KICKME;
                aoecmd_work(d);
        }

        __skb_queue_head_init(&queue);
        skb_queue_splice_init(&d->sendq, &queue);

        d->timer.expires = jiffies + TIMERTICK;
        add_timer(&d->timer);

        spin_unlock_irqrestore(&d->lock, flags);

        aoenet_xmit(&queue);
}
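/*
 * Taken together, the window checks above give an additive
 * increase/decrease scheme: a timeout while the window is full shrinks
 * t->maxout by one, while ten seconds of running at a full window
 * without trouble grows it by one, up to t->nframes.
 */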
/* enters with d->lock held */
void
aoecmd_work(struct aoedev *d)
{
        struct buf *buf;
loop:
        if (d->htgt && !sthtith(d))
                return;
        if (d->inprocess == NULL) {
                if (list_empty(&d->bufq))
                        return;
                buf = container_of(d->bufq.next, struct buf, bufs);
                list_del(d->bufq.next);
                d->inprocess = buf;
        }
        if (aoecmd_ata_rw(d))
                goto loop;
}
/* this function performs work that has been deferred until sleeping is OK
 */
void
aoecmd_sleepwork(struct work_struct *work)
{
        struct aoedev *d = container_of(work, struct aoedev, work);

        if (d->flags & DEVFL_GDALLOC)
                aoeblk_gdalloc(d);

        if (d->flags & DEVFL_NEWSIZE) {
                struct block_device *bd;
                unsigned long flags;
                u64 ssize;

                ssize = get_capacity(d->gd);
                bd = bdget_disk(d->gd, 0);

                if (bd) {
                        mutex_lock(&bd->bd_inode->i_mutex);
                        i_size_write(bd->bd_inode, (loff_t)ssize<<9);
                        mutex_unlock(&bd->bd_inode->i_mutex);
                        bdput(bd);
                }
                spin_lock_irqsave(&d->lock, flags);
                d->flags |= DEVFL_UP;
                d->flags &= ~DEVFL_NEWSIZE;
                spin_unlock_irqrestore(&d->lock, flags);
        }
}
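/*
 * The ATA IDENTIFY payload handled below is 256 little-endian 16-bit
 * words in a 512-byte buffer, so word N sits at byte offset N << 1;
 * e.g. word 83 is read from bytes 166-167.
 */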
static void
ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
{
        u64 ssize;
        u16 n;

        /* word 83: command set supported */
        n = get_unaligned_le16(&id[83 << 1]);

        /* word 86: command set/feature enabled */
        n |= get_unaligned_le16(&id[86 << 1]);

        if (n & (1<<10)) {      /* bit 10: LBA 48 */
                d->flags |= DEVFL_EXT;

                /* word 100: number lba48 sectors */
                ssize = get_unaligned_le64(&id[100 << 1]);

                /* set as in ide-disk.c:init_idedisk_capacity */
                d->geo.cylinders = ssize;
                d->geo.cylinders /= (255 * 63);
                d->geo.heads = 255;
                d->geo.sectors = 63;
        } else {
                d->flags &= ~DEVFL_EXT;

                /* number lba28 sectors */
                ssize = get_unaligned_le32(&id[60 << 1]);

                /* NOTE: obsolete in ATA 6 */
                d->geo.cylinders = get_unaligned_le16(&id[54 << 1]);
                d->geo.heads = get_unaligned_le16(&id[55 << 1]);
                d->geo.sectors = get_unaligned_le16(&id[56 << 1]);
        }

        if (d->ssize != ssize)
                printk(KERN_INFO
                        "aoe: %pm e%ld.%d v%04x has %llu sectors\n",
                        t->addr,
                        d->aoemajor, d->aoeminor,
                        d->fw_ver, (long long)ssize);
        d->ssize = ssize;
        d->geo.start = 0;
        if (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
                return;
        if (d->gd != NULL) {
                set_capacity(d->gd, ssize);
                d->flags |= DEVFL_NEWSIZE;
        } else
                d->flags |= DEVFL_GDALLOC;
        schedule_work(&d->work);
}
static void
calc_rttavg(struct aoedev *d, int rtt)
{
        register long n;

        n = rtt;
        if (n < 0) {
                n = -rtt;
                if (n < MINTIMER)
                        n = MINTIMER;
                else if (n > MAXTIMER)
                        n = MAXTIMER;
                d->mintimer += (n - d->mintimer) >> 1;
        } else if (n < d->mintimer)
                n = d->mintimer;
        else if (n > MAXTIMER)
                n = MAXTIMER;

        /* g == .25; cf. Congestion Avoidance and Control, Jacobson & Karels; 1988 */
        n -= d->rttavg;
        d->rttavg += n >> 2;
}
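/*
 * I.e. rttavg <- rttavg + (rtt - rttavg)/4, the exponentially weighted
 * moving average with gain g = 1/4 from the Jacobson & Karels paper.
 * A negative rtt argument (see the unmatched-tag path in
 * aoecmd_ata_rsp) feeds the minimum-timer estimate instead.
 */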
static struct aoetgt *
gettgt(struct aoedev *d, char *addr)
{
        struct aoetgt **t, **e;

        t = d->targets;
        e = t + NTARGETS;
        for (; t < e && *t; t++)
                if (memcmp((*t)->addr, addr, sizeof((*t)->addr)) == 0)
                        return *t;
        return NULL;
}
static void
diskstats(struct gendisk *disk, struct bio *bio, ulong duration, sector_t sector)
{
        unsigned long n_sect = bio->bi_size >> 9;
        const int rw = bio_data_dir(bio);
        struct hd_struct *part;
        int cpu;

        cpu = part_stat_lock();
        part = disk_map_sector_rcu(disk, sector);

        part_stat_inc(cpu, part, ios[rw]);
        part_stat_add(cpu, part, ticks[rw], duration);
        part_stat_add(cpu, part, sectors[rw], n_sect);
        part_stat_add(cpu, part, io_ticks, duration);

        part_stat_unlock();
}
void
aoecmd_ata_rsp(struct sk_buff *skb)
{
        struct sk_buff_head queue;
        struct aoedev *d;
        struct aoe_hdr *hin, *hout;
        struct aoe_atahdr *ahin, *ahout;
        struct frame *f;
        struct buf *buf;
        struct aoetgt *t;
        struct aoeif *ifp;
        register long n;
        ulong flags;
        char ebuf[128];
        u16 aoemajor;

        hin = (struct aoe_hdr *) skb_mac_header(skb);
        aoemajor = get_unaligned_be16(&hin->major);
        d = aoedev_by_aoeaddr(aoemajor, hin->minor);
        if (d == NULL) {
                snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
                        "for unknown device %d.%d\n",
                        aoemajor, hin->minor);
                aoechr_error(ebuf);
                return;
        }

        spin_lock_irqsave(&d->lock, flags);

        n = get_unaligned_be32(&hin->tag);
        t = gettgt(d, hin->src);
        if (t == NULL) {
                printk(KERN_INFO "aoe: can't find target e%ld.%d:%pm\n",
                        d->aoemajor, d->aoeminor, hin->src);
                spin_unlock_irqrestore(&d->lock, flags);
                return;
        }
        f = getframe(t, n);
        if (f == NULL) {
                calc_rttavg(d, -tsince(n));
                spin_unlock_irqrestore(&d->lock, flags);
                snprintf(ebuf, sizeof ebuf,
                        "%15s e%d.%d    tag=%08x@%08lx\n",
                        "unexpected rsp",
                        get_unaligned_be16(&hin->major),
                        hin->minor,
                        get_unaligned_be32(&hin->tag),
                        jiffies);
                aoechr_error(ebuf);
                return;
        }

        calc_rttavg(d, tsince(f->tag));

        ahin = (struct aoe_atahdr *) (hin+1);
        hout = (struct aoe_hdr *) skb_mac_header(f->skb);
        ahout = (struct aoe_atahdr *) (hout+1);
        buf = f->buf;

        /* 0xa9 is BSY | DF | DRQ | ERR; these bits cleared on success */
        if (ahin->cmdstat & 0xa9) {
                printk(KERN_ERR
                        "aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%d\n",
                        ahout->cmdstat, ahin->cmdstat,
                        d->aoemajor, d->aoeminor);
                if (buf)
                        buf->flags |= BUFFL_FAIL;
        } else {
                if (d->htgt && t == *d->htgt) /* I'll help myself, thank you. */
                        d->htgt = NULL;
                n = ahout->scnt << 9;
                switch (ahout->cmdstat) {
                case ATA_CMD_PIO_READ:
                case ATA_CMD_PIO_READ_EXT:
                        if (skb->len - sizeof *hin - sizeof *ahin < n) {
                                printk(KERN_ERR
                                        "aoe: %s.  skb->len=%d need=%ld\n",
                                        "runt data size in read", skb->len, n);
                                /* fail frame f?  just returning will rexmit. */
                                spin_unlock_irqrestore(&d->lock, flags);
                                return;
                        }
                        memcpy(f->bufaddr, ahin+1, n);
                        /* fall through: reads share the per-interface
                         * bookkeeping and partial-completion logic below */
                case ATA_CMD_PIO_WRITE:
                case ATA_CMD_PIO_WRITE_EXT:
                        ifp = getif(t, skb->dev);
                        if (ifp) {
                                ifp->lost = 0;
                                if (n > DEFAULTBCNT)
                                        ifp->lostjumbo = 0;
                        }
                        if (f->bcnt -= n) {
                                f->lba += n >> 9;
                                f->bufaddr += n;
                                resend(d, t, f);
                                goto xmit;
                        }
                        break;
                case ATA_CMD_ID_ATA:
                        if (skb->len - sizeof *hin - sizeof *ahin < 512) {
                                printk(KERN_INFO
                                        "aoe: runt data size in ataid.  skb->len=%d\n",
                                        skb->len);
                                spin_unlock_irqrestore(&d->lock, flags);
                                return;
                        }
                        ataid_complete(d, t, (char *) (ahin+1));
                        break;
                default:
                        printk(KERN_INFO
                                "aoe: unrecognized ata command %2.2Xh for %d.%d\n",
                                ahout->cmdstat,
                                get_unaligned_be16(&hin->major),
                                hin->minor);
                }
        }

        if (buf && --buf->nframesout == 0 && buf->resid == 0) {
                diskstats(d->gd, buf->bio, jiffies - buf->stime, buf->sector);
                if (buf->flags & BUFFL_FAIL)
                        bio_endio(buf->bio, -EIO);
                else {
                        bio_flush_dcache_pages(buf->bio);
                        bio_endio(buf->bio, 0);
                }
                mempool_free(buf, d->bufpool);
        }

        f->buf = NULL;
        f->tag = FREETAG;
        t->nout--;

        aoecmd_work(d);
xmit:
        __skb_queue_head_init(&queue);
        skb_queue_splice_init(&d->sendq, &queue);

        spin_unlock_irqrestore(&d->lock, flags);
        aoenet_xmit(&queue);
}
void
aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
{
        struct sk_buff_head queue;

        __skb_queue_head_init(&queue);
        aoecmd_cfg_pkts(aoemajor, aoeminor, &queue);
        aoenet_xmit(&queue);
}
struct sk_buff *
aoecmd_ata_id(struct aoedev *d)
{
        struct aoe_hdr *h;
        struct aoe_atahdr *ah;
        struct frame *f;
        struct sk_buff *skb;
        struct aoetgt *t;

        f = freeframe(d);
        if (f == NULL)
                return NULL;

        t = *d->tgt;

        /* initialize the headers & frame */
        skb = f->skb;
        h = (struct aoe_hdr *) skb_mac_header(skb);
        ah = (struct aoe_atahdr *) (h+1);
        skb_put(skb, sizeof *h + sizeof *ah);
        memset(h, 0, skb->len);
        f->tag = aoehdr_atainit(d, t, h);
        t->nout++;
        f->waited = 0;

        /* set up ata header */
        ah->scnt = 1;
        ah->cmdstat = ATA_CMD_ID_ATA;
        ah->lba3 = 0xa0;

        skb->dev = t->ifp->nd;

        d->rttavg = MAXTIMER;
        d->timer.function = rexmit_timer;

        return skb_clone(skb, GFP_ATOMIC);
}
static struct aoetgt *
addtgt(struct aoedev *d, char *addr, ulong nframes)
{
        struct aoetgt *t, **tt, **te;
        struct frame *f, *e;

        tt = d->targets;
        te = tt + NTARGETS;
        for (; tt < te && *tt; tt++)
                ;

        if (tt == te) {
                printk(KERN_INFO
                        "aoe: device addtgt failure; too many targets\n");
                return NULL;
        }
        t = kcalloc(1, sizeof *t, GFP_ATOMIC);
        f = kcalloc(nframes, sizeof *f, GFP_ATOMIC);
        if (!t || !f) {
                kfree(f);
                kfree(t);
                printk(KERN_INFO "aoe: cannot allocate memory to add target\n");
                return NULL;
        }

        t->nframes = nframes;
        t->frames = f;
        e = f + nframes;
        for (; f < e; f++)
                f->tag = FREETAG;
        memcpy(t->addr, addr, sizeof t->addr);
        t->ifp = t->ifs;
        t->maxout = t->nframes;
        return *tt = t;
}
void
aoecmd_cfg_rsp(struct sk_buff *skb)
{
        struct aoedev *d;
        struct aoe_hdr *h;
        struct aoe_cfghdr *ch;
        struct aoetgt *t;
        struct aoeif *ifp;
        ulong flags, sysminor, aoemajor;
        struct sk_buff *sl;
        u16 n;

        h = (struct aoe_hdr *) skb_mac_header(skb);
        ch = (struct aoe_cfghdr *) (h+1);

        /*
         * Enough people have their dip switches set backwards to
         * warrant a loud message for this special case.
         */
        aoemajor = get_unaligned_be16(&h->major);
        if (aoemajor == 0xfff) {
                printk(KERN_ERR "aoe: Warning: shelf address is all ones.  "
                        "Check shelf dip switches.\n");
                return;
        }

        sysminor = SYSMINOR(aoemajor, h->minor);
        if (sysminor * AOE_PARTITIONS + AOE_PARTITIONS > MINORMASK) {
                printk(KERN_INFO "aoe: e%ld.%d: minor number too large\n",
                        aoemajor, (int) h->minor);
                return;
        }

        n = be16_to_cpu(ch->bufcnt);
        if (n > aoe_maxout)     /* keep it reasonable */
                n = aoe_maxout;

        d = aoedev_by_sysminor_m(sysminor);
        if (d == NULL) {
                printk(KERN_INFO "aoe: device sysminor_m failure\n");
                return;
        }

        spin_lock_irqsave(&d->lock, flags);

        t = gettgt(d, h->src);
        if (!t) {
                t = addtgt(d, h->src, n);
                if (!t) {
                        spin_unlock_irqrestore(&d->lock, flags);
                        return;
                }
        }
        ifp = getif(t, skb->dev);
        if (!ifp) {
                ifp = addif(t, skb->dev);
                if (!ifp) {
                        printk(KERN_INFO
                                "aoe: device addif failure; "
                                "too many interfaces?\n");
                        spin_unlock_irqrestore(&d->lock, flags);
                        return;
                }
        }
        if (ifp->maxbcnt) {
                n = ifp->nd->mtu;
                n -= sizeof (struct aoe_hdr) + sizeof (struct aoe_atahdr);
                n /= 512;
                if (n > ch->scnt)
                        n = ch->scnt;
                n = n ? n * 512 : DEFAULTBCNT;
                if (n != ifp->maxbcnt) {
                        printk(KERN_INFO
                                "aoe: e%ld.%d: setting %d%s%s:%pm\n",
                                d->aoemajor, d->aoeminor, n,
                                " byte data frames on ", ifp->nd->name,
                                t->addr);
                        ifp->maxbcnt = n;
                }
        }

        /* don't change users' perspective */
        if (d->nopen) {
                spin_unlock_irqrestore(&d->lock, flags);
                return;
        }
        d->fw_ver = be16_to_cpu(ch->fwver);

        sl = aoecmd_ata_id(d);

        spin_unlock_irqrestore(&d->lock, flags);

        if (sl) {
                struct sk_buff_head queue;
                __skb_queue_head_init(&queue);
                __skb_queue_tail(&queue, sl);
                aoenet_xmit(&queue);
        }
}
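/*
 * Illustrative maxbcnt arithmetic for the code above, assuming the
 * 24-byte AoE header and 12-byte ATA arg header declared in aoe.h: a
 * 9000-byte jumbo MTU leaves 8964 bytes, or 17 whole sectors, capped
 * by the target's advertised ch->scnt, for 17 * 512 = 8704 data bytes
 * per frame.
 */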
void
aoecmd_cleanslate(struct aoedev *d)
{
        struct aoetgt **t, **te;
        struct aoeif *p, *e;

        d->mintimer = MINTIMER;

        t = d->targets;
        te = t + NTARGETS;
        for (; t < te && *t; t++) {
                (*t)->maxout = (*t)->nframes;
                p = (*t)->ifs;
                e = p + NAOEIFS;
                for (; p < e; p++) {
                        p->lostjumbo = 0;
                        p->lost = 0;
                        p->maxbcnt = DEFAULTBCNT;
                }
        }
}