/* Copyright (c) 2007 Coraid, Inc.  See COPYING for GPL terms. */
/*
 * aoecmd.c
 * Filesystem request handling methods
 */

#include <linux/ata.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/genhd.h>
#include <linux/moduleparam.h>
#include <net/net_namespace.h>
#include <asm/unaligned.h>
#include "aoe.h"

static int aoe_deadsecs = 60 * 3;
module_param(aoe_deadsecs, int, 0644);
MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev.");

static int aoe_maxout = 16;
module_param(aoe_maxout, int, 0644);
MODULE_PARM_DESC(aoe_maxout,
        "Only aoe_maxout outstanding packets for every MAC on eX.Y.");

static struct sk_buff *
new_skb(ulong len)
{
        struct sk_buff *skb;

        skb = alloc_skb(len, GFP_ATOMIC);
        if (skb) {
                skb_reset_mac_header(skb);
                skb_reset_network_header(skb);
                skb->protocol = __constant_htons(ETH_P_AOE);
        }
        return skb;
}

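/* find the outstanding frame on target @t whose command was sent
 * with @tag; NULL when the tag matches nothing we sent
 */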
static struct frame *
getframe(struct aoetgt *t, int tag)
{
        struct frame *f, *e;

        f = t->frames;
        e = f + t->nframes;
        for (; f < e; f++)
                if (f->tag == tag)
                        return f;
        return NULL;
}

/*
 * Leave the top bit clear so we have tagspace for userland.
 * The bottom 16 bits are the xmit tick for rexmit/rttavg processing.
 * This driver reserves tag -1 to mean "unused frame."
 */
static int
newtag(struct aoetgt *t)
{
        register ulong n;

        n = jiffies & 0xffff;
        return n |= (++t->lasttag & 0x7fff) << 16;
}

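/* fill in the ethernet and AoE header fields common to every ATA
 * command sent to target @t, returning the newly minted host-side tag
 */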
static int
aoehdr_atainit(struct aoedev *d, struct aoetgt *t, struct aoe_hdr *h)
{
        u32 host_tag = newtag(t);

        memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);
        memcpy(h->dst, t->addr, sizeof h->dst);
        h->type = __constant_cpu_to_be16(ETH_P_AOE);
        h->verfl = AOE_HVER;
        h->major = cpu_to_be16(d->aoemajor);
        h->minor = d->aoeminor;
        h->cmd = AOECMD_ATA;
        h->tag = cpu_to_be32(host_tag);

        return host_tag;
}

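/* store the sector address in the six lba bytes of the ATA header,
 * least significant byte first
 */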
static void
put_lba(struct aoe_atahdr *ah, sector_t lba)
{
        ah->lba0 = lba;
        ah->lba1 = lba >>= 8;
        ah->lba2 = lba >>= 8;
        ah->lba3 = lba >>= 8;
        ah->lba4 = lba >>= 8;
        ah->lba5 = lba >>= 8;
}

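/* rotate to the target's next usable network interface, round-robin */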
static void
ifrotate(struct aoetgt *t)
{
        t->ifp++;
        if (t->ifp >= &t->ifs[NAOEIFS] || t->ifp->nd == NULL)
                t->ifp = t->ifs;
        if (t->ifp->nd == NULL) {
                printk(KERN_INFO "aoe: no interface to rotate to\n");
                BUG();
        }
}

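/* The skb pool keeps spare packets around so that command processing
 * can continue while the network layer still holds references to the
 * skbs of outstanding frames; see the dataref test in skb_pool_get.
 */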
static void
skb_pool_put(struct aoedev *d, struct sk_buff *skb)
{
        __skb_queue_tail(&d->skbpool, skb);
}

static struct sk_buff *
skb_pool_get(struct aoedev *d)
{
        struct sk_buff *skb = skb_peek(&d->skbpool);

        if (skb && atomic_read(&skb_shinfo(skb)->dataref) == 1) {
                __skb_unlink(skb, &d->skbpool);
                return skb;
        }
        if (skb_queue_len(&d->skbpool) < NSKBPOOLMAX &&
            (skb = new_skb(ETH_ZLEN)))
                return skb;

        return NULL;
}

/* freeframe is where we do our load balancing so it's a little hairy. */
static struct frame *
freeframe(struct aoedev *d)
{
        struct frame *f, *e, *rf;
        struct aoetgt **t;
        struct sk_buff *skb;

        if (d->targets[0] == NULL) {	/* shouldn't happen, but I'm paranoid */
                printk(KERN_ERR "aoe: NULL TARGETS!\n");
                return NULL;
        }
        t = d->tgt;
        t++;
        if (t >= &d->targets[NTARGETS] || !*t)
                t = d->targets;
        for (;;) {
                if ((*t)->nout < (*t)->maxout
                && t != d->htgt
                && (*t)->ifp->nd) {
                        rf = NULL;
                        f = (*t)->frames;
                        e = f + (*t)->nframes;
                        for (; f < e; f++) {
                                if (f->tag != FREETAG)
                                        continue;
                                skb = f->skb;
                                if (!skb
                                && !(f->skb = skb = new_skb(ETH_ZLEN)))
                                        continue;
                                if (atomic_read(&skb_shinfo(skb)->dataref)
                                != 1) {
                                        if (!rf)
                                                rf = f;
                                        continue;
                                }
gotone:                         skb_shinfo(skb)->nr_frags = skb->data_len = 0;
                                skb_trim(skb, 0);
                                d->tgt = t;
                                ifrotate(*t);
                                return f;
                        }
                        /* Work can be done, but the network layer is
                           holding our precious packets.  Try to grab
                           one from the pool. */
                        f = rf;
                        if (f == NULL) {	/* more paranoia */
                                printk(KERN_ERR
                                        "aoe: freeframe: %s.\n",
                                        "unexpected null rf");
                                d->flags |= DEVFL_KICKME;
                                return NULL;
                        }
                        skb = skb_pool_get(d);
                        if (skb) {
                                skb_pool_put(d, f->skb);
                                f->skb = skb;
                                goto gotone;
                        }
                        d->flags |= DEVFL_KICKME;
                }
                if (t == d->tgt)	/* we've looped and found nada */
                        break;
                t++;
                if (t >= &d->targets[NTARGETS] || !*t)
                        t = d->targets;
        }
        return NULL;
}

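/* build one ATA read or write covering up to maxbcnt bytes of the
 * current bio_vec, then queue a clone of it on d->sendq; returns zero
 * when no free frame is available, nonzero when a command was queued
 */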
static int
aoecmd_ata_rw(struct aoedev *d)
{
        struct frame *f;
        struct aoe_hdr *h;
        struct aoe_atahdr *ah;
        struct buf *buf;
        struct bio_vec *bv;
        struct aoetgt *t;
        struct sk_buff *skb;
        ulong bcnt;
        char writebit, extbit;

        writebit = 0x10;
        extbit = 0x4;

        f = freeframe(d);
        if (f == NULL)
                return 0;
        t = *d->tgt;
        buf = d->inprocess;
        bv = buf->bv;
        bcnt = t->ifp->maxbcnt;
        if (bcnt == 0)
                bcnt = DEFAULTBCNT;
        if (bcnt > buf->bv_resid)
                bcnt = buf->bv_resid;
        /* initialize the headers & frame */
        skb = f->skb;
        h = (struct aoe_hdr *) skb_mac_header(skb);
        ah = (struct aoe_atahdr *) (h+1);
        skb_put(skb, sizeof *h + sizeof *ah);
        memset(h, 0, skb->len);
        f->tag = aoehdr_atainit(d, t, h);
        t->nout++;
        f->waited = 0;
        f->buf = buf;
        f->bufaddr = page_address(bv->bv_page) + buf->bv_off;
        f->bcnt = bcnt;
        f->lba = buf->sector;

        /* set up ata header */
        ah->scnt = bcnt >> 9;
        put_lba(ah, buf->sector);
        if (d->flags & DEVFL_EXT) {
                ah->aflags |= AOEAFL_EXT;
        } else {
                extbit = 0;
                ah->lba3 &= 0x0f;
                ah->lba3 |= 0xe0;	/* LBA bit + obsolete 0xa0 */
        }
        if (bio_data_dir(buf->bio) == WRITE) {
                skb_fill_page_desc(skb, 0, bv->bv_page, buf->bv_off, bcnt);
                ah->aflags |= AOEAFL_WRITE;
                skb->len += bcnt;
                skb->data_len = bcnt;
                t->wpkts++;
        } else {
                t->rpkts++;
                writebit = 0;
        }

        ah->cmdstat = ATA_CMD_PIO_READ | writebit | extbit;

        /* mark all tracking fields and load out */
        buf->nframesout += 1;
        buf->bv_off += bcnt;
        buf->bv_resid -= bcnt;
        buf->resid -= bcnt;
        buf->sector += bcnt >> 9;
        if (buf->resid == 0) {
                d->inprocess = NULL;
        } else if (buf->bv_resid == 0) {
                buf->bv = ++bv;
                buf->bv_resid = bv->bv_len;
                WARN_ON(buf->bv_resid == 0);
                buf->bv_off = bv->bv_offset;
        }

        skb->dev = t->ifp->nd;
        skb = skb_clone(skb, GFP_ATOMIC);
        if (skb)
                __skb_queue_tail(&d->sendq, skb);
        return 1;
}

/* some callers cannot sleep, and they can call this function,
 * transmitting the packets later, when interrupts are on
 */
static void
aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *queue)
{
        struct aoe_hdr *h;
        struct aoe_cfghdr *ch;
        struct sk_buff *skb;
        struct net_device *ifp;

        rcu_read_lock();
        for_each_netdev_rcu(&init_net, ifp) {
                dev_hold(ifp);
                if (!is_aoe_netif(ifp))
                        goto cont;

                skb = new_skb(sizeof *h + sizeof *ch);
                if (skb == NULL) {
                        printk(KERN_INFO "aoe: skb alloc failure\n");
                        goto cont;
                }
                skb_put(skb, sizeof *h + sizeof *ch);
                skb->dev = ifp;
                __skb_queue_tail(queue, skb);
                h = (struct aoe_hdr *) skb_mac_header(skb);
                memset(h, 0, sizeof *h + sizeof *ch);

                memset(h->dst, 0xff, sizeof h->dst);
                memcpy(h->src, ifp->dev_addr, sizeof h->src);
                h->type = __constant_cpu_to_be16(ETH_P_AOE);
                h->verfl = AOE_HVER;
                h->major = cpu_to_be16(aoemajor);
                h->minor = aoeminor;
                h->cmd = AOECMD_CFG;

cont:
                dev_put(ifp);
        }
        rcu_read_unlock();
}

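/* give frame @f a fresh tag and send it to target @t again, logging
 * the retransmit through aoechr_error for userland to read
 */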
static void
resend(struct aoedev *d, struct aoetgt *t, struct frame *f)
{
        struct sk_buff *skb;
        struct aoe_hdr *h;
        struct aoe_atahdr *ah;
        char buf[128];
        u32 n;

        ifrotate(t);
        n = newtag(t);
        skb = f->skb;
        h = (struct aoe_hdr *) skb_mac_header(skb);
        ah = (struct aoe_atahdr *) (h+1);

        snprintf(buf, sizeof buf,
                "%15s e%ld.%d oldtag=%08x@%08lx newtag=%08x s=%pm d=%pm nout=%d\n",
                "retransmit", d->aoemajor, d->aoeminor, f->tag, jiffies, n,
                h->src, h->dst, t->nout);
        aoechr_error(buf);

        f->tag = n;
        h->tag = cpu_to_be32(n);
        memcpy(h->dst, t->addr, sizeof h->dst);
        memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);

        switch (ah->cmdstat) {
        default:
                break;
        case ATA_CMD_PIO_READ:
        case ATA_CMD_PIO_READ_EXT:
        case ATA_CMD_PIO_WRITE:
        case ATA_CMD_PIO_WRITE_EXT:
                put_lba(ah, f->lba);

                n = f->bcnt;
                if (n > DEFAULTBCNT)
                        n = DEFAULTBCNT;
                ah->scnt = n >> 9;
                if (ah->aflags & AOEAFL_WRITE) {
                        skb_fill_page_desc(skb, 0, virt_to_page(f->bufaddr),
                                offset_in_page(f->bufaddr), n);
                        skb->len = sizeof *h + sizeof *ah + n;
                        skb->data_len = n;
                }
        }
        skb->dev = t->ifp->nd;
        skb = skb_clone(skb, GFP_ATOMIC);
        if (skb == NULL)
                return;
        __skb_queue_tail(&d->sendq, skb);
}

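/* ticks elapsed since the tag was transmitted, recovered from the
 * xmit tick that newtag stored in the tag's low 16 bits
 */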
static int
tsince(int tag)
{
        int n;

        n = jiffies & 0xffff;
        n -= tag & 0xffff;
        if (n < 0)
                n += 1<<16;
        return n;
}

static struct aoeif *
getif(struct aoetgt *t, struct net_device *nd)
{
        struct aoeif *p, *e;

        p = t->ifs;
        e = p + NAOEIFS;
        for (; p < e; p++)
                if (p->nd == nd)
                        return p;
        return NULL;
}

static struct aoeif *
addif(struct aoetgt *t, struct net_device *nd)
{
        struct aoeif *p;

        p = getif(t, NULL);	/* find a free slot */
        if (!p)
                return NULL;
        p->nd = nd;
        p->maxbcnt = DEFAULTBCNT;
        p->lost = 0;
        p->lostjumbo = 0;
        return p;
}

static void
ejectif(struct aoetgt *t, struct aoeif *ifp)
{
        struct aoeif *e;
        ulong n;

        e = t->ifs + NAOEIFS - 1;
        n = (e - ifp) * sizeof *ifp;
        memmove(ifp, ifp+1, n);
        e->nd = NULL;
}

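/* move each frame still outstanding on the struggling target d->htgt
 * onto whatever target freeframe selects, resending it there, then
 * strip the old target of its interfaces
 */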
static int
sthtith(struct aoedev *d)
{
        struct frame *f, *e, *nf;
        struct sk_buff *skb;
        struct aoetgt *ht = *d->htgt;

        f = ht->frames;
        e = f + ht->nframes;
        for (; f < e; f++) {
                if (f->tag == FREETAG)
                        continue;
                nf = freeframe(d);
                if (!nf)
                        return 0;
                skb = nf->skb;
                *nf = *f;
                f->skb = skb;
                f->tag = FREETAG;
                nf->waited = 0;
                ht->nout--;
                (*d->tgt)->nout++;
                resend(d, *d->tgt, nf);
        }
        /* he's clean, he's useless.  take away his interfaces */
        memset(ht->ifs, 0, sizeof ht->ifs);
        d->htgt = NULL;
        return 1;
}

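/* recover the sector count from a previously built request packet */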
static inline unsigned char
ata_scnt(unsigned char *packet) {
        struct aoe_hdr *h;
        struct aoe_atahdr *ah;

        h = (struct aoe_hdr *) packet;
        ah = (struct aoe_atahdr *) (h+1);
        return ah->scnt;
}

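/* per-device timer: retransmit frames outstanding longer than the
 * timeout, adjust each target's window, eject flaky interfaces, and
 * fail the device once a frame has waited aoe_deadsecs seconds
 */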
static void
rexmit_timer(ulong vp)
{
        struct sk_buff_head queue;
        struct aoedev *d;
        struct aoetgt *t, **tt, **te;
        struct aoeif *ifp;
        struct frame *f, *e;
        register long timeout;
        ulong flags, n;

        d = (struct aoedev *) vp;

        /* timeout is always ~150% of the moving average */
        timeout = d->rttavg;
        timeout += timeout >> 1;

        spin_lock_irqsave(&d->lock, flags);

        if (d->flags & DEVFL_TKILL) {
                spin_unlock_irqrestore(&d->lock, flags);
                return;
        }
        tt = d->targets;
        te = tt + NTARGETS;
        for (; tt < te && *tt; tt++) {
                t = *tt;
                f = t->frames;
                e = f + t->nframes;
                for (; f < e; f++) {
                        if (f->tag == FREETAG
                        || tsince(f->tag) < timeout)
                                continue;
                        n = f->waited += timeout;
                        n /= HZ;
                        if (n > aoe_deadsecs) {
                                /* waited too long.  device failure. */
                                aoedev_downdev(d);
                                break;
                        }

                        if (n > HELPWAIT /* see if another target can help */
                        && (tt != d->targets || d->targets[1]))
                                d->htgt = tt;

                        if (t->nout == t->maxout) {
                                if (t->maxout > 1)
                                        t->maxout--;
                                t->lastwadj = jiffies;
                        }

                        ifp = getif(t, f->skb->dev);
                        if (ifp && ++ifp->lost > (t->nframes << 1)
                        && (ifp != t->ifs || t->ifs[1].nd)) {
                                ejectif(t, ifp);
                                ifp = NULL;
                        }

                        if (ata_scnt(skb_mac_header(f->skb)) > DEFAULTBCNT / 512
                        && ifp && ++ifp->lostjumbo > (t->nframes << 1)
                        && ifp->maxbcnt != DEFAULTBCNT) {
                                printk(KERN_INFO
                                        "aoe: e%ld.%d: "
                                        "too many lost jumbo on "
                                        "%s:%pm - "
                                        "falling back to %d frames.\n",
                                        d->aoemajor, d->aoeminor,
                                        ifp->nd->name, t->addr,
                                        DEFAULTBCNT);
                                ifp->maxbcnt = 0;
                        }
                        resend(d, t, f);
                }

                /* window check */
                if (t->nout == t->maxout
                && t->maxout < t->nframes
                && (jiffies - t->lastwadj)/HZ > 10) {
                        t->maxout++;
                        t->lastwadj = jiffies;
                }
        }

        if (!skb_queue_empty(&d->sendq)) {
                n = d->rttavg <<= 1;
                if (n > MAXTIMER)
                        d->rttavg = MAXTIMER;
        }

        if (d->flags & DEVFL_KICKME || d->htgt) {
                d->flags &= ~DEVFL_KICKME;
                aoecmd_work(d);
        }

        __skb_queue_head_init(&queue);
        skb_queue_splice_init(&d->sendq, &queue);

        d->timer.expires = jiffies + TIMERTICK;
        add_timer(&d->timer);

        spin_unlock_irqrestore(&d->lock, flags);

        aoenet_xmit(&queue);
}

/* enters with d->lock held */
void
aoecmd_work(struct aoedev *d)
{
        struct buf *buf;
loop:
        if (d->htgt && !sthtith(d))
                return;
        if (d->inprocess == NULL) {
                if (list_empty(&d->bufq))
                        return;
                buf = container_of(d->bufq.next, struct buf, bufs);
                list_del(d->bufq.next);
                d->inprocess = buf;
        }
        if (aoecmd_ata_rw(d))
                goto loop;
}

/* this function performs work that has been deferred until sleeping is OK
 */
static void
aoecmd_sleepwork(struct work_struct *work)
{
        struct aoedev *d = container_of(work, struct aoedev, work);

        if (d->flags & DEVFL_GDALLOC)
                aoeblk_gdalloc(d);

        if (d->flags & DEVFL_NEWSIZE) {
                struct block_device *bd;
                unsigned long flags;
                u64 ssize;

                ssize = get_capacity(d->gd);
                bd = bdget_disk(d->gd, 0);

                if (bd) {
                        mutex_lock(&bd->bd_inode->i_mutex);
                        i_size_write(bd->bd_inode, (loff_t)ssize<<9);
                        mutex_unlock(&bd->bd_inode->i_mutex);
                        bdput(bd);
                }
                spin_lock_irqsave(&d->lock, flags);
                d->flags |= DEVFL_UP;
                d->flags &= ~DEVFL_NEWSIZE;
                spin_unlock_irqrestore(&d->lock, flags);
        }
}

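/* digest an ATA IDENTIFY DEVICE response: record LBA48 support,
 * capacity, and legacy geometry, then schedule gendisk allocation or
 * a capacity update via aoecmd_sleepwork
 */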
static void
ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
{
        u64 ssize;
        u16 n;

        /* word 83: command set supported */
        n = get_unaligned_le16(&id[83 << 1]);

        /* word 86: command set/feature enabled */
        n |= get_unaligned_le16(&id[86 << 1]);

        if (n & (1<<10)) {	/* bit 10: LBA 48 */
                d->flags |= DEVFL_EXT;

                /* word 100: number lba48 sectors */
                ssize = get_unaligned_le64(&id[100 << 1]);

                /* set as in ide-disk.c:init_idedisk_capacity */
                d->geo.cylinders = ssize;
                d->geo.cylinders /= (255 * 63);
                d->geo.heads = 255;
                d->geo.sectors = 63;
        } else {
                d->flags &= ~DEVFL_EXT;

                /* number lba28 sectors */
                ssize = get_unaligned_le32(&id[60 << 1]);

                /* NOTE: obsolete in ATA 6 */
                d->geo.cylinders = get_unaligned_le16(&id[54 << 1]);
                d->geo.heads = get_unaligned_le16(&id[55 << 1]);
                d->geo.sectors = get_unaligned_le16(&id[56 << 1]);
        }

        if (d->ssize != ssize)
                printk(KERN_INFO
                        "aoe: %pm e%ld.%d v%04x has %llu sectors\n",
                        t->addr,
                        d->aoemajor, d->aoeminor,
                        d->fw_ver, (long long)ssize);
        d->ssize = ssize;
        d->geo.start = 0;
        if (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
                return;
        if (d->gd != NULL) {
                set_capacity(d->gd, ssize);
                d->flags |= DEVFL_NEWSIZE;
        } else
                d->flags |= DEVFL_GDALLOC;
        schedule_work(&d->work);
}

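/* fold one round-trip sample into the moving average; a negative rtt
 * marks a response we could no longer match to a frame and feeds the
 * minimum-timer estimate instead
 */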
static void
calc_rttavg(struct aoedev *d, int rtt)
{
        register long n;

        n = rtt;
        if (n < 0) {
                n = -rtt;
                if (n < MINTIMER)
                        n = MINTIMER;
                else if (n > MAXTIMER)
                        n = MAXTIMER;
                d->mintimer += (n - d->mintimer) >> 1;
        } else if (n < d->mintimer)
                n = d->mintimer;
        else if (n > MAXTIMER)
                n = MAXTIMER;

        /* g == .25; cf. Congestion Avoidance and Control, Jacobson & Karels; 1988 */
        n -= d->rttavg;
        d->rttavg += n >> 2;
}

static struct aoetgt *
gettgt(struct aoedev *d, char *addr)
{
        struct aoetgt **t, **e;

        t = d->targets;
        e = t + NTARGETS;
        for (; t < e && *t; t++)
                if (memcmp((*t)->addr, addr, sizeof((*t)->addr)) == 0)
                        return *t;
        return NULL;
}

static inline void
diskstats(struct gendisk *disk, struct bio *bio, ulong duration, sector_t sector)
{
        unsigned long n_sect = bio->bi_size >> 9;
        const int rw = bio_data_dir(bio);
        struct hd_struct *part;
        int cpu;

        cpu = part_stat_lock();
        part = disk_map_sector_rcu(disk, sector);

        part_stat_inc(cpu, part, ios[rw]);
        part_stat_add(cpu, part, ticks[rw], duration);
        part_stat_add(cpu, part, sectors[rw], n_sect);
        part_stat_add(cpu, part, io_ticks, duration);

        part_stat_unlock();
}

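/* handle an ATA response: match it to its frame by tag, update the
 * rtt average, copy read data or continue a partial command, complete
 * the bio when its last frame returns, and flush the send queue
 */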
void
aoecmd_ata_rsp(struct sk_buff *skb)
{
        struct sk_buff_head queue;
        struct aoedev *d;
        struct aoe_hdr *hin, *hout;
        struct aoe_atahdr *ahin, *ahout;
        struct frame *f;
        struct buf *buf;
        struct aoetgt *t;
        struct aoeif *ifp;
        register long n;
        ulong flags;
        char ebuf[128];
        u16 aoemajor;

        hin = (struct aoe_hdr *) skb_mac_header(skb);
        aoemajor = get_unaligned_be16(&hin->major);
        d = aoedev_by_aoeaddr(aoemajor, hin->minor);
        if (d == NULL) {
                snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
                        "for unknown device %d.%d\n",
                        aoemajor, hin->minor);
                aoechr_error(ebuf);
                return;
        }

        spin_lock_irqsave(&d->lock, flags);

        n = get_unaligned_be32(&hin->tag);
        t = gettgt(d, hin->src);
        if (t == NULL) {
                printk(KERN_INFO "aoe: can't find target e%ld.%d:%pm\n",
                        d->aoemajor, d->aoeminor, hin->src);
                spin_unlock_irqrestore(&d->lock, flags);
                return;
        }
        f = getframe(t, n);
        if (f == NULL) {
                calc_rttavg(d, -tsince(n));
                spin_unlock_irqrestore(&d->lock, flags);
                snprintf(ebuf, sizeof ebuf,
                        "%15s e%d.%d tag=%08x@%08lx\n",
                        "unexpected rsp",
                        get_unaligned_be16(&hin->major),
                        hin->minor,
                        get_unaligned_be32(&hin->tag),
                        jiffies);
                aoechr_error(ebuf);
                return;
        }

        calc_rttavg(d, tsince(f->tag));

        ahin = (struct aoe_atahdr *) (hin+1);
        hout = (struct aoe_hdr *) skb_mac_header(f->skb);
        ahout = (struct aoe_atahdr *) (hout+1);
        buf = f->buf;

        if (ahin->cmdstat & 0xa9) {	/* these bits cleared on success */
                printk(KERN_ERR
                        "aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%d\n",
                        ahout->cmdstat, ahin->cmdstat,
                        d->aoemajor, d->aoeminor);
                if (buf)
                        buf->flags |= BUFFL_FAIL;
        } else {
                if (d->htgt && t == *d->htgt) /* I'll help myself, thank you. */
                        d->htgt = NULL;
                n = ahout->scnt << 9;
                switch (ahout->cmdstat) {
                case ATA_CMD_PIO_READ:
                case ATA_CMD_PIO_READ_EXT:
                        if (skb->len - sizeof *hin - sizeof *ahin < n) {
                                printk(KERN_ERR
                                        "aoe: %s. skb->len=%d need=%ld\n",
                                        "runt data size in read", skb->len, n);
                                /* fail frame f? just returning will rexmit. */
                                spin_unlock_irqrestore(&d->lock, flags);
                                return;
                        }
                        memcpy(f->bufaddr, ahin+1, n);
                case ATA_CMD_PIO_WRITE:
                case ATA_CMD_PIO_WRITE_EXT:
                        ifp = getif(t, skb->dev);
                        if (ifp) {
                                ifp->lost = 0;
                                if (n > DEFAULTBCNT)
                                        ifp->lostjumbo = 0;
                        }
                        if (f->bcnt -= n) {
                                f->lba += n >> 9;
                                f->bufaddr += n;
                                resend(d, t, f);
                                goto xmit;
                        }
                        break;
                case ATA_CMD_ID_ATA:
                        if (skb->len - sizeof *hin - sizeof *ahin < 512) {
                                printk(KERN_INFO
                                        "aoe: runt data size in ataid. skb->len=%d\n",
                                        skb->len);
                                spin_unlock_irqrestore(&d->lock, flags);
                                return;
                        }
                        ataid_complete(d, t, (char *) (ahin+1));
                        break;
                default:
                        printk(KERN_INFO
                                "aoe: unrecognized ata command %2.2Xh for %d.%d\n",
                                ahout->cmdstat,
                                get_unaligned_be16(&hin->major),
                                hin->minor);
                }
        }

        if (buf && --buf->nframesout == 0 && buf->resid == 0) {
                diskstats(d->gd, buf->bio, jiffies - buf->stime, buf->sector);
                if (buf->flags & BUFFL_FAIL)
                        bio_endio(buf->bio, -EIO);
                else {
                        bio_flush_dcache_pages(buf->bio);
                        bio_endio(buf->bio, 0);
                }
                mempool_free(buf, d->bufpool);
        }

        f->buf = NULL;
        f->tag = FREETAG;
        t->nout--;

        aoecmd_work(d);
xmit:
        __skb_queue_head_init(&queue);
        skb_queue_splice_init(&d->sendq, &queue);

        spin_unlock_irqrestore(&d->lock, flags);
        aoenet_xmit(&queue);
}

void
aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
{
        struct sk_buff_head queue;

        __skb_queue_head_init(&queue);
        aoecmd_cfg_pkts(aoemajor, aoeminor, &queue);
        aoenet_xmit(&queue);
}

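/* build an ATA IDENTIFY DEVICE command for the current target, reset
 * the rtt estimate, and point the device timer at rexmit_timer; the
 * caller transmits the returned clone
 */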
static struct sk_buff *
aoecmd_ata_id(struct aoedev *d)
{
        struct aoe_hdr *h;
        struct aoe_atahdr *ah;
        struct frame *f;
        struct sk_buff *skb;
        struct aoetgt *t;

        f = freeframe(d);
        if (f == NULL)
                return NULL;

        t = *d->tgt;

        /* initialize the headers & frame */
        skb = f->skb;
        h = (struct aoe_hdr *) skb_mac_header(skb);
        ah = (struct aoe_atahdr *) (h+1);
        skb_put(skb, sizeof *h + sizeof *ah);
        memset(h, 0, skb->len);
        f->tag = aoehdr_atainit(d, t, h);
        t->nout++;
        f->waited = 0;

        /* set up ata header */
        ah->scnt = 1;
        ah->cmdstat = ATA_CMD_ID_ATA;
        ah->lba3 = 0xa0;

        skb->dev = t->ifp->nd;

        d->rttavg = MAXTIMER;
        d->timer.function = rexmit_timer;

        return skb_clone(skb, GFP_ATOMIC);
}

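/* record a newly discovered MAC address for this device, allocating
 * the target and its frame array; NULL when all NTARGETS slots are in
 * use or memory is tight
 */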
static struct aoetgt *
addtgt(struct aoedev *d, char *addr, ulong nframes)
{
        struct aoetgt *t, **tt, **te;
        struct frame *f, *e;

        tt = d->targets;
        te = tt + NTARGETS;
        for (; tt < te && *tt; tt++)
                ;

        if (tt == te) {
                printk(KERN_INFO
                        "aoe: device addtgt failure; too many targets\n");
                return NULL;
        }
        t = kcalloc(1, sizeof *t, GFP_ATOMIC);
        f = kcalloc(nframes, sizeof *f, GFP_ATOMIC);
        if (!t || !f) {
                kfree(f);
                kfree(t);
                printk(KERN_INFO "aoe: cannot allocate memory to add target\n");
                return NULL;
        }

        t->nframes = nframes;
        t->frames = f;
        e = f + nframes;
        for (; f < e; f++)
                f->tag = FREETAG;
        memcpy(t->addr, addr, sizeof t->addr);
        t->ifp = t->ifs;
        t->maxout = t->nframes;
        return *tt = t;
}

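/* handle an AoE config/discovery response: validate the shelf
 * address, register the responding target and interface, adopt the
 * advertised buffer count and frame size, and issue an ATA identify
 */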
void
aoecmd_cfg_rsp(struct sk_buff *skb)
{
        struct aoedev *d;
        struct aoe_hdr *h;
        struct aoe_cfghdr *ch;
        struct aoetgt *t;
        struct aoeif *ifp;
        ulong flags, sysminor, aoemajor;
        struct sk_buff *sl;
        u16 n;

        h = (struct aoe_hdr *) skb_mac_header(skb);
        ch = (struct aoe_cfghdr *) (h+1);

        /*
         * Enough people have their dip switches set backwards to
         * warrant a loud message for this special case.
         */
        aoemajor = get_unaligned_be16(&h->major);
        if (aoemajor == 0xfff) {
                printk(KERN_ERR "aoe: Warning: shelf address is all ones. "
                        "Check shelf dip switches.\n");
                return;
        }

        sysminor = SYSMINOR(aoemajor, h->minor);
        if (sysminor * AOE_PARTITIONS + AOE_PARTITIONS > MINORMASK) {
                printk(KERN_INFO "aoe: e%ld.%d: minor number too large\n",
                        aoemajor, (int) h->minor);
                return;
        }

        n = be16_to_cpu(ch->bufcnt);
        if (n > aoe_maxout)	/* keep it reasonable */
                n = aoe_maxout;

        d = aoedev_by_sysminor_m(sysminor);
        if (d == NULL) {
                printk(KERN_INFO "aoe: device sysminor_m failure\n");
                return;
        }

        spin_lock_irqsave(&d->lock, flags);

        t = gettgt(d, h->src);
        if (!t) {
                t = addtgt(d, h->src, n);
                if (!t) {
                        spin_unlock_irqrestore(&d->lock, flags);
                        return;
                }
        }
        ifp = getif(t, skb->dev);
        if (!ifp) {
                ifp = addif(t, skb->dev);
                if (!ifp) {
                        printk(KERN_INFO
                                "aoe: device addif failure; "
                                "too many interfaces?\n");
                        spin_unlock_irqrestore(&d->lock, flags);
                        return;
                }
        }
        if (ifp->maxbcnt) {
                n = ifp->nd->mtu;
                n -= sizeof (struct aoe_hdr) + sizeof (struct aoe_atahdr);
                n /= 512;
                if (n > ch->scnt)
                        n = ch->scnt;
                n = n ? n * 512 : DEFAULTBCNT;
                if (n != ifp->maxbcnt) {
                        printk(KERN_INFO
                                "aoe: e%ld.%d: setting %d%s%s:%pm\n",
                                d->aoemajor, d->aoeminor, n,
                                " byte data frames on ", ifp->nd->name,
                                t->addr);
                        ifp->maxbcnt = n;
                }
        }

        /* don't change users' perspective */
        if (d->nopen) {
                spin_unlock_irqrestore(&d->lock, flags);
                return;
        }
        d->fw_ver = be16_to_cpu(ch->fwver);

        sl = aoecmd_ata_id(d);

        spin_unlock_irqrestore(&d->lock, flags);

        if (sl) {
                struct sk_buff_head queue;
                __skb_queue_head_init(&queue);
                __skb_queue_tail(&queue, sl);
                aoenet_xmit(&queue);
        }
}

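/* reset timing and window state: full windows for every target and
 * DEFAULTBCNT data frames on every interface
 */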
void
aoecmd_cleanslate(struct aoedev *d)
{
        struct aoetgt **t, **te;
        struct aoeif *p, *e;

        d->mintimer = MINTIMER;

        t = d->targets;
        te = t + NTARGETS;
        for (; t < te && *t; t++) {
                (*t)->maxout = (*t)->nframes;
                p = (*t)->ifs;
                e = p + NAOEIFS;
                for (; p < e; p++) {
                        p->lostjumbo = 0;
                        p->lost = 0;
                        p->maxbcnt = DEFAULTBCNT;
                }
        }
}