2 * Copyright (C) 2015 IT University of Copenhagen
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version
7 * 2 as published by the Free Software Foundation.
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
14 * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
19 static struct kmem_cache *rrpc_gcb_cache, *rrpc_rq_cache;
20 static DECLARE_RWSEM(rrpc_lock);
22 static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
23 struct nvm_rq *rqd, unsigned long flags);
25 #define rrpc_for_each_lun(rrpc, rlun, i) \
26 for ((i) = 0, rlun = &(rrpc)->luns[0]; \
27 (i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])
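/*
 * A minimal usage sketch of the iterator (illustrative only): walk all
 * target LUNs and sum up their free blocks.
 *
 *	struct rrpc_lun *rlun;
 *	unsigned int free = 0;
 *	int i;
 *
 *	rrpc_for_each_lun(rrpc, rlun, i)
 *		free += rlun->nr_free_blocks;
 */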
29 static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
31 struct nvm_tgt_dev *dev = rrpc->dev;
32 struct rrpc_block *rblk = a->rblk;
33 unsigned int pg_offset;
35 lockdep_assert_held(&rrpc->rev_lock);
37 if (a->addr == ADDR_EMPTY || !rblk)
40 spin_lock(&rblk->lock);
42 div_u64_rem(a->addr, dev->geo.sec_per_blk, &pg_offset);
43 WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
44 rblk->nr_invalid_pages++;
46 spin_unlock(&rblk->lock);
48 rrpc->rev_trans_map[a->addr].addr = ADDR_EMPTY;
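/*
 * Note: both the discard path (rrpc_invalidate_range) and the remap-on-write
 * path (rrpc_update_map) funnel into rrpc_page_invalidate(); callers must
 * hold rrpc->rev_lock, as asserted above.
 */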
51 static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
56 spin_lock(&rrpc->rev_lock);
57 for (i = slba; i < slba + len; i++) {
58 struct rrpc_addr *gp = &rrpc->trans_map[i];
60 rrpc_page_invalidate(rrpc, gp);
63 spin_unlock(&rrpc->rev_lock);
66 static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
67 sector_t laddr, unsigned int pages)
70 struct rrpc_inflight_rq *inf;
72 rqd = mempool_alloc(rrpc->rq_pool, GFP_ATOMIC);
74 return ERR_PTR(-ENOMEM);
76 inf = rrpc_get_inflight_rq(rqd);
77 if (rrpc_lock_laddr(rrpc, laddr, pages, inf)) {
78 mempool_free(rqd, rrpc->rq_pool);
85 static void rrpc_inflight_laddr_release(struct rrpc *rrpc, struct nvm_rq *rqd)
87 struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd);
89 rrpc_unlock_laddr(rrpc, inf);
91 mempool_free(rqd, rrpc->rq_pool);
94 static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
96 sector_t slba = bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
97 sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
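/*
 * Worked example, assuming the usual 4 KiB exposed page size (i.e.
 * RRPC_EXPOSED_PAGE_SIZE == 4096 and NR_PHY_IN_LOG == 8 512-byte sectors
 * per logical page): a 64 KiB discard starting at sector 2048 yields
 * slba = 256 and len = 16 logical pages.
 */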
101 rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len);
109 pr_err("rrpc: unable to acquire inflight IO\n");
114 rrpc_invalidate_range(rrpc, slba, len);
115 rrpc_inflight_laddr_release(rrpc, rqd);
118 static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
120 struct nvm_tgt_dev *dev = rrpc->dev;
122 return (rblk->next_page == dev->geo.sec_per_blk);
125 /* Calculate relative addr for the given block, considering instantiated LUNs */
126 static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
128 struct nvm_tgt_dev *dev = rrpc->dev;
129 struct rrpc_lun *rlun = rblk->rlun;
131 return rlun->id * dev->geo.sec_per_blk;
134 static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_tgt_dev *dev,
135 struct rrpc_addr *gp)
137 struct rrpc_block *rblk = gp->rblk;
138 struct rrpc_lun *rlun = rblk->rlun;
140 struct ppa_addr paddr;
143 paddr = rrpc_linear_to_generic_addr(&dev->geo, paddr);
144 paddr.g.ch = rlun->bppa.g.ch;
145 paddr.g.lun = rlun->bppa.g.lun;
146 paddr.g.blk = rblk->id;
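/*
 * The translation above works in two steps: rrpc_linear_to_generic_addr()
 * expands the linear in-block offset into the per-block geometry fields,
 * then channel and lun are taken from the owning LUN's base ppa and g.blk
 * from the block id, yielding a complete device address.
 */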
151 /* requires lun->lock taken */
152 static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *new_rblk,
153 struct rrpc_block **cur_rblk)
155 struct rrpc *rrpc = rlun->rrpc;
158 spin_lock(&(*cur_rblk)->lock);
159 WARN_ON(!block_is_full(rrpc, *cur_rblk));
160 spin_unlock(&(*cur_rblk)->lock);
162 *cur_rblk = new_rblk;
165 static struct rrpc_block *__rrpc_get_blk(struct rrpc *rrpc,
166 struct rrpc_lun *rlun)
168 struct rrpc_block *rblk = NULL;
170 if (list_empty(&rlun->free_list))
173 rblk = list_first_entry(&rlun->free_list, struct rrpc_block, list);
175 list_move_tail(&rblk->list, &rlun->used_list);
176 rblk->state = NVM_BLK_ST_TGT;
177 rlun->nr_free_blocks--;
183 static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
186 struct nvm_tgt_dev *dev = rrpc->dev;
187 struct rrpc_block *rblk;
188 int is_gc = flags & NVM_IOTYPE_GC;
190 spin_lock(&rlun->lock);
191 if (!is_gc && rlun->nr_free_blocks < rlun->reserved_blocks) {
192 pr_err("nvm: rrpc: cannot give block to non GC request\n");
193 spin_unlock(&rlun->lock);
197 rblk = __rrpc_get_blk(rrpc, rlun);
199 pr_err("nvm: rrpc: cannot get new block\n");
200 spin_unlock(&rlun->lock);
203 spin_unlock(&rlun->lock);
205 bitmap_zero(rblk->invalid_pages, dev->geo.sec_per_blk);
207 rblk->nr_invalid_pages = 0;
208 atomic_set(&rblk->data_cmnt_size, 0);
213 static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
215 struct rrpc_lun *rlun = rblk->rlun;
217 spin_lock(&rlun->lock);
218 if (rblk->state & NVM_BLK_ST_TGT) {
219 list_move_tail(&rblk->list, &rlun->free_list);
220 rlun->nr_free_blocks++;
221 rblk->state = NVM_BLK_ST_FREE;
222 } else if (rblk->state & NVM_BLK_ST_BAD) {
223 list_move_tail(&rblk->list, &rlun->bb_list);
224 rblk->state = NVM_BLK_ST_BAD;
227 pr_err("rrpc: erroneous type (ch:%d,lun:%d,blk%d-> %u)\n",
228 rlun->bppa.g.ch, rlun->bppa.g.lun,
229 rblk->id, rblk->state);
230 list_move_tail(&rblk->list, &rlun->bb_list);
232 spin_unlock(&rlun->lock);
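/*
 * Block lifecycle summary: __rrpc_get_blk() moves a block from free_list to
 * used_list and marks it NVM_BLK_ST_TGT; rrpc_put_blk() above returns it to
 * free_list (NVM_BLK_ST_FREE) or, for grown-bad blocks, parks it on bb_list
 * (NVM_BLK_ST_BAD).
 */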
235 static void rrpc_put_blks(struct rrpc *rrpc)
237 struct rrpc_lun *rlun;
240 for (i = 0; i < rrpc->nr_luns; i++) {
241 rlun = &rrpc->luns[i];
243 rrpc_put_blk(rrpc, rlun->cur);
245 rrpc_put_blk(rrpc, rlun->gc_cur);
249 static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
251 int next = atomic_inc_return(&rrpc->next_lun);
253 return &rrpc->luns[next % rrpc->nr_luns];
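/*
 * Example: with nr_luns == 4 and next_lun initialised to -1 (see rrpc_init),
 * successive callers are handed LUNs 0, 1, 2, 3, 0, ... in round-robin order.
 */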
256 static void rrpc_gc_kick(struct rrpc *rrpc)
258 struct rrpc_lun *rlun;
261 for (i = 0; i < rrpc->nr_luns; i++) {
262 rlun = &rrpc->luns[i];
263 queue_work(rrpc->krqd_wq, &rlun->ws_gc);
268 * Timer callback: kick GC and re-arm the timer at a fixed interval.
270 static void rrpc_gc_timer(unsigned long data)
272 struct rrpc *rrpc = (struct rrpc *)data;
275 mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
278 static void rrpc_end_sync_bio(struct bio *bio)
280 struct completion *waiting = bio->bi_private;
283 pr_err("nvm: gc request failed (%u).\n", bio->bi_status);
289 * rrpc_move_valid_pages - migrate live data off the block
290 * @rrpc: the 'rrpc' structure
291 * @rblk: the block from which to migrate live pages
294 * GC algorithms may call this function to migrate remaining live
295 * pages off the block prior to erasing it. This function blocks
296 * further execution until the operation is complete.
298 static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
300 struct nvm_tgt_dev *dev = rrpc->dev;
301 struct request_queue *q = dev->q;
302 struct rrpc_rev_addr *rev;
307 int nr_sec_per_blk = dev->geo.sec_per_blk;
309 DECLARE_COMPLETION_ONSTACK(wait);
311 if (bitmap_full(rblk->invalid_pages, nr_sec_per_blk))
314 bio = bio_alloc(GFP_NOIO, 1);
316 pr_err("nvm: could not alloc bio to gc\n");
320 page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
322 while ((slot = find_first_zero_bit(rblk->invalid_pages,
323 nr_sec_per_blk)) < nr_sec_per_blk) {
326 phys_addr = rrpc_blk_to_ppa(rrpc, rblk) + slot;
329 spin_lock(&rrpc->rev_lock);
330 /* Get logical address from physical to logical table */
331 rev = &rrpc->rev_trans_map[phys_addr];
332 /* already updated by previous regular write */
333 if (rev->addr == ADDR_EMPTY) {
334 spin_unlock(&rrpc->rev_lock);
338 rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
339 if (IS_ERR_OR_NULL(rqd)) {
340 spin_unlock(&rrpc->rev_lock);
345 spin_unlock(&rrpc->rev_lock);
347 /* Perform read to do GC */
348 bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
349 bio_set_op_attrs(bio, REQ_OP_READ, 0);
350 bio->bi_private = &wait;
351 bio->bi_end_io = rrpc_end_sync_bio;
353 /* TODO: may fail when EXP_PG_SIZE > PAGE_SIZE */
354 bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);
356 if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
357 pr_err("rrpc: gc read failed.\n");
358 rrpc_inflight_laddr_release(rrpc, rqd);
361 wait_for_completion_io(&wait);
362 if (bio->bi_status) {
363 rrpc_inflight_laddr_release(rrpc, rqd);
368 reinit_completion(&wait);
370 bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
371 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
372 bio->bi_private = &wait;
373 bio->bi_end_io = rrpc_end_sync_bio;
375 bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);
377 /* turn the command around and write the data back to a new
380 if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
381 pr_err("rrpc: gc write failed.\n");
382 rrpc_inflight_laddr_release(rrpc, rqd);
385 wait_for_completion_io(&wait);
387 rrpc_inflight_laddr_release(rrpc, rqd);
395 mempool_free(page, rrpc->page_pool);
398 if (!bitmap_full(rblk->invalid_pages, nr_sec_per_blk)) {
399 pr_err("nvm: failed to garbage collect block\n");
406 static void rrpc_block_gc(struct work_struct *work)
408 struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
410 struct rrpc *rrpc = gcb->rrpc;
411 struct rrpc_block *rblk = gcb->rblk;
412 struct rrpc_lun *rlun = rblk->rlun;
415 mempool_free(gcb, rrpc->gcb_pool);
416 pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' being reclaimed\n",
417 rlun->bppa.g.ch, rlun->bppa.g.lun,
420 if (rrpc_move_valid_pages(rrpc, rblk))
424 ppa.g.ch = rlun->bppa.g.ch;
425 ppa.g.lun = rlun->bppa.g.lun;
426 ppa.g.blk = rblk->id;
428 if (nvm_erase_sync(rrpc->dev, &ppa, 1))
431 rrpc_put_blk(rrpc, rblk);
436 spin_lock(&rlun->lock);
437 list_add_tail(&rblk->prio, &rlun->prio_list);
438 spin_unlock(&rlun->lock);
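/*
 * rrpc_block_gc() thus performs the full reclaim of one block: migrate its
 * live pages, erase it, and hand it back through rrpc_put_blk(). If the
 * migration or the erase fails, the block is re-queued on the LUN's prio
 * list above so a later GC pass can retry it.
 */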
441 /* the block with the highest number of invalid pages will be at the beginning
444 static struct rrpc_block *rblk_max_invalid(struct rrpc_block *ra,
445 struct rrpc_block *rb)
447 if (ra->nr_invalid_pages == rb->nr_invalid_pages)
450 return (ra->nr_invalid_pages < rb->nr_invalid_pages) ? rb : ra;
453 /* linearly find the block with the highest number of invalid pages
456 static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
458 struct list_head *prio_list = &rlun->prio_list;
459 struct rrpc_block *rblk, *max;
461 BUG_ON(list_empty(prio_list));
463 max = list_first_entry(prio_list, struct rrpc_block, prio);
464 list_for_each_entry(rblk, prio_list, prio)
465 max = rblk_max_invalid(max, rblk);
470 static void rrpc_lun_gc(struct work_struct *work)
472 struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
473 struct rrpc *rrpc = rlun->rrpc;
474 struct nvm_tgt_dev *dev = rrpc->dev;
475 struct rrpc_block_gc *gcb;
476 unsigned int nr_blocks_need;
478 nr_blocks_need = dev->geo.blks_per_lun / GC_LIMIT_INVERSE;
480 if (nr_blocks_need < rrpc->nr_luns)
481 nr_blocks_need = rrpc->nr_luns;
483 spin_lock(&rlun->lock);
484 while (nr_blocks_need > rlun->nr_free_blocks &&
485 !list_empty(&rlun->prio_list)) {
486 struct rrpc_block *rblk = block_prio_find_max(rlun);
488 if (!rblk->nr_invalid_pages)
491 gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
495 list_del_init(&rblk->prio);
497 WARN_ON(!block_is_full(rrpc, rblk));
499 pr_debug("rrpc: selected block 'ch:%d,lun:%d,blk:%d' for GC\n",
500 rlun->bppa.g.ch, rlun->bppa.g.lun,
505 INIT_WORK(&gcb->ws_gc, rrpc_block_gc);
507 queue_work(rrpc->kgc_wq, &gcb->ws_gc);
511 spin_unlock(&rlun->lock);
513 /* TODO: Hint that request queue can be started again */
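/*
 * GC trigger: a LUN keeps scheduling block reclaims while its free-block
 * count is below blks_per_lun / GC_LIMIT_INVERSE (never less than nr_luns),
 * always picking the block with the most invalid pages first.
 */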
516 static void rrpc_gc_queue(struct work_struct *work)
518 struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
520 struct rrpc *rrpc = gcb->rrpc;
521 struct rrpc_block *rblk = gcb->rblk;
522 struct rrpc_lun *rlun = rblk->rlun;
524 spin_lock(&rlun->lock);
525 list_add_tail(&rblk->prio, &rlun->prio_list);
526 spin_unlock(&rlun->lock);
528 mempool_free(gcb, rrpc->gcb_pool);
529 pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' full, allow GC (sched)\n",
530 rlun->bppa.g.ch, rlun->bppa.g.lun,
534 static const struct block_device_operations rrpc_fops = {
535 .owner = THIS_MODULE,
538 static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
541 struct rrpc_lun *rlun, *max_free;
544 return get_next_lun(rrpc);
546 /* during GC, we don't care about the RR order; instead we want to
547 * keep the number of free blocks even across the luns.
549 max_free = &rrpc->luns[0];
550 /* prevent a GC-ing lun from devouring pages of a lun with
551 * few free blocks. We don't take the lock as we only need an estimate.
554 rrpc_for_each_lun(rrpc, rlun, i) {
555 if (rlun->nr_free_blocks > max_free->nr_free_blocks)
562 static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
563 struct rrpc_block *rblk, u64 paddr)
565 struct rrpc_addr *gp;
566 struct rrpc_rev_addr *rev;
568 BUG_ON(laddr >= rrpc->nr_sects);
570 gp = &rrpc->trans_map[laddr];
571 spin_lock(&rrpc->rev_lock);
573 rrpc_page_invalidate(rrpc, gp);
578 rev = &rrpc->rev_trans_map[gp->addr];
580 spin_unlock(&rrpc->rev_lock);
585 static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
587 u64 addr = ADDR_EMPTY;
589 spin_lock(&rblk->lock);
590 if (block_is_full(rrpc, rblk))
593 addr = rblk->next_page;
597 spin_unlock(&rblk->lock);
601 /* Map a logical address to a physical page. The mapping implements a round-robin
602 * approach and allocates a page from the next available lun.
604 * Returns a ppa_addr holding the physical address of the mapped page. Returns
605 * ADDR_EMPTY if no blocks in the next rlun are available.
607 static struct ppa_addr rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
610 struct nvm_tgt_dev *tgt_dev = rrpc->dev;
611 struct rrpc_lun *rlun;
612 struct rrpc_block *rblk, **cur_rblk;
618 ppa.ppa = ADDR_EMPTY;
619 rlun = rrpc_get_lun_rr(rrpc, is_gc);
621 if (!is_gc && rlun->nr_free_blocks < rrpc->nr_luns * 4)
625 * page allocation steps:
626 * 1. Try to allocate a new page from the current rblk
627 * 2a. If that succeeds, map it in and return
628 * 2b. If it fails, first try to allocate a new block from the media manager,
629 * then retry step 1. Retry until the normal block pool is exhausted.
631 * 3. If exhausted, and the garbage collector is requesting the block,
632 * switch to the reserved block and retry step 1.
633 * If this fails as well, or the request does not come from GC,
634 * report that no block could be retrieved and let the
635 * caller handle further processing.
638 spin_lock(&rlun->lock);
639 cur_rblk = &rlun->cur;
642 paddr = rrpc_alloc_addr(rrpc, rblk);
644 if (paddr != ADDR_EMPTY)
647 if (!list_empty(&rlun->wblk_list)) {
649 rblk = list_first_entry(&rlun->wblk_list, struct rrpc_block,
651 rrpc_set_lun_cur(rlun, rblk, cur_rblk);
652 list_del(&rblk->prio);
655 spin_unlock(&rlun->lock);
657 rblk = rrpc_get_blk(rrpc, rlun, gc_force);
659 spin_lock(&rlun->lock);
660 list_add_tail(&rblk->prio, &rlun->wblk_list);
662 * another thread might already have added a new block;
663 * if so, make sure that one is used instead of the new one.
669 if (unlikely(is_gc) && !gc_force) {
670 /* retry from emergency gc block */
671 cur_rblk = &rlun->gc_cur;
674 spin_lock(&rlun->lock);
678 pr_err("rrpc: failed to allocate new block\n");
681 spin_unlock(&rlun->lock);
682 p = rrpc_update_map(rrpc, laddr, rblk, paddr);
686 /* return global address */
687 return rrpc_ppa_to_gaddr(tgt_dev, p);
690 static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
692 struct rrpc_block_gc *gcb;
694 gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
696 pr_err("rrpc: unable to queue block for gc.");
703 INIT_WORK(&gcb->ws_gc, rrpc_gc_queue);
704 queue_work(rrpc->kgc_wq, &gcb->ws_gc);
707 static struct rrpc_lun *rrpc_ppa_to_lun(struct rrpc *rrpc, struct ppa_addr p)
709 struct rrpc_lun *rlun = NULL;
712 for (i = 0; i < rrpc->nr_luns; i++) {
713 if (rrpc->luns[i].bppa.g.ch == p.g.ch &&
714 rrpc->luns[i].bppa.g.lun == p.g.lun) {
715 rlun = &rrpc->luns[i];
723 static void __rrpc_mark_bad_block(struct rrpc *rrpc, struct ppa_addr ppa)
725 struct nvm_tgt_dev *dev = rrpc->dev;
726 struct rrpc_lun *rlun;
727 struct rrpc_block *rblk;
729 rlun = rrpc_ppa_to_lun(rrpc, ppa);
730 rblk = &rlun->blocks[ppa.g.blk];
731 rblk->state = NVM_BLK_ST_BAD;
733 nvm_set_tgt_bb_tbl(dev, &ppa, 1, NVM_BLK_T_GRWN_BAD);
736 static void rrpc_mark_bad_block(struct rrpc *rrpc, struct nvm_rq *rqd)
738 void *comp_bits = &rqd->ppa_status;
739 struct ppa_addr ppa, prev_ppa;
740 int nr_ppas = rqd->nr_ppas;
743 if (rqd->nr_ppas == 1)
744 __rrpc_mark_bad_block(rrpc, rqd->ppa_addr);
746 ppa_set_empty(&prev_ppa);
748 while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
749 ppa = rqd->ppa_list[bit];
750 if (ppa_cmp_blk(ppa, prev_ppa))
753 __rrpc_mark_bad_block(rrpc, ppa);
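/*
 * Write-failure handling: rqd->ppa_status is a per-sector completion bitmap,
 * so each distinct block containing a failed ppa is marked grown-bad and the
 * device bad-block table is updated through nvm_set_tgt_bb_tbl().
 */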
757 static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
758 sector_t laddr, uint8_t npages)
760 struct nvm_tgt_dev *dev = rrpc->dev;
762 struct rrpc_block *rblk;
765 for (i = 0; i < npages; i++) {
766 p = &rrpc->trans_map[laddr + i];
769 cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
770 if (unlikely(cmnt_size == dev->geo.sec_per_blk))
771 rrpc_run_gc(rrpc, rblk);
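/*
 * data_cmnt_size counts sectors whose writes have completed on media; once
 * it reaches sec_per_blk the block is fully written and rrpc_run_gc() queues
 * it (via rrpc_gc_queue) onto the LUN's prio list as a GC candidate.
 */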
775 static void rrpc_end_io(struct nvm_rq *rqd)
777 struct rrpc *rrpc = rqd->private;
778 struct nvm_tgt_dev *dev = rrpc->dev;
779 struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
780 uint8_t npages = rqd->nr_ppas;
781 sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;
783 if (bio_data_dir(rqd->bio) == WRITE) {
784 if (rqd->error == NVM_RSP_ERR_FAILWRITE)
785 rrpc_mark_bad_block(rrpc, rqd);
787 rrpc_end_io_write(rrpc, rrqd, laddr, npages);
792 if (rrqd->flags & NVM_IOTYPE_GC)
795 rrpc_unlock_rq(rrpc, rqd);
798 nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
800 mempool_free(rqd, rrpc->rq_pool);
803 static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
804 struct nvm_rq *rqd, unsigned long flags, int npages)
806 struct nvm_tgt_dev *dev = rrpc->dev;
807 struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
808 struct rrpc_addr *gp;
809 sector_t laddr = rrpc_get_laddr(bio);
810 int is_gc = flags & NVM_IOTYPE_GC;
813 if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
814 nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
815 return NVM_IO_REQUEUE;
818 for (i = 0; i < npages; i++) {
819 /* We assume that mapping occurs at 4KB granularity */
820 BUG_ON(!(laddr + i < rrpc->nr_sects));
821 gp = &rrpc->trans_map[laddr + i];
824 rqd->ppa_list[i] = rrpc_ppa_to_gaddr(dev, gp);
827 rrpc_unlock_laddr(rrpc, r);
828 nvm_dev_dma_free(dev->parent, rqd->ppa_list,
834 rqd->opcode = NVM_OP_HBREAD;
839 static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
842 int is_gc = flags & NVM_IOTYPE_GC;
843 sector_t laddr = rrpc_get_laddr(bio);
844 struct rrpc_addr *gp;
846 if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
847 return NVM_IO_REQUEUE;
849 BUG_ON(!(laddr < rrpc->nr_sects));
850 gp = &rrpc->trans_map[laddr];
853 rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp);
856 rrpc_unlock_rq(rrpc, rqd);
860 rqd->opcode = NVM_OP_HBREAD;
865 static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
866 struct nvm_rq *rqd, unsigned long flags, int npages)
868 struct nvm_tgt_dev *dev = rrpc->dev;
869 struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
871 sector_t laddr = rrpc_get_laddr(bio);
872 int is_gc = flags & NVM_IOTYPE_GC;
875 if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
876 nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
877 return NVM_IO_REQUEUE;
880 for (i = 0; i < npages; i++) {
881 /* We assume that mapping occurs at 4KB granularity */
882 p = rrpc_map_page(rrpc, laddr + i, is_gc);
883 if (p.ppa == ADDR_EMPTY) {
885 rrpc_unlock_laddr(rrpc, r);
886 nvm_dev_dma_free(dev->parent, rqd->ppa_list,
889 return NVM_IO_REQUEUE;
892 rqd->ppa_list[i] = p;
895 rqd->opcode = NVM_OP_HBWRITE;
900 static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
901 struct nvm_rq *rqd, unsigned long flags)
904 int is_gc = flags & NVM_IOTYPE_GC;
905 sector_t laddr = rrpc_get_laddr(bio);
907 if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
908 return NVM_IO_REQUEUE;
910 p = rrpc_map_page(rrpc, laddr, is_gc);
911 if (p.ppa == ADDR_EMPTY) {
913 rrpc_unlock_rq(rrpc, rqd);
915 return NVM_IO_REQUEUE;
919 rqd->opcode = NVM_OP_HBWRITE;
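/*
 * NVM_OP_HBREAD/NVM_OP_HBWRITE are the hybrid commands: the target passes a
 * physical address hint alongside the logical address, while the device is
 * expected to keep an L2P table of its own; this is why rrpc_init() rejects
 * devices without NVM_RSP_L2P support.
 */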
924 static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
925 struct nvm_rq *rqd, unsigned long flags, uint8_t npages)
927 struct nvm_tgt_dev *dev = rrpc->dev;
930 rqd->ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
932 if (!rqd->ppa_list) {
933 pr_err("rrpc: not able to allocate ppa list\n");
937 if (bio_op(bio) == REQ_OP_WRITE)
938 return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,
941 return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);
944 if (bio_op(bio) == REQ_OP_WRITE)
945 return rrpc_write_rq(rrpc, bio, rqd, flags);
947 return rrpc_read_rq(rrpc, bio, rqd, flags);
950 static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
951 struct nvm_rq *rqd, unsigned long flags)
953 struct nvm_tgt_dev *dev = rrpc->dev;
954 struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
955 uint8_t nr_pages = rrpc_get_pages(bio);
956 int bio_size = bio_sectors(bio) << 9;
959 if (bio_size < dev->geo.sec_size)
961 else if (bio_size > dev->geo.max_rq_size)
964 err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
971 rqd->nr_ppas = nr_pages;
972 rqd->end_io = rrpc_end_io;
975 err = nvm_submit_io(dev, rqd);
977 pr_err("rrpc: I/O submission failed: %d\n", err);
979 if (!(flags & NVM_IOTYPE_GC)) {
980 rrpc_unlock_rq(rrpc, rqd);
981 if (rqd->nr_ppas > 1)
982 nvm_dev_dma_free(dev->parent, rqd->ppa_list,
991 static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
993 struct rrpc *rrpc = q->queuedata;
997 blk_queue_split(q, &bio);
999 if (bio_op(bio) == REQ_OP_DISCARD) {
1000 rrpc_discard(rrpc, bio);
1001 return BLK_QC_T_NONE;
1004 rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
1005 memset(rqd, 0, sizeof(struct nvm_rq));
1007 err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE);
1010 return BLK_QC_T_NONE;
1017 case NVM_IO_REQUEUE:
1018 spin_lock(&rrpc->bio_lock);
1019 bio_list_add(&rrpc->requeue_bios, bio);
1020 spin_unlock(&rrpc->bio_lock);
1021 queue_work(rrpc->kgc_wq, &rrpc->ws_requeue);
1025 mempool_free(rqd, rrpc->rq_pool);
1026 return BLK_QC_T_NONE;
1029 static void rrpc_requeue(struct work_struct *work)
1031 struct rrpc *rrpc = container_of(work, struct rrpc, ws_requeue);
1032 struct bio_list bios;
1035 bio_list_init(&bios);
1037 spin_lock(&rrpc->bio_lock);
1038 bio_list_merge(&bios, &rrpc->requeue_bios);
1039 bio_list_init(&rrpc->requeue_bios);
1040 spin_unlock(&rrpc->bio_lock);
1042 while ((bio = bio_list_pop(&bios)))
1043 rrpc_make_rq(rrpc->disk->queue, bio);
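/*
 * Bios that came back as NVM_IO_REQUEUE (e.g. their logical range is still
 * inflight, or no block could be mapped) are parked on requeue_bios by
 * rrpc_make_rq() and resubmitted from this work item.
 */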
1046 static void rrpc_gc_free(struct rrpc *rrpc)
1049 destroy_workqueue(rrpc->krqd_wq);
1052 destroy_workqueue(rrpc->kgc_wq);
1055 static int rrpc_gc_init(struct rrpc *rrpc)
1057 rrpc->krqd_wq = alloc_workqueue("rrpc-lun", WQ_MEM_RECLAIM|WQ_UNBOUND,
1062 rrpc->kgc_wq = alloc_workqueue("rrpc-bg", WQ_MEM_RECLAIM, 1);
1066 setup_timer(&rrpc->gc_timer, rrpc_gc_timer, (unsigned long)rrpc);
1071 static void rrpc_map_free(struct rrpc *rrpc)
1073 vfree(rrpc->rev_trans_map);
1074 vfree(rrpc->trans_map);
1077 static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
1079 struct rrpc *rrpc = (struct rrpc *)private;
1080 struct nvm_tgt_dev *dev = rrpc->dev;
1081 struct rrpc_addr *addr = rrpc->trans_map + slba;
1082 struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
1083 struct rrpc_lun *rlun;
1084 struct rrpc_block *rblk;
1087 for (i = 0; i < nlb; i++) {
1088 struct ppa_addr gaddr;
1089 u64 pba = le64_to_cpu(entries[i]);
1092 /* LNVM treats address-spaces as silos, LBA and PBA are
1093 * equally large and zero-indexed.
1095 if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
1096 pr_err("nvm: L2P data entry is out of bounds!\n");
1097 pr_err("nvm: Maybe loaded an old target L2P\n");
1101 /* Address zero is a special one. The first page on a disk is
1102 * protected, as it often holds internal device boot configurations.
1108 div_u64_rem(pba, rrpc->nr_sects, &mod);
1110 gaddr = rrpc_recov_addr(dev, pba);
1111 rlun = rrpc_ppa_to_lun(rrpc, gaddr);
1113 pr_err("rrpc: l2p corruption on lba %llu\n",
1118 rblk = &rlun->blocks[gaddr.g.blk];
1120 /* at this point, we don't know anything about the
1121 * block. It's up to the FTL on top to re-establish the
1122 * block state. The block is assumed to be open.
1124 list_move_tail(&rblk->list, &rlun->used_list);
1125 rblk->state = NVM_BLK_ST_TGT;
1126 rlun->nr_free_blocks--;
1130 addr[i].rblk = rblk;
1131 raddr[mod].addr = slba + i;
1137 static int rrpc_map_init(struct rrpc *rrpc)
1139 struct nvm_tgt_dev *dev = rrpc->dev;
1143 rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
1144 if (!rrpc->trans_map)
1147 rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
1149 if (!rrpc->rev_trans_map)
1152 for (i = 0; i < rrpc->nr_sects; i++) {
1153 struct rrpc_addr *p = &rrpc->trans_map[i];
1154 struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];
1156 p->addr = ADDR_EMPTY;
1157 r->addr = ADDR_EMPTY;
1160 /* Bring up the mapping table from device */
1161 ret = nvm_get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects,
1162 rrpc_l2p_update, rrpc);
1164 pr_err("nvm: rrpc: could not read L2P table.\n");
1171 /* Minimum pages needed within a lun */
1172 #define PAGE_POOL_SIZE 16
1173 #define ADDR_POOL_SIZE 64
1175 static int rrpc_core_init(struct rrpc *rrpc)
1177 down_write(&rrpc_lock);
1178 if (!rrpc_gcb_cache) {
1179 rrpc_gcb_cache = kmem_cache_create("rrpc_gcb",
1180 sizeof(struct rrpc_block_gc), 0, 0, NULL);
1181 if (!rrpc_gcb_cache) {
1182 up_write(&rrpc_lock);
1186 rrpc_rq_cache = kmem_cache_create("rrpc_rq",
1187 sizeof(struct nvm_rq) + sizeof(struct rrpc_rq),
1189 if (!rrpc_rq_cache) {
1190 kmem_cache_destroy(rrpc_gcb_cache);
1191 up_write(&rrpc_lock);
1195 up_write(&rrpc_lock);
1197 rrpc->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
1198 if (!rrpc->page_pool)
1201 rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->geo.nr_luns,
1203 if (!rrpc->gcb_pool)
1206 rrpc->rq_pool = mempool_create_slab_pool(64, rrpc_rq_cache);
1210 spin_lock_init(&rrpc->inflights.lock);
1211 INIT_LIST_HEAD(&rrpc->inflights.reqs);
1216 static void rrpc_core_free(struct rrpc *rrpc)
1218 mempool_destroy(rrpc->page_pool);
1219 mempool_destroy(rrpc->gcb_pool);
1220 mempool_destroy(rrpc->rq_pool);
1223 static void rrpc_luns_free(struct rrpc *rrpc)
1225 struct rrpc_lun *rlun;
1231 for (i = 0; i < rrpc->nr_luns; i++) {
1232 rlun = &rrpc->luns[i];
1233 vfree(rlun->blocks);
1239 static int rrpc_bb_discovery(struct nvm_tgt_dev *dev, struct rrpc_lun *rlun)
1241 struct nvm_geo *geo = &dev->geo;
1242 struct rrpc_block *rblk;
1243 struct ppa_addr ppa;
1249 if (!dev->parent->ops->get_bb_tbl)
1252 nr_blks = geo->blks_per_lun * geo->plane_mode;
1253 blks = kmalloc(nr_blks, GFP_KERNEL);
1258 ppa.g.ch = rlun->bppa.g.ch;
1259 ppa.g.lun = rlun->bppa.g.lun;
1261 ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
1263 pr_err("rrpc: could not get BB table\n");
1267 nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
1273 for (i = 0; i < nr_blks; i++) {
1274 if (blks[i] == NVM_BLK_T_FREE)
1277 rblk = &rlun->blocks[i];
1278 list_move_tail(&rblk->list, &rlun->bb_list);
1279 rblk->state = NVM_BLK_ST_BAD;
1280 rlun->nr_free_blocks--;
1288 static void rrpc_set_lun_ppa(struct rrpc_lun *rlun, struct ppa_addr ppa)
1291 rlun->bppa.g.ch = ppa.g.ch;
1292 rlun->bppa.g.lun = ppa.g.lun;
1295 static int rrpc_luns_init(struct rrpc *rrpc, struct ppa_addr *luns)
1297 struct nvm_tgt_dev *dev = rrpc->dev;
1298 struct nvm_geo *geo = &dev->geo;
1299 struct rrpc_lun *rlun;
1300 int i, j, ret = -EINVAL;
1302 if (geo->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
1303 pr_err("rrpc: number of pages per block too high.");
1307 spin_lock_init(&rrpc->rev_lock);
1309 rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
1315 for (i = 0; i < rrpc->nr_luns; i++) {
1316 rlun = &rrpc->luns[i];
1318 rrpc_set_lun_ppa(rlun, luns[i]);
1319 rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
1321 if (!rlun->blocks) {
1326 INIT_LIST_HEAD(&rlun->free_list);
1327 INIT_LIST_HEAD(&rlun->used_list);
1328 INIT_LIST_HEAD(&rlun->bb_list);
1330 for (j = 0; j < geo->blks_per_lun; j++) {
1331 struct rrpc_block *rblk = &rlun->blocks[j];
1335 rblk->state = NVM_BLK_T_FREE;
1336 INIT_LIST_HEAD(&rblk->prio);
1337 INIT_LIST_HEAD(&rblk->list);
1338 spin_lock_init(&rblk->lock);
1340 list_add_tail(&rblk->list, &rlun->free_list);
1344 rlun->nr_free_blocks = geo->blks_per_lun;
1345 rlun->reserved_blocks = 2; /* for GC only */
1347 INIT_LIST_HEAD(&rlun->prio_list);
1348 INIT_LIST_HEAD(&rlun->wblk_list);
1350 INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
1351 spin_lock_init(&rlun->lock);
1353 if (rrpc_bb_discovery(dev, rlun))
1363 /* returns 0 on success and stores the beginning address in *begin */
1364 static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
1366 struct nvm_tgt_dev *dev = rrpc->dev;
1367 sector_t size = rrpc->nr_sects * dev->geo.sec_size;
1372 ret = nvm_get_area(dev, begin, size);
1374 *begin >>= (ilog2(dev->geo.sec_size) - 9);
1379 static void rrpc_area_free(struct rrpc *rrpc)
1381 struct nvm_tgt_dev *dev = rrpc->dev;
1382 sector_t begin = rrpc->soffset << (ilog2(dev->geo.sec_size) - 9);
1384 nvm_put_area(dev, begin);
1387 static void rrpc_free(struct rrpc *rrpc)
1390 rrpc_map_free(rrpc);
1391 rrpc_core_free(rrpc);
1392 rrpc_luns_free(rrpc);
1393 rrpc_area_free(rrpc);
1398 static void rrpc_exit(void *private)
1400 struct rrpc *rrpc = private;
1402 del_timer(&rrpc->gc_timer);
1404 flush_workqueue(rrpc->krqd_wq);
1405 flush_workqueue(rrpc->kgc_wq);
1410 static sector_t rrpc_capacity(void *private)
1412 struct rrpc *rrpc = private;
1413 struct nvm_tgt_dev *dev = rrpc->dev;
1414 sector_t reserved, provisioned;
1416 /* cur, gc, and two emergency blocks for each lun */
1417 reserved = rrpc->nr_luns * dev->geo.sec_per_blk * 4;
1418 provisioned = rrpc->nr_sects - reserved;
1420 if (reserved > rrpc->nr_sects) {
1421 pr_err("rrpc: not enough space available to expose storage.\n");
1425 sector_div(provisioned, 10);
1426 return provisioned * 9 * NR_PHY_IN_LOG;
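/*
 * Worked example (assuming NR_PHY_IN_LOG == 8, i.e. 4 KiB logical pages):
 * with 64 LUNs, 256 sectors per block and nr_sects = 1,048,576 pages,
 * reserved = 64 * 256 * 4 = 65,536 and provisioned = 983,040; the exposed
 * capacity is then 98,304 * 9 * 8 = 7,077,888 512-byte sectors (~3.4 GiB),
 * i.e. roughly 90% of the provisioned space.
 */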
1430 * Looks up the logical address in the reverse trans map and checks whether it is
1431 * still valid by comparing the logical-to-physical mapping with the physical address.
1432 * Pages whose mapping no longer points back to them are marked invalid.
1434 static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
1436 struct nvm_tgt_dev *dev = rrpc->dev;
1438 struct rrpc_addr *laddr;
1439 u64 bpaddr, paddr, pladdr;
1441 bpaddr = block_to_rel_addr(rrpc, rblk);
1442 for (offset = 0; offset < dev->geo.sec_per_blk; offset++) {
1443 paddr = bpaddr + offset;
1445 pladdr = rrpc->rev_trans_map[paddr].addr;
1446 if (pladdr == ADDR_EMPTY)
1449 laddr = &rrpc->trans_map[pladdr];
1451 if (paddr == laddr->addr) {
1454 set_bit(offset, rblk->invalid_pages);
1455 rblk->nr_invalid_pages++;
1460 static int rrpc_blocks_init(struct rrpc *rrpc)
1462 struct nvm_tgt_dev *dev = rrpc->dev;
1463 struct rrpc_lun *rlun;
1464 struct rrpc_block *rblk;
1465 int lun_iter, blk_iter;
1467 for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) {
1468 rlun = &rrpc->luns[lun_iter];
1470 for (blk_iter = 0; blk_iter < dev->geo.blks_per_lun;
1472 rblk = &rlun->blocks[blk_iter];
1473 rrpc_block_map_update(rrpc, rblk);
1480 static int rrpc_luns_configure(struct rrpc *rrpc)
1482 struct rrpc_lun *rlun;
1483 struct rrpc_block *rblk;
1486 for (i = 0; i < rrpc->nr_luns; i++) {
1487 rlun = &rrpc->luns[i];
1489 rblk = rrpc_get_blk(rrpc, rlun, 0);
1492 rrpc_set_lun_cur(rlun, rblk, &rlun->cur);
1494 /* Emergency gc block */
1495 rblk = rrpc_get_blk(rrpc, rlun, 1);
1498 rrpc_set_lun_cur(rlun, rblk, &rlun->gc_cur);
1503 rrpc_put_blks(rrpc);
1507 static struct nvm_tgt_type tt_rrpc;
1509 static void *rrpc_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
1512 struct request_queue *bqueue = dev->q;
1513 struct request_queue *tqueue = tdisk->queue;
1514 struct nvm_geo *geo = &dev->geo;
1519 if (!(dev->identity.dom & NVM_RSP_L2P)) {
1520 pr_err("nvm: rrpc: device does not support l2p (%x)\n",
1522 return ERR_PTR(-EINVAL);
1525 rrpc = kzalloc(sizeof(struct rrpc), GFP_KERNEL);
1527 return ERR_PTR(-ENOMEM);
1532 bio_list_init(&rrpc->requeue_bios);
1533 spin_lock_init(&rrpc->bio_lock);
1534 INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);
1536 rrpc->nr_luns = geo->nr_luns;
1537 rrpc->nr_sects = (unsigned long long)geo->sec_per_lun * rrpc->nr_luns;
1539 /* simple round-robin strategy */
1540 atomic_set(&rrpc->next_lun, -1);
1542 ret = rrpc_area_init(rrpc, &soffset);
1544 pr_err("nvm: rrpc: could not initialize area\n");
1545 return ERR_PTR(ret);
1547 rrpc->soffset = soffset;
1549 ret = rrpc_luns_init(rrpc, dev->luns);
1551 pr_err("nvm: rrpc: could not initialize luns\n");
1555 ret = rrpc_core_init(rrpc);
1557 pr_err("nvm: rrpc: could not initialize core\n");
1561 ret = rrpc_map_init(rrpc);
1563 pr_err("nvm: rrpc: could not initialize maps\n");
1567 ret = rrpc_blocks_init(rrpc);
1569 pr_err("nvm: rrpc: could not initialize state for blocks\n");
1573 ret = rrpc_luns_configure(rrpc);
1575 pr_err("nvm: rrpc: not enough blocks available in LUNs.\n");
1579 ret = rrpc_gc_init(rrpc);
1581 pr_err("nvm: rrpc: could not initialize gc\n");
1585 /* inherit the size from the underlying device */
1586 blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
1587 blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));
1589 pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n",
1590 rrpc->nr_luns, (unsigned long long)rrpc->nr_sects);
1592 mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
1597 return ERR_PTR(ret);
1600 /* round robin, page-based FTL, and cost-based GC */
1601 static struct nvm_tgt_type tt_rrpc = {
1603 .version = {1, 0, 0},
1605 .make_rq = rrpc_make_rq,
1606 .capacity = rrpc_capacity,
1612 static int __init rrpc_module_init(void)
1614 return nvm_register_tgt_type(&tt_rrpc);
1617 static void rrpc_module_exit(void)
1619 nvm_unregister_tgt_type(&tt_rrpc);
1622 module_init(rrpc_module_init);
1623 module_exit(rrpc_module_exit);
1624 MODULE_LICENSE("GPL v2");
1625 MODULE_DESCRIPTION("Block-Device Target for Open-Channel SSDs");