/*
 * Copyright (C) 2015 IT University of Copenhagen (rrpc.c)
 * Copyright (C) 2016 CNEX Labs
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a physical block-device target for Open-channel SSDs.
 *
 * pblk-init.c - pblk's initialization.
 */

#include "pblk.h"
static unsigned int write_buffer_size;

module_param(write_buffer_size, uint, 0644);
MODULE_PARM_DESC(write_buffer_size, "number of entries in a write buffer");
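/* Usage sketch (assuming pblk is built as a loadable module; when built in,
 * the same knob is pblk.write_buffer_size= on the kernel command line):
 *
 *	modprobe pblk write_buffer_size=1024
 *
 * The override is honored only when it exceeds the geometry-derived default,
 * pblk->pgs_in_buffer (see pblk_rwb_init() below).
 */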
static struct kmem_cache *pblk_ws_cache, *pblk_rec_cache, *pblk_g_rq_cache,
				*pblk_w_rq_cache;
static DECLARE_RWSEM(pblk_lock);
struct bio_set pblk_bio_set;
static int pblk_rw_io(struct request_queue *q, struct pblk *pblk,
			  struct bio *bio)
{
	int ret;

	/* Read requests must be <= 256kb due to NVMe's 64 bit completion bitmap
	 * constraint. Writes can be of arbitrary size.
	 */
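	/* A worked example, assuming 4KB device sectors: 64 completion bits,
	 * one per sector, cover at most 64 * 4KB = 256KB per read vector,
	 * which is where the read-size cap above comes from.
	 */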
	if (bio_data_dir(bio) == READ) {
		blk_queue_split(q, &bio);
		ret = pblk_submit_read(pblk, bio);
		if (ret == NVM_IO_DONE && bio_flagged(bio, BIO_CLONED))
			bio_put(bio);

		return ret;
	}
	/* Prevent deadlock in the case of a modest LUN configuration and large
	 * user I/Os. Unless stalled, the rate limiter leaves at least 256KB
	 * available for user I/O.
	 */
	if (pblk_get_secs(bio) > pblk_rl_max_io(&pblk->rl))
		blk_queue_split(q, &bio);

	return pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
}
static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
{
	struct pblk *pblk = q->queuedata;

	if (bio_op(bio) == REQ_OP_DISCARD) {
		pblk_discard(pblk, bio);
		if (!(bio->bi_opf & REQ_PREFLUSH)) {
			bio_endio(bio);
			return BLK_QC_T_NONE;
		}
	}

	switch (pblk_rw_io(q, pblk, bio)) {
	case NVM_IO_ERR:
		bio_io_error(bio);
		break;
	case NVM_IO_DONE:
		bio_endio(bio);
		break;
	}

	return BLK_QC_T_NONE;
}
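/* Rough footprint example for the map sized below (an illustration, assuming
 * 4KB sectors): a 1TB instance exposes ~256M sectors, so 4-byte entries cost
 * ~1GB of vmalloc'ed memory and 8-byte entries twice that. Keeping the
 * address format under 32 bits (see pblk_set_addrf()) halves the table.
 */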
static size_t pblk_trans_map_size(struct pblk *pblk)
{
	int entry_size = 8;

	if (pblk->addrf_len < 32)
		entry_size = 4;

	return entry_size * pblk->rl.nr_secs;
}
#ifdef CONFIG_NVM_DEBUG
static u32 pblk_l2p_crc(struct pblk *pblk)
{
	size_t map_size;
	u32 crc = ~(u32)0;

	map_size = pblk_trans_map_size(pblk);
	crc = crc32_le(crc, pblk->trans_map, map_size);
	return crc;
}
#endif
static void pblk_l2p_free(struct pblk *pblk)
{
	vfree(pblk->trans_map);
}
static int pblk_l2p_recover(struct pblk *pblk, bool factory_init)
{
	struct pblk_line *line = NULL;

	if (factory_init) {
		pblk_setup_uuid(pblk);
	} else {
		line = pblk_recov_l2p(pblk);
		if (IS_ERR(line)) {
			pr_err("pblk: could not recover l2p table\n");
			return -EFAULT;
		}
	}

#ifdef CONFIG_NVM_DEBUG
	pr_info("pblk init: L2P CRC: %x\n", pblk_l2p_crc(pblk));
#endif

	/* Free full lines directly as GC has not been started yet */
	pblk_gc_free_full_lines(pblk);

	if (!line) {
		/* Configure next line for user data */
		line = pblk_line_get_first_data(pblk);
		if (!line)
			return -EFAULT;
	}

	return 0;
}
static int pblk_l2p_init(struct pblk *pblk, bool factory_init)
{
	sector_t i;
	struct ppa_addr ppa;
	size_t map_size;
	int ret = 0;

	map_size = pblk_trans_map_size(pblk);
	pblk->trans_map = vmalloc(map_size);
	if (!pblk->trans_map)
		return -ENOMEM;

	pblk_ppa_set_empty(&ppa);

	for (i = 0; i < pblk->rl.nr_secs; i++)
		pblk_trans_map_set(pblk, i, ppa);

	ret = pblk_l2p_recover(pblk, factory_init);
	if (ret)
		vfree(pblk->trans_map);

	return ret;
}
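/* Note on pblk_l2p_init() above: every entry is first set to the "empty"
 * PPA, i.e. unmapped; pblk_l2p_recover() then repopulates real mappings
 * from on-media metadata, or starts from a clean map on factory init.
 */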
static void pblk_rwb_free(struct pblk *pblk)
{
	if (pblk_rb_tear_down_check(&pblk->rwb))
		pr_err("pblk: write buffer error on tear down\n");

	pblk_rb_data_free(&pblk->rwb);
	vfree(pblk_rb_entries_ref(&pblk->rwb));
}
static int pblk_rwb_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_rb_entry *entries;
	unsigned long nr_entries, buffer_size;
	unsigned int power_size, power_seg_sz;

	if (write_buffer_size && (write_buffer_size > pblk->pgs_in_buffer))
		buffer_size = write_buffer_size;
	else
		buffer_size = pblk->pgs_in_buffer;

	nr_entries = pblk_rb_calculate_size(buffer_size);

	entries = vzalloc(array_size(nr_entries, sizeof(struct pblk_rb_entry)));
	if (!entries)
		return -ENOMEM;

	power_size = get_count_order(nr_entries);
	power_seg_sz = get_count_order(geo->csecs);

	return pblk_rb_init(&pblk->rwb, entries, power_size, power_seg_sz);
}
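/* Sizing note for pblk_rwb_init(): pblk_rb_init() receives powers of two,
 * so e.g. a requested buffer of 1000 entries becomes
 * get_count_order(1000) = 10, i.e. a 1024-entry ring (an illustrative
 * figure, not a device default).
 */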
/* Minimum pages needed within a lun */
#define ADDR_POOL_SIZE 64
static int pblk_set_addrf_12(struct nvm_geo *geo, struct nvm_addrf_12 *dst)
{
	struct nvm_addrf_12 *src = (struct nvm_addrf_12 *)&geo->addrf;
	int power_len;

	/* Re-calculate channel and lun format to adapt to configuration */
	power_len = get_count_order(geo->num_ch);
	if (1 << power_len != geo->num_ch) {
		pr_err("pblk: supports only power-of-two channel config.\n");
		return -EINVAL;
	}
	dst->ch_len = power_len;

	power_len = get_count_order(geo->num_lun);
	if (1 << power_len != geo->num_lun) {
		pr_err("pblk: supports only power-of-two LUN config.\n");
		return -EINVAL;
	}
	dst->lun_len = power_len;

	dst->blk_len = src->blk_len;
	dst->pg_len = src->pg_len;
	dst->pln_len = src->pln_len;
	dst->sec_len = src->sec_len;

	dst->sec_offset = 0;
	dst->pln_offset = dst->sec_len;
	dst->ch_offset = dst->pln_offset + dst->pln_len;
	dst->lun_offset = dst->ch_offset + dst->ch_len;
	dst->pg_offset = dst->lun_offset + dst->lun_len;
	dst->blk_offset = dst->pg_offset + dst->pg_len;

	dst->sec_mask = ((1ULL << dst->sec_len) - 1) << dst->sec_offset;
	dst->pln_mask = ((1ULL << dst->pln_len) - 1) << dst->pln_offset;
	dst->ch_mask = ((1ULL << dst->ch_len) - 1) << dst->ch_offset;
	dst->lun_mask = ((1ULL << dst->lun_len) - 1) << dst->lun_offset;
	dst->pg_mask = ((1ULL << dst->pg_len) - 1) << dst->pg_offset;
	dst->blk_mask = ((1ULL << dst->blk_len) - 1) << dst->blk_offset;

	return dst->blk_offset + src->blk_len;
}
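/* Layout example for pblk_set_addrf_12() (hypothetical geometry): with
 * sec_len=2, pln_len=1, ch_len=4 (16 channels), lun_len=3 (8 LUNs),
 * pg_len=9 and blk_len=12, the fields pack LSB-first as
 * [blk|pg|lun|ch|pln|sec] and the returned width is 19 + 12 = 31 bits,
 * so the L2P table can use 4-byte entries (see pblk_trans_map_size()).
 */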
static int pblk_set_addrf_20(struct nvm_geo *geo, struct nvm_addrf *adst,
			     struct pblk_addrf *udst)
{
	struct nvm_addrf *src = &geo->addrf;

	adst->ch_len = get_count_order(geo->num_ch);
	adst->lun_len = get_count_order(geo->num_lun);
	adst->chk_len = src->chk_len;
	adst->sec_len = src->sec_len;

	adst->sec_offset = 0;
	adst->ch_offset = adst->sec_len;
	adst->lun_offset = adst->ch_offset + adst->ch_len;
	adst->chk_offset = adst->lun_offset + adst->lun_len;

	adst->sec_mask = ((1ULL << adst->sec_len) - 1) << adst->sec_offset;
	adst->chk_mask = ((1ULL << adst->chk_len) - 1) << adst->chk_offset;
	adst->lun_mask = ((1ULL << adst->lun_len) - 1) << adst->lun_offset;
	adst->ch_mask = ((1ULL << adst->ch_len) - 1) << adst->ch_offset;

	udst->sec_stripe = geo->ws_opt;
	udst->ch_stripe = geo->num_ch;
	udst->lun_stripe = geo->num_lun;

	udst->sec_lun_stripe = udst->sec_stripe * udst->ch_stripe;
	udst->sec_ws_stripe = udst->sec_lun_stripe * udst->lun_stripe;

	return adst->chk_offset + adst->chk_len;
}
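/* Stripe example for pblk_set_addrf_20() (hypothetical geometry): with
 * ws_opt=8, 8 channels and 4 LUNs per channel, sec_stripe=8,
 * sec_lun_stripe=64 and sec_ws_stripe=256: writes place 8 sectors per
 * channel before moving on, wrapping over all 32 LUNs every 256 sectors.
 */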
static int pblk_set_addrf(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int mod;

	switch (geo->version) {
	case NVM_OCSSD_SPEC_12:
		div_u64_rem(geo->clba, pblk->min_write_pgs, &mod);
		if (mod) {
			pr_err("pblk: bad configuration of sectors/pages\n");
			return -EINVAL;
		}

		pblk->addrf_len = pblk_set_addrf_12(geo, (void *)&pblk->addrf);
		break;
	case NVM_OCSSD_SPEC_20:
		pblk->addrf_len = pblk_set_addrf_20(geo, (void *)&pblk->addrf,
							&pblk->uaddrf);
		break;
	default:
		pr_err("pblk: OCSSD revision not supported (%d)\n",
							geo->version);
		return -EINVAL;
	}

	return 0;
}
static int pblk_init_global_caches(struct pblk *pblk)
{
	down_write(&pblk_lock);
	pblk_ws_cache = kmem_cache_create("pblk_blk_ws",
				sizeof(struct pblk_line_ws), 0, 0, NULL);
	if (!pblk_ws_cache) {
		up_write(&pblk_lock);
		return -ENOMEM;
	}

	pblk_rec_cache = kmem_cache_create("pblk_rec",
				sizeof(struct pblk_rec_ctx), 0, 0, NULL);
	if (!pblk_rec_cache) {
		kmem_cache_destroy(pblk_ws_cache);
		up_write(&pblk_lock);
		return -ENOMEM;
	}

	pblk_g_rq_cache = kmem_cache_create("pblk_g_rq", pblk_g_rq_size,
				0, 0, NULL);
	if (!pblk_g_rq_cache) {
		kmem_cache_destroy(pblk_ws_cache);
		kmem_cache_destroy(pblk_rec_cache);
		up_write(&pblk_lock);
		return -ENOMEM;
	}

	pblk_w_rq_cache = kmem_cache_create("pblk_w_rq", pblk_w_rq_size,
				0, 0, NULL);
	if (!pblk_w_rq_cache) {
		kmem_cache_destroy(pblk_ws_cache);
		kmem_cache_destroy(pblk_rec_cache);
		kmem_cache_destroy(pblk_g_rq_cache);
		up_write(&pblk_lock);
		return -ENOMEM;
	}
	up_write(&pblk_lock);

	return 0;
}
static void pblk_free_global_caches(struct pblk *pblk)
{
	kmem_cache_destroy(pblk_ws_cache);
	kmem_cache_destroy(pblk_rec_cache);
	kmem_cache_destroy(pblk_g_rq_cache);
	kmem_cache_destroy(pblk_w_rq_cache);
}
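/* The four caches above are file-scope globals shared by the module rather
 * than per-instance state, which is why their creation and destruction are
 * serialized under pblk_lock instead of a per-instance lock.
 */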
static int pblk_core_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int ret, max_write_ppas;

	atomic64_set(&pblk->user_wa, 0);
	atomic64_set(&pblk->pad_wa, 0);
	atomic64_set(&pblk->gc_wa, 0);
	pblk->user_rst_wa = 0;
	pblk->pad_rst_wa = 0;
	pblk->gc_rst_wa = 0;

	atomic64_set(&pblk->nr_flush, 0);
	pblk->nr_flush_rst = 0;

	pblk->pgs_in_buffer = geo->mw_cunits * geo->all_luns;

	pblk->min_write_pgs = geo->ws_opt * (geo->csecs / PAGE_SIZE);
	max_write_ppas = pblk->min_write_pgs * geo->all_luns;
	pblk->max_write_pgs = min_t(int, max_write_ppas, NVM_MAX_VLBA);
	pblk_set_sec_per_write(pblk, pblk->min_write_pgs);
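	/* Example figures (hypothetical geometry): ws_opt=8, csecs=4096 and
	 * PAGE_SIZE=4096 give min_write_pgs=8; with 64 LUNs, max_write_ppas
	 * would be 512, so max_write_pgs is capped to NVM_MAX_VLBA.
	 */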
	if (pblk->max_write_pgs > PBLK_MAX_REQ_ADDRS) {
		pr_err("pblk: vector list too big(%u > %u)\n",
				pblk->max_write_pgs, PBLK_MAX_REQ_ADDRS);
		return -EINVAL;
	}

	pblk->pad_dist = kcalloc(pblk->min_write_pgs - 1, sizeof(atomic64_t),
								GFP_KERNEL);
	if (!pblk->pad_dist)
		return -ENOMEM;

	if (pblk_init_global_caches(pblk))
		goto fail_free_pad_dist;

	/* Internal bios can be at most the sectors signaled by the device. */
	ret = mempool_init_page_pool(&pblk->page_bio_pool, NVM_MAX_VLBA, 0);
	if (ret)
		goto free_global_caches;

	ret = mempool_init_slab_pool(&pblk->gen_ws_pool, PBLK_GEN_WS_POOL_SIZE,
				     pblk_ws_cache);
	if (ret)
		goto free_page_bio_pool;

	ret = mempool_init_slab_pool(&pblk->rec_pool, geo->all_luns,
				     pblk_rec_cache);
	if (ret)
		goto free_gen_ws_pool;

	ret = mempool_init_slab_pool(&pblk->r_rq_pool, geo->all_luns,
				     pblk_g_rq_cache);
	if (ret)
		goto free_rec_pool;

	ret = mempool_init_slab_pool(&pblk->e_rq_pool, geo->all_luns,
				     pblk_g_rq_cache);
	if (ret)
		goto free_r_rq_pool;

	ret = mempool_init_slab_pool(&pblk->w_rq_pool, geo->all_luns,
				     pblk_w_rq_cache);
	if (ret)
		goto free_e_rq_pool;

	pblk->close_wq = alloc_workqueue("pblk-close-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_NR_CLOSE_JOBS);
	if (!pblk->close_wq)
		goto free_w_rq_pool;

	pblk->bb_wq = alloc_workqueue("pblk-bb-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!pblk->bb_wq)
		goto free_close_wq;

	pblk->r_end_wq = alloc_workqueue("pblk-read-end-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!pblk->r_end_wq)
		goto free_bb_wq;

	if (pblk_set_addrf(pblk))
		goto free_r_end_wq;

	INIT_LIST_HEAD(&pblk->compl_list);
	INIT_LIST_HEAD(&pblk->resubmit_list);

	return 0;

free_r_end_wq:
	destroy_workqueue(pblk->r_end_wq);
free_bb_wq:
	destroy_workqueue(pblk->bb_wq);
free_close_wq:
	destroy_workqueue(pblk->close_wq);
free_w_rq_pool:
	mempool_exit(&pblk->w_rq_pool);
free_e_rq_pool:
	mempool_exit(&pblk->e_rq_pool);
free_r_rq_pool:
	mempool_exit(&pblk->r_rq_pool);
free_rec_pool:
	mempool_exit(&pblk->rec_pool);
free_gen_ws_pool:
	mempool_exit(&pblk->gen_ws_pool);
free_page_bio_pool:
	mempool_exit(&pblk->page_bio_pool);
free_global_caches:
	pblk_free_global_caches(pblk);
fail_free_pad_dist:
	kfree(pblk->pad_dist);
	return -ENOMEM;
}
static void pblk_core_free(struct pblk *pblk)
{
	if (pblk->close_wq)
		destroy_workqueue(pblk->close_wq);

	if (pblk->r_end_wq)
		destroy_workqueue(pblk->r_end_wq);

	if (pblk->bb_wq)
		destroy_workqueue(pblk->bb_wq);

	mempool_exit(&pblk->page_bio_pool);
	mempool_exit(&pblk->gen_ws_pool);
	mempool_exit(&pblk->rec_pool);
	mempool_exit(&pblk->r_rq_pool);
	mempool_exit(&pblk->e_rq_pool);
	mempool_exit(&pblk->w_rq_pool);

	pblk_free_global_caches(pblk);
	kfree(pblk->pad_dist);
}
static void pblk_line_mg_free(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int i;

	kfree(l_mg->bb_template);
	kfree(l_mg->bb_aux);
	kfree(l_mg->vsc_list);

	for (i = 0; i < PBLK_DATA_LINES; i++) {
		kfree(l_mg->sline_meta[i]);
		pblk_mfree(l_mg->eline_meta[i]->buf, l_mg->emeta_alloc_type);
		kfree(l_mg->eline_meta[i]);
	}
}
static void pblk_line_meta_free(struct pblk_line_mgmt *l_mg,
				struct pblk_line *line)
{
	struct pblk_w_err_gc *w_err_gc = line->w_err_gc;

	kfree(line->blk_bitmap);
	kfree(line->erase_bitmap);
	kfree(line->chks);

	pblk_mfree(w_err_gc->lba_list, l_mg->emeta_alloc_type);
	kfree(w_err_gc);
}
static void pblk_lines_free(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line;
	int i;

	spin_lock(&l_mg->free_lock);
	for (i = 0; i < l_mg->nr_lines; i++) {
		line = &pblk->lines[i];

		pblk_line_free(line);
		pblk_line_meta_free(l_mg, line);
	}
	spin_unlock(&l_mg->free_lock);

	pblk_line_mg_free(pblk);

	kfree(pblk->luns);
	kfree(pblk->lines);
}
static int pblk_bb_get_tbl(struct nvm_tgt_dev *dev, struct pblk_lun *rlun,
			   u8 *blks, int nr_blks)
{
	struct ppa_addr ppa;
	int ret;

	ppa.ppa = 0;
	ppa.g.ch = rlun->bppa.g.ch;
	ppa.g.lun = rlun->bppa.g.lun;

	ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
	if (ret)
		return ret;

	nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
	if (nr_blks < 0)
		return -EIO;

	return 0;
}
static void *pblk_bb_get_meta(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	u8 *meta;
	int i, nr_blks, blk_per_lun;
	int ret;

	blk_per_lun = geo->num_chk * geo->pln_mode;
	nr_blks = blk_per_lun * geo->all_luns;

	meta = kmalloc(nr_blks, GFP_KERNEL);
	if (!meta)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < geo->all_luns; i++) {
		struct pblk_lun *rlun = &pblk->luns[i];
		u8 *meta_pos = meta + i * blk_per_lun;

		ret = pblk_bb_get_tbl(dev, rlun, meta_pos, blk_per_lun);
		if (ret) {
			kfree(meta);
			return ERR_PTR(-EIO);
		}
	}

	return meta;
}
static void *pblk_chunk_get_meta(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;

	if (geo->version == NVM_OCSSD_SPEC_12)
		return pblk_bb_get_meta(pblk);
	else
		return pblk_chunk_get_info(pblk);
}
static int pblk_luns_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int i;

	/* TODO: Implement unbalanced LUN support */
	if (geo->num_lun < 0) {
		pr_err("pblk: unbalanced LUN config.\n");
		return -EINVAL;
	}

	pblk->luns = kcalloc(geo->all_luns, sizeof(struct pblk_lun),
								GFP_KERNEL);
	if (!pblk->luns)
		return -ENOMEM;

	for (i = 0; i < geo->all_luns; i++) {
		/* Stripe across channels */
		int ch = i % geo->num_ch;
		int lun_raw = i / geo->num_ch;
		int lunid = lun_raw + ch * geo->num_lun;

		rlun = &pblk->luns[i];
		rlun->bppa = dev->luns[lunid];

		sema_init(&rlun->wr_sem, 1);
	}

	return 0;
}
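/* Mapping example for pblk_luns_init() (hypothetical 4-channel, 2-LUN
 * geometry): consecutive pblk->luns entries land on ch0/lun0, ch1/lun0,
 * ch2/lun0, ch3/lun0, ch0/lun1, ... so I/O striped over pblk->luns rotates
 * channels first, then LUNs within each channel.
 */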
/* See comment over struct line_emeta definition */
static unsigned int calc_emeta_len(struct pblk *pblk)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;

	/* Round to sector size so that lba_list starts on its own sector */
	lm->emeta_sec[1] = DIV_ROUND_UP(
			sizeof(struct line_emeta) + lm->blk_bitmap_len +
			sizeof(struct wa_counters), geo->csecs);
	lm->emeta_len[1] = lm->emeta_sec[1] * geo->csecs;

	/* Round to sector size so that vsc_list starts on its own sector */
	lm->dsec_per_line = lm->sec_per_line - lm->emeta_sec[0];
	lm->emeta_sec[2] = DIV_ROUND_UP(lm->dsec_per_line * sizeof(u64),
			geo->csecs);
	lm->emeta_len[2] = lm->emeta_sec[2] * geo->csecs;

	lm->emeta_sec[3] = DIV_ROUND_UP(l_mg->nr_lines * sizeof(u32),
			geo->csecs);
	lm->emeta_len[3] = lm->emeta_sec[3] * geo->csecs;

	lm->vsc_list_len = l_mg->nr_lines * sizeof(u32);

	return (lm->emeta_len[1] + lm->emeta_len[2] + lm->emeta_len[3]);
}
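/* Putting calc_emeta_len() together: emeta is laid out as a header region
 * (struct line_emeta + block bitmap + write-amplification counters), an
 * lba_list with one u64 per data sector in the line, and a vsc_list with
 * one u32 per line, each region rounded up to a whole device sector.
 */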
static void pblk_set_provision(struct pblk *pblk, long nr_free_blks)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct nvm_geo *geo = &dev->geo;
	sector_t provisioned;
	int sec_meta, blk_meta;

	if (geo->op == NVM_TARGET_DEFAULT_OP)
		pblk->op = PBLK_DEFAULT_OP;
	else
		pblk->op = geo->op;

	provisioned = nr_free_blks;
	provisioned *= (100 - pblk->op);
	sector_div(provisioned, 100);

	pblk->op_blks = nr_free_blks - provisioned;

	/* Internally pblk manages all free blocks, but all calculations based
	 * on user capacity consider only provisioned blocks
	 */
	pblk->rl.total_blocks = nr_free_blks;
	pblk->rl.nr_secs = nr_free_blks * geo->clba;

	/* Consider sectors used for metadata */
	sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
	blk_meta = DIV_ROUND_UP(sec_meta, geo->clba);

	pblk->capacity = (provisioned - blk_meta) * geo->clba;

	atomic_set(&pblk->rl.free_blocks, nr_free_blks);
	atomic_set(&pblk->rl.free_user_blocks, nr_free_blks);
}
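/* Arithmetic example for pblk_set_provision() (illustrative numbers, not a
 * default): with nr_free_blks = 1000 and op = 20(%), provisioned =
 * 1000 * 80 / 100 = 800 blocks and op_blks = 200 remain reserved for
 * over-provisioning; user-visible capacity is then the 800 blocks minus
 * those consumed by per-line metadata, converted to sectors via clba.
 */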
static int pblk_setup_line_meta_12(struct pblk *pblk, struct pblk_line *line,
				   void *chunk_meta)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	int i, chk_per_lun, nr_bad_chks = 0;

	chk_per_lun = geo->num_chk * geo->pln_mode;

	for (i = 0; i < lm->blk_per_line; i++) {
		struct pblk_lun *rlun = &pblk->luns[i];
		struct nvm_chk_meta *chunk;
		int pos = pblk_ppa_to_pos(geo, rlun->bppa);
		u8 *lun_bb_meta = chunk_meta + pos * chk_per_lun;

		chunk = &line->chks[pos];

		/*
		 * In 1.2 spec. chunk state is not persisted by the device. Thus
		 * some of the values are reset each time pblk is instantiated.
		 */
		if (lun_bb_meta[line->id] == NVM_BLK_T_FREE)
			chunk->state = NVM_CHK_ST_FREE;
		else
			chunk->state = NVM_CHK_ST_OFFLINE;

		chunk->type = NVM_CHK_TP_W_SEQ;
		chunk->wi = 0;
		chunk->slba = -1;
		chunk->cnlb = geo->clba;
		chunk->wp = 0;

		if (!(chunk->state & NVM_CHK_ST_OFFLINE))
			continue;

		set_bit(pos, line->blk_bitmap);
		nr_bad_chks++;
	}

	return nr_bad_chks;
}
static int pblk_setup_line_meta_20(struct pblk *pblk, struct pblk_line *line,
				   struct nvm_chk_meta *meta)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	int i, nr_bad_chks = 0;

	for (i = 0; i < lm->blk_per_line; i++) {
		struct pblk_lun *rlun = &pblk->luns[i];
		struct nvm_chk_meta *chunk;
		struct nvm_chk_meta *chunk_meta;
		struct ppa_addr ppa;
		int pos;

		ppa = rlun->bppa;
		pos = pblk_ppa_to_pos(geo, ppa);
		chunk = &line->chks[pos];

		ppa.m.chk = line->id;
		chunk_meta = pblk_chunk_get_off(pblk, meta, ppa);

		chunk->state = chunk_meta->state;
		chunk->type = chunk_meta->type;
		chunk->wi = chunk_meta->wi;
		chunk->slba = chunk_meta->slba;
		chunk->cnlb = chunk_meta->cnlb;
		chunk->wp = chunk_meta->wp;

		if (chunk->type & NVM_CHK_TP_SZ_SPEC) {
			WARN_ONCE(1, "pblk: custom-sized chunks unsupported\n");
			continue;
		}

		if (!(chunk->state & NVM_CHK_ST_OFFLINE))
			continue;

		set_bit(pos, line->blk_bitmap);
		nr_bad_chks++;
	}

	return nr_bad_chks;
}
static long pblk_setup_line_meta(struct pblk *pblk, struct pblk_line *line,
				 void *chunk_meta, int line_id)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	long nr_bad_chks, chk_in_line;

	line->pblk = pblk;
	line->id = line_id;
	line->type = PBLK_LINETYPE_FREE;
	line->state = PBLK_LINESTATE_NEW;
	line->gc_group = PBLK_LINEGC_NONE;
	line->vsc = &l_mg->vsc_list[line_id];
	spin_lock_init(&line->lock);

	if (geo->version == NVM_OCSSD_SPEC_12)
		nr_bad_chks = pblk_setup_line_meta_12(pblk, line, chunk_meta);
	else
		nr_bad_chks = pblk_setup_line_meta_20(pblk, line, chunk_meta);

	chk_in_line = lm->blk_per_line - nr_bad_chks;
	if (nr_bad_chks < 0 || nr_bad_chks > lm->blk_per_line ||
					chk_in_line < lm->min_blk_line) {
		line->state = PBLK_LINESTATE_BAD;
		list_add_tail(&line->list, &l_mg->bad_list);
		return 0;
	}

	atomic_set(&line->blk_in_line, chk_in_line);
	list_add_tail(&line->list, &l_mg->free_list);
	l_mg->nr_free_lines++;

	return chk_in_line;
}
static int pblk_alloc_line_meta(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;

	line->blk_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
	if (!line->blk_bitmap)
		return -ENOMEM;

	line->erase_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
	if (!line->erase_bitmap)
		goto free_blk_bitmap;

	line->chks = kmalloc_array(lm->blk_per_line,
				   sizeof(struct nvm_chk_meta), GFP_KERNEL);
	if (!line->chks)
		goto free_erase_bitmap;

	line->w_err_gc = kzalloc(sizeof(struct pblk_w_err_gc), GFP_KERNEL);
	if (!line->w_err_gc)
		goto free_chks;

	return 0;

free_chks:
	kfree(line->chks);
free_erase_bitmap:
	kfree(line->erase_bitmap);
free_blk_bitmap:
	kfree(line->blk_bitmap);
	return -ENOMEM;
}
static int pblk_line_mg_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	int i, bb_distance;

	l_mg->nr_lines = geo->num_chk;
	l_mg->log_line = l_mg->data_line = NULL;
	l_mg->l_seq_nr = l_mg->d_seq_nr = 0;
	l_mg->nr_free_lines = 0;
	bitmap_zero(&l_mg->meta_bitmap, PBLK_DATA_LINES);

	INIT_LIST_HEAD(&l_mg->free_list);
	INIT_LIST_HEAD(&l_mg->corrupt_list);
	INIT_LIST_HEAD(&l_mg->bad_list);
	INIT_LIST_HEAD(&l_mg->gc_full_list);
	INIT_LIST_HEAD(&l_mg->gc_high_list);
	INIT_LIST_HEAD(&l_mg->gc_mid_list);
	INIT_LIST_HEAD(&l_mg->gc_low_list);
	INIT_LIST_HEAD(&l_mg->gc_empty_list);
	INIT_LIST_HEAD(&l_mg->gc_werr_list);

	INIT_LIST_HEAD(&l_mg->emeta_list);

	l_mg->gc_lists[0] = &l_mg->gc_werr_list;
	l_mg->gc_lists[1] = &l_mg->gc_high_list;
	l_mg->gc_lists[2] = &l_mg->gc_mid_list;
	l_mg->gc_lists[3] = &l_mg->gc_low_list;

	spin_lock_init(&l_mg->free_lock);
	spin_lock_init(&l_mg->close_lock);
	spin_lock_init(&l_mg->gc_lock);

	l_mg->vsc_list = kcalloc(l_mg->nr_lines, sizeof(__le32), GFP_KERNEL);
	if (!l_mg->vsc_list)
		goto fail;

	l_mg->bb_template = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!l_mg->bb_template)
		goto fail_free_vsc_list;

	l_mg->bb_aux = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!l_mg->bb_aux)
		goto fail_free_bb_template;

	/* smeta is always small enough to fit on a kmalloc memory allocation,
	 * emeta depends on the number of LUNs allocated to the pblk instance
	 */
	for (i = 0; i < PBLK_DATA_LINES; i++) {
		l_mg->sline_meta[i] = kmalloc(lm->smeta_len, GFP_KERNEL);
		if (!l_mg->sline_meta[i])
			goto fail_free_smeta;
	}

	/* emeta allocates three different buffers for managing metadata with
	 * in-memory and in-media layouts
	 */
	for (i = 0; i < PBLK_DATA_LINES; i++) {
		struct pblk_emeta *emeta;

		emeta = kmalloc(sizeof(struct pblk_emeta), GFP_KERNEL);
		if (!emeta)
			goto fail_free_emeta;

		if (lm->emeta_len[0] > KMALLOC_MAX_CACHE_SIZE) {
			l_mg->emeta_alloc_type = PBLK_VMALLOC_META;

			emeta->buf = vmalloc(lm->emeta_len[0]);
			if (!emeta->buf) {
				kfree(emeta);
				goto fail_free_emeta;
			}

			emeta->nr_entries = lm->emeta_sec[0];
			l_mg->eline_meta[i] = emeta;
		} else {
			l_mg->emeta_alloc_type = PBLK_KMALLOC_META;

			emeta->buf = kmalloc(lm->emeta_len[0], GFP_KERNEL);
			if (!emeta->buf) {
				kfree(emeta);
				goto fail_free_emeta;
			}

			emeta->nr_entries = lm->emeta_sec[0];
			l_mg->eline_meta[i] = emeta;
		}
	}

	for (i = 0; i < l_mg->nr_lines; i++)
		l_mg->vsc_list[i] = cpu_to_le32(EMPTY_ENTRY);

	bb_distance = (geo->all_luns) * geo->ws_opt;
	for (i = 0; i < lm->sec_per_line; i += bb_distance)
		bitmap_set(l_mg->bb_template, i, geo->ws_opt);

	return 0;

fail_free_emeta:
	while (--i >= 0) {
		if (l_mg->emeta_alloc_type == PBLK_VMALLOC_META)
			vfree(l_mg->eline_meta[i]->buf);
		else
			kfree(l_mg->eline_meta[i]->buf);
		kfree(l_mg->eline_meta[i]);
	}
fail_free_smeta:
	for (i = 0; i < PBLK_DATA_LINES; i++)
		kfree(l_mg->sline_meta[i]);
fail_free_bb_template:
	kfree(l_mg->bb_template);
fail_free_vsc_list:
	kfree(l_mg->vsc_list);
fail:
	return -ENOMEM;
}
static int pblk_line_meta_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	unsigned int smeta_len, emeta_len;
	int i;

	lm->sec_per_line = geo->clba * geo->all_luns;
	lm->blk_per_line = geo->all_luns;
	lm->blk_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
	lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long);
	lm->lun_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
	lm->mid_thrs = lm->sec_per_line / 2;
	lm->high_thrs = lm->sec_per_line / 4;
	lm->meta_distance = (geo->all_luns / 2) * pblk->min_write_pgs;

	/* Calculate necessary pages for smeta. See comment over struct
	 * line_smeta definition
	 */
	i = 1;
add_smeta_page:
	lm->smeta_sec = i * geo->ws_opt;
	lm->smeta_len = lm->smeta_sec * geo->csecs;

	smeta_len = sizeof(struct line_smeta) + lm->lun_bitmap_len;
	if (smeta_len > lm->smeta_len) {
		i++;
		goto add_smeta_page;
	}

	/* Calculate necessary pages for emeta. See comment over struct
	 * line_emeta definition
	 */
	i = 1;
add_emeta_page:
	lm->emeta_sec[0] = i * geo->ws_opt;
	lm->emeta_len[0] = lm->emeta_sec[0] * geo->csecs;

	emeta_len = calc_emeta_len(pblk);
	if (emeta_len > lm->emeta_len[0]) {
		i++;
		goto add_emeta_page;
	}

	lm->emeta_bb = geo->all_luns > i ? geo->all_luns - i : 0;

	lm->min_blk_line = 1;
	if (geo->all_luns > 1)
		lm->min_blk_line += DIV_ROUND_UP(lm->smeta_sec +
					lm->emeta_sec[0], geo->clba);

	if (lm->min_blk_line > lm->blk_per_line) {
		pr_err("pblk: config. not supported. Min. LUN in line:%d\n",
							lm->blk_per_line);
		return -EINVAL;
	}

	return 0;
}
static int pblk_lines_init(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line;
	void *chunk_meta;
	long nr_free_chks = 0;
	int i, ret;

	ret = pblk_line_meta_init(pblk);
	if (ret)
		return ret;

	ret = pblk_line_mg_init(pblk);
	if (ret)
		return ret;

	ret = pblk_luns_init(pblk);
	if (ret)
		goto fail_free_meta;

	chunk_meta = pblk_chunk_get_meta(pblk);
	if (IS_ERR(chunk_meta)) {
		ret = PTR_ERR(chunk_meta);
		goto fail_free_luns;
	}

	pblk->lines = kcalloc(l_mg->nr_lines, sizeof(struct pblk_line),
							GFP_KERNEL);
	if (!pblk->lines) {
		ret = -ENOMEM;
		goto fail_free_chunk_meta;
	}

	for (i = 0; i < l_mg->nr_lines; i++) {
		line = &pblk->lines[i];

		ret = pblk_alloc_line_meta(pblk, line);
		if (ret)
			goto fail_free_lines;

		nr_free_chks += pblk_setup_line_meta(pblk, line, chunk_meta, i);
	}

	if (!nr_free_chks) {
		pr_err("pblk: too many bad blocks prevent for sane instance\n");
		return -EINTR;
	}

	pblk_set_provision(pblk, nr_free_chks);

	kfree(chunk_meta);
	return 0;

fail_free_lines:
	while (--i >= 0)
		pblk_line_meta_free(l_mg, &pblk->lines[i]);
	kfree(pblk->lines);
fail_free_chunk_meta:
	kfree(chunk_meta);
fail_free_luns:
	kfree(pblk->luns);
fail_free_meta:
	pblk_line_mg_free(pblk);

	return ret;
}
static int pblk_writer_init(struct pblk *pblk)
{
	pblk->writer_ts = kthread_create(pblk_write_ts, pblk, "pblk-writer-t");
	if (IS_ERR(pblk->writer_ts)) {
		int err = PTR_ERR(pblk->writer_ts);

		if (err != -EINTR)
			pr_err("pblk: could not allocate writer kthread (%d)\n",
					err);
		return err;
	}

	timer_setup(&pblk->wtimer, pblk_write_timer_fn, 0);
	mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));

	return 0;
}
static void pblk_writer_stop(struct pblk *pblk)
{
	/* The pipeline must be stopped and the write buffer emptied before the
	 * write thread is stopped
	 */
	WARN(pblk_rb_read_count(&pblk->rwb),
			"Stopping not fully persisted write buffer\n");

	WARN(pblk_rb_sync_count(&pblk->rwb),
			"Stopping not fully synced write buffer\n");

	del_timer_sync(&pblk->wtimer);
	if (pblk->writer_ts)
		kthread_stop(pblk->writer_ts);
}
static void pblk_free(struct pblk *pblk)
{
	pblk_lines_free(pblk);
	pblk_l2p_free(pblk);
	pblk_rwb_free(pblk);
	pblk_core_free(pblk);

	kfree(pblk);
}
static void pblk_tear_down(struct pblk *pblk, bool graceful)
{
	if (graceful)
		__pblk_pipeline_flush(pblk);
	__pblk_pipeline_stop(pblk);
	pblk_writer_stop(pblk);
	pblk_rb_sync_l2p(&pblk->rwb);
	pblk_rl_free(&pblk->rl);

	pr_debug("pblk: consistent tear down (graceful:%d)\n", graceful);
}
static void pblk_exit(void *private, bool graceful)
{
	struct pblk *pblk = private;

	down_write(&pblk_lock);
	pblk_gc_exit(pblk, graceful);
	pblk_tear_down(pblk, graceful);

#ifdef CONFIG_NVM_DEBUG
	pr_info("pblk exit: L2P CRC: %x\n", pblk_l2p_crc(pblk));
#endif

	pblk_free(pblk);
	up_write(&pblk_lock);
}
static sector_t pblk_capacity(void *private)
{
	struct pblk *pblk = private;

	return pblk->capacity * NR_PHY_IN_LOG;
}
static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
		       int flags)
{
	struct nvm_geo *geo = &dev->geo;
	struct request_queue *bqueue = dev->q;
	struct request_queue *tqueue = tdisk->queue;
	struct pblk *pblk;
	int ret;

	/* pblk supports 1.2 and 2.0 versions */
	if (!(geo->version == NVM_OCSSD_SPEC_12 ||
					geo->version == NVM_OCSSD_SPEC_20)) {
		pr_err("pblk: OCSSD version not supported (%u)\n",
							geo->version);
		return ERR_PTR(-EINVAL);
	}

	if (geo->version == NVM_OCSSD_SPEC_12 && geo->dom & NVM_RSP_L2P) {
		pr_err("pblk: host-side L2P table not supported. (%x)\n",
							geo->dom);
		return ERR_PTR(-EINVAL);
	}

	pblk = kzalloc(sizeof(struct pblk), GFP_KERNEL);
	if (!pblk)
		return ERR_PTR(-ENOMEM);

	pblk->dev = dev;
	pblk->disk = tdisk;
	pblk->state = PBLK_STATE_RUNNING;
	pblk->gc.gc_enabled = 0;

	spin_lock_init(&pblk->resubmit_lock);
	spin_lock_init(&pblk->trans_lock);
	spin_lock_init(&pblk->lock);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_set(&pblk->inflight_writes, 0);
	atomic_long_set(&pblk->padded_writes, 0);
	atomic_long_set(&pblk->padded_wb, 0);
	atomic_long_set(&pblk->req_writes, 0);
	atomic_long_set(&pblk->sub_writes, 0);
	atomic_long_set(&pblk->sync_writes, 0);
	atomic_long_set(&pblk->inflight_reads, 0);
	atomic_long_set(&pblk->cache_reads, 0);
	atomic_long_set(&pblk->sync_reads, 0);
	atomic_long_set(&pblk->recov_writes, 0);
	atomic_long_set(&pblk->recov_gc_writes, 0);
	atomic_long_set(&pblk->recov_gc_reads, 0);
#endif

	atomic_long_set(&pblk->read_failed, 0);
	atomic_long_set(&pblk->read_empty, 0);
	atomic_long_set(&pblk->read_high_ecc, 0);
	atomic_long_set(&pblk->read_failed_gc, 0);
	atomic_long_set(&pblk->write_failed, 0);
	atomic_long_set(&pblk->erase_failed, 0);
	ret = pblk_core_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize core\n");
		goto fail;
	}

	ret = pblk_lines_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize lines\n");
		goto fail_free_core;
	}

	ret = pblk_rwb_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize write buffer\n");
		goto fail_free_lines;
	}

	ret = pblk_l2p_init(pblk, flags & NVM_TARGET_FACTORY);
	if (ret) {
		pr_err("pblk: could not initialize maps\n");
		goto fail_free_rwb;
	}

	ret = pblk_writer_init(pblk);
	if (ret) {
		if (ret != -EINTR)
			pr_err("pblk: could not initialize write thread\n");
		goto fail_free_l2p;
	}

	ret = pblk_gc_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize gc\n");
		goto fail_stop_writer;
	}

	/* inherit the size from the underlying device */
	blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
	blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));

	blk_queue_write_cache(tqueue, true, false);

	tqueue->limits.discard_granularity = geo->clba * geo->csecs;
	tqueue->limits.discard_alignment = 0;
	blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, tqueue);

	pr_info("pblk(%s): luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
			tdisk->disk_name,
			geo->all_luns, pblk->l_mg.nr_lines,
			(unsigned long long)pblk->rl.nr_secs,
			pblk->rwb.nr_entries);

	wake_up_process(pblk->writer_ts);

	/* Check if we need to start GC */
	pblk_gc_should_kick(pblk);

	return pblk;

fail_stop_writer:
	pblk_writer_stop(pblk);
fail_free_l2p:
	pblk_l2p_free(pblk);
fail_free_rwb:
	pblk_rwb_free(pblk);
fail_free_lines:
	pblk_lines_free(pblk);
fail_free_core:
	pblk_core_free(pblk);
fail:
	kfree(pblk);
	return ERR_PTR(ret);
}
/* physical block device target */
static struct nvm_tgt_type tt_pblk = {
	.name		= "pblk",
	.version	= {1, 0, 0},

	.make_rq	= pblk_make_rq,
	.capacity	= pblk_capacity,

	.init		= pblk_init,
	.exit		= pblk_exit,

	.sysfs_init	= pblk_sysfs_init,
	.sysfs_exit	= pblk_sysfs_exit,
	.owner		= THIS_MODULE,
};
static int __init pblk_module_init(void)
{
	int ret;

	ret = bioset_init(&pblk_bio_set, BIO_POOL_SIZE, 0, 0);
	if (ret)
		return ret;
	ret = nvm_register_tgt_type(&tt_pblk);
	if (ret)
		bioset_exit(&pblk_bio_set);
	return ret;
}

static void pblk_module_exit(void)
{
	bioset_exit(&pblk_bio_set);
	nvm_unregister_tgt_type(&tt_pblk);
}

module_init(pblk_module_init);
module_exit(pblk_module_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Physical Block-Device for Open-Channel SSDs");