/*
 * Copyright (C) 2016 CNEX Labs
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-write.c - pblk's write path from write buffer to media
 */

#include "pblk.h"
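/* Complete the original user bios attached to each valid write buffer entry
 * covered by this request, free any pages added as padding, and advance the
 * ring buffer's sync pointer before releasing the request.
 */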
static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
				    struct pblk_c_ctx *c_ctx)
{
	struct bio *original_bio;
	unsigned long ret;
	int i;

	for (i = 0; i < c_ctx->nr_valid; i++) {
		struct pblk_w_ctx *w_ctx;

		w_ctx = pblk_rb_w_ctx(&pblk->rwb, c_ctx->sentry + i);
		while ((original_bio = bio_list_pop(&w_ctx->bios)))
			bio_endio(original_bio);
	}

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
							c_ctx->nr_padded);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(rqd->nr_ppas, &pblk->sync_writes);
#endif

	ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);

	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);

	return ret;
}
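/* Complete a write that was parked on the completion list because it finished
 * out of order.
 */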
static unsigned long pblk_end_queued_w_bio(struct pblk *pblk,
					   struct nvm_rq *rqd,
					   struct pblk_c_ctx *c_ctx)
{
	list_del(&c_ctx->list);
	return pblk_end_w_bio(pblk, rqd, c_ctx);
}
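/* Writes must be completed in the order in which they were committed to the
 * write buffer so that the sync pointer only advances over contiguous
 * entries. Out-of-order completions are queued on pblk->compl_list until the
 * request matching the current sync position arrives.
 */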
static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
				struct pblk_c_ctx *c_ctx)
{
	struct pblk_c_ctx *c, *r;
	unsigned long flags;
	unsigned long pos;

#ifdef CONFIG_NVM_DEBUG
	atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
#endif

	pblk_up_rq(pblk, rqd->ppa_list, rqd->nr_ppas, c_ctx->lun_bitmap);

	pos = pblk_rb_sync_init(&pblk->rwb, &flags);
	if (pos == c_ctx->sentry) {
		pos = pblk_end_w_bio(pblk, rqd, c_ctx);

retry:
		list_for_each_entry_safe(c, r, &pblk->compl_list, list) {
			rqd = nvm_rq_from_c_ctx(c);
			if (c->sentry == pos) {
				pos = pblk_end_queued_w_bio(pblk, rqd, c);
				goto retry;
			}
		}
	} else {
		WARN_ON(nvm_rq_from_c_ctx(c_ctx) != rqd);
		list_add_tail(&c_ctx->list, &pblk->compl_list);
	}
	pblk_rb_sync_end(&pblk->rwb, &flags);
}
/* When a write fails, we are not sure whether the block has grown bad or a page
 * range is more susceptible to write errors. If a high number of pages fail, we
 * assume that the block is bad and we mark it accordingly. In all cases, we
 * remap and resubmit the failed entries as fast as possible; if a flush is
 * waiting on a completion, the whole stack would stall otherwise.
 */
static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
{
	void *comp_bits = &rqd->ppa_status;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_rec_ctx *recovery;
	struct ppa_addr *ppa_list = rqd->ppa_list;
	int nr_ppas = rqd->nr_ppas;
	unsigned int c_entries;
	int bit, ret;

	if (unlikely(nr_ppas == 1))
		ppa_list = &rqd->ppa_addr;

	recovery = mempool_alloc(pblk->rec_pool, GFP_ATOMIC);

	INIT_LIST_HEAD(&recovery->failed);

	bit = -1;
	while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
		struct pblk_rb_entry *entry;
		struct ppa_addr ppa;

		/* Bits set past the valid entries indicate a corrupted request */
		if (bit > c_ctx->nr_valid) {
			WARN_ONCE(1, "pblk: corrupted write request\n");
			mempool_free(recovery, pblk->rec_pool);
			goto out;
		}

		ppa = ppa_list[bit];
		entry = pblk_rb_sync_scan_entry(&pblk->rwb, &ppa);
		if (!entry) {
			pr_err("pblk: could not scan entry on write failure\n");
			mempool_free(recovery, pblk->rec_pool);
			goto out;
		}

		/* The list is filled first and emptied afterwards. No need for
		 * protecting it with a lock
		 */
		list_add_tail(&entry->index, &recovery->failed);
	}

	c_entries = find_first_bit(comp_bits, nr_ppas);
	ret = pblk_recov_setup_rq(pblk, c_ctx, recovery, comp_bits, c_entries);
	if (ret) {
		pr_err("pblk: could not recover from write failure\n");
		mempool_free(recovery, pblk->rec_pool);
		goto out;
	}

	INIT_WORK(&recovery->ws_rec, pblk_submit_rec);
	queue_work(pblk->close_wq, &recovery->ws_rec);

out:
	pblk_complete_write(pblk, rqd, c_ctx);
}
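/* Completion path for user data writes: log and recover on error, otherwise
 * complete the request in order.
 */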
static void pblk_end_io_write(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);

	if (rqd->error) {
		pblk_log_write_err(pblk, rqd);
		return pblk_end_w_fail(pblk, rqd);
	}
#ifdef CONFIG_NVM_DEBUG
	else
		WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif

	pblk_complete_write(pblk, rqd, c_ctx);
	atomic_dec(&pblk->inflight_io);
}
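/* Completion path for end-of-line metadata (emeta) writes. Once all emeta
 * entries for the line have synced, schedule the work that closes the line.
 */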
static void pblk_end_io_write_meta(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_line *line = m_ctx->private;
	struct pblk_emeta *emeta = line->emeta;
	int sync;

	pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);

	if (rqd->error) {
		pblk_log_write_err(pblk, rqd);
		pr_err("pblk: metadata I/O failed. Line %d\n", line->id);
	}

	sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
	if (sync == emeta->nr_entries)
		pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws,
						GFP_ATOMIC, pblk->close_wq);

	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);

	atomic_dec(&pblk->inflight_io);
}
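/* Common setup for a write request: program mode, completion callback and the
 * DMA-able metadata/PPA lists shared by data, recovery and emeta writes.
 */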
static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   unsigned int nr_secs,
			   nvm_end_io_fn(*end_io))
{
	struct nvm_tgt_dev *dev = pblk->dev;

	/* Setup write request */
	rqd->opcode = NVM_OP_PWRITE;
	rqd->nr_ppas = nr_secs;
	rqd->flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
	rqd->private = pblk;
	rqd->end_io = end_io;

	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd->dma_meta_list);
	if (!rqd->meta_list)
		return -ENOMEM;

	rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
	rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

	return 0;
}
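/* Map a set of write buffer entries (valid plus padding) to physical
 * addresses. If the next line still has blocks pending erase, map through the
 * erase-aware path, which also picks a block to erase via erase_ppa.
 */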
static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   struct ppa_addr *erase_ppa)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *e_line = pblk_line_get_erase(pblk);
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	unsigned int valid = c_ctx->nr_valid;
	unsigned int padded = c_ctx->nr_padded;
	unsigned int nr_secs = valid + padded;
	unsigned long *lun_bitmap;
	int ret;

	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
	if (!lun_bitmap)
		return -ENOMEM;
	c_ctx->lun_bitmap = lun_bitmap;

	ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write);
	if (ret) {
		kfree(lun_bitmap);
		return ret;
	}

	if (likely(!e_line || !atomic_read(&e_line->left_eblks)))
		pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, valid, 0);
	else
		pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
							valid, erase_ppa);

	return 0;
}
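/* Set up a recovery write request that resubmits previously failed entries;
 * the sector count is taken from the request itself rather than recalculated
 * from the write buffer.
 */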
int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
			struct pblk_c_ctx *c_ctx)
{
	struct pblk_line_meta *lm = &pblk->lm;
	unsigned long *lun_bitmap;
	int ret;

	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
	if (!lun_bitmap)
		return -ENOMEM;

	c_ctx->lun_bitmap = lun_bitmap;

	ret = pblk_alloc_w_rq(pblk, rqd, rqd->nr_ppas, pblk_end_io_write);
	if (ret)
		return ret;

	pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, c_ctx->nr_valid, 0);

	rqd->ppa_status = (u64)0;
	rqd->flags = pblk_set_progr_mode(pblk, PBLK_WRITE);

	return ret;
}
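/* Decide how many sectors to sync out of those available, taking outstanding
 * flushes and the device's write size constraints into account.
 */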
static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
				  unsigned int secs_to_flush)
{
	int secs_to_sync;

	secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush);

#ifdef CONFIG_NVM_DEBUG
	if ((!secs_to_sync && secs_to_flush)
			|| (secs_to_sync < 0)
			|| (secs_to_sync > secs_avail && !secs_to_flush)) {
		pr_err("pblk: bad sector calculation (a:%d,s:%d,f:%d)\n",
				secs_avail, secs_to_sync, secs_to_flush);
	}
#endif

	return secs_to_sync;
}
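/* Write the next chunk of end-of-line metadata (emeta) for a line that is
 * being closed. Pages are allocated from the meta line, and the line is
 * removed from the emeta list once its whole emeta buffer has been submitted.
 */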
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = meta_line->emeta;
	struct pblk_g_ctx *m_ctx;
	struct bio *bio;
	struct nvm_rq *rqd;
	void *data;
	u64 paddr;
	int rq_ppas = pblk->min_write_pgs;
	int id = meta_line->id;
	int rq_len;
	int i, j;
	int ret;

	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);

	m_ctx = nvm_rq_to_pdu(rqd);
	m_ctx->private = meta_line;

	rq_len = rq_ppas * geo->sec_size;
	data = ((void *)emeta->buf) + emeta->mem;

	bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
					l_mg->emeta_alloc_type, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto fail_free_rqd;
	}
	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	rqd->bio = bio;

	ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
	if (ret)
		goto fail_free_bio;

	for (i = 0; i < rqd->nr_ppas; ) {
		spin_lock(&meta_line->lock);
		paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
		spin_unlock(&meta_line->lock);
		for (j = 0; j < rq_ppas; j++, i++, paddr++)
			rqd->ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
	}

	emeta->mem += rq_len;
	if (emeta->mem >= lm->emeta_len[0]) {
		spin_lock(&l_mg->close_lock);
		list_del(&meta_line->list);
		spin_unlock(&l_mg->close_lock);
	}

	pblk_down_page(pblk, rqd->ppa_list, rqd->nr_ppas);

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: emeta I/O submission failed: %d\n", ret);
		goto fail_rollback;
	}

	return NVM_IO_OK;

fail_rollback:
	pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
	spin_lock(&l_mg->close_lock);
	pblk_dealloc_page(pblk, meta_line, rq_ppas);
	list_add(&meta_line->list, &l_mg->emeta_list);
	spin_unlock(&l_mg->close_lock);
fail_free_bio:
	bio_put(bio);
fail_free_rqd:
	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
	return ret;
}
static inline bool pblk_valid_meta_ppa(struct pblk *pblk,
				       struct pblk_line *meta_line,
				       struct nvm_rq *data_rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_c_ctx *data_c_ctx = nvm_rq_to_pdu(data_rqd);
	struct pblk_line *data_line = pblk_line_get_data(pblk);
	struct ppa_addr ppa, ppa_opt;
	u64 paddr;
	int pos_opt;

	/* Schedule a metadata I/O that is half the distance from the data I/O
	 * with regard to the number of LUNs forming the pblk instance. This
	 * balances LUN conflicts across every I/O.
	 *
	 * When the LUN configuration changes (e.g., due to GC), this distance
	 * can align, which would result in metadata and data I/Os colliding.
	 * In this case, modify the distance so that it is no longer optimal,
	 * but move the optimal point in the right direction.
	 */
	paddr = pblk_lookup_page(pblk, meta_line);
	ppa = addr_to_gen_ppa(pblk, paddr, 0);
	ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
	pos_opt = pblk_ppa_to_pos(geo, ppa_opt);

	if (test_bit(pos_opt, data_c_ctx->lun_bitmap) ||
				test_bit(pos_opt, data_line->blk_bitmap))
		return true;

	if (unlikely(pblk_ppa_comp(ppa_opt, ppa)))
		data_line->meta_distance--;

	return false;
}
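/* Return the line whose emeta should be written alongside this data request,
 * or NULL if no emeta is pending or the candidate PPA would collide with the
 * data I/O.
 */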
static struct pblk_line *pblk_should_submit_meta_io(struct pblk *pblk,
						    struct nvm_rq *data_rqd)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *meta_line;

	spin_lock(&l_mg->close_lock);
retry:
	if (list_empty(&l_mg->emeta_list)) {
		spin_unlock(&l_mg->close_lock);
		return NULL;
	}
	meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
	if (meta_line->emeta->mem >= lm->emeta_len[0])
		goto retry;
	spin_unlock(&l_mg->close_lock);

	if (!pblk_valid_meta_ppa(pblk, meta_line, data_rqd))
		return NULL;

	return meta_line;
}
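/* Submit one write set: the data I/O for the current line, optionally an
 * erase for the next line and an emeta I/O for a previous line being closed.
 */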
static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct ppa_addr erase_ppa;
	struct pblk_line *meta_line;
	int err;

	ppa_set_empty(&erase_ppa);

	/* Assign lbas to ppas and populate request structure */
	err = pblk_setup_w_rq(pblk, rqd, &erase_ppa);
	if (err) {
		pr_err("pblk: could not setup write request: %d\n", err);
		return NVM_IO_ERR;
	}

	meta_line = pblk_should_submit_meta_io(pblk, rqd);

	/* Submit data write for current data line */
	err = pblk_submit_io(pblk, rqd);
	if (err) {
		pr_err("pblk: data I/O submission failed: %d\n", err);
		return NVM_IO_ERR;
	}

	if (!ppa_empty(erase_ppa)) {
		/* Submit erase for next data line */
		if (pblk_blk_erase_async(pblk, erase_ppa)) {
			struct pblk_line *e_line = pblk_line_get_erase(pblk);
			struct nvm_tgt_dev *dev = pblk->dev;
			struct nvm_geo *geo = &dev->geo;
			int bit;

			atomic_inc(&e_line->left_eblks);
			bit = pblk_ppa_to_pos(geo, erase_ppa);
			WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
		}
	}

	if (meta_line) {
		/* Submit metadata write for previous data line */
		err = pblk_submit_meta_io(pblk, meta_line);
		if (err) {
			pr_err("pblk: metadata I/O submission failed: %d\n",
									err);
			return NVM_IO_ERR;
		}
	}

	return NVM_IO_OK;
}
static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = rqd->bio;

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, bio, c_ctx->nr_valid,
							c_ctx->nr_padded);
}
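/* Drain the write buffer once: commit up to max_write_pgs sectors, build a
 * write bio from the committed entries and submit the resulting I/O set.
 * Returns 0 if something was submitted, nonzero if the caller should sleep.
 */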
static int pblk_submit_write(struct pblk *pblk)
{
	struct bio *bio;
	struct nvm_rq *rqd;
	unsigned int secs_avail, secs_to_sync, secs_to_com;
	unsigned int secs_to_flush;
	unsigned long pos;

	/* If there are no sectors in the cache, flushes (bios without data)
	 * will be cleared on the cache threads
	 */
	secs_avail = pblk_rb_read_count(&pblk->rwb);
	if (!secs_avail)
		return 1;

	secs_to_flush = pblk_rb_sync_point_count(&pblk->rwb);
	if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
		return 1;

	secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail, secs_to_flush);
	if (secs_to_sync > pblk->max_write_pgs) {
		pr_err("pblk: bad buffer sync calculation\n");
		return 1;
	}

	secs_to_com = (secs_to_sync > secs_avail) ? secs_avail : secs_to_sync;
	pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);

	bio = bio_alloc(GFP_KERNEL, secs_to_sync);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE);
	rqd->bio = bio;

	if (pblk_rb_read_to_bio(&pblk->rwb, rqd, pos, secs_to_sync,
								secs_avail)) {
		pr_err("pblk: corrupted write bio\n");
		goto fail_put_bio;
	}

	if (pblk_submit_io_set(pblk, rqd))
		goto fail_free_bio;

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(secs_to_sync, &pblk->sub_writes);
#endif

	return 0;

fail_free_bio:
	pblk_free_write_rqd(pblk, rqd);
fail_put_bio:
	bio_put(bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);

	return 1;
}
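/* Write thread: repeatedly drain the write buffer; when nothing can be
 * submitted, go to sleep until there is more work to do.
 */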
int pblk_write_ts(void *data)
{
	struct pblk *pblk = data;

	while (!kthread_should_stop()) {
		if (!pblk_submit_write(pblk))
			continue;
		set_current_state(TASK_INTERRUPTIBLE);