// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-write.c - pblk's write path from write buffer to media
 */

#include "pblk.h"
#include "pblk-trace.h"
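
/*
 * Write path overview: user writes land in the ring buffer (pblk->rwb) and
 * are drained by the write thread (pblk_write_ts -> pblk_submit_write). Each
 * request is mapped onto the current data line (pblk_submit_io_set), which
 * may also schedule an erase for the next line and an emeta write for a
 * previously written line. Completions (pblk_end_io_write) release ring
 * buffer entries in order; failed writes go through pblk_end_w_fail and are
 * eventually resubmitted.
 */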
static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
				    struct pblk_c_ctx *c_ctx)
{
	struct bio *original_bio;
	struct pblk_rb *rwb = &pblk->rwb;
	unsigned long ret;
	int i;

	for (i = 0; i < c_ctx->nr_valid; i++) {
		struct pblk_w_ctx *w_ctx;
		int pos = c_ctx->sentry + i;
		int flags;

		w_ctx = pblk_rb_w_ctx(rwb, pos);
		flags = READ_ONCE(w_ctx->flags);

		if (flags & PBLK_FLUSH_ENTRY) {
			flags &= ~PBLK_FLUSH_ENTRY;
			/* Release flags on context. Protect from writes */
			smp_store_release(&w_ctx->flags, flags);

#ifdef CONFIG_NVM_PBLK_DEBUG
			atomic_dec(&rwb->inflight_flush_point);
#endif
		}
		/* Complete any bios waiting on this entry's flush point */
		while ((original_bio = bio_list_pop(&w_ctx->bios)))
			bio_endio(original_bio);
	}

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
							c_ctx->nr_padded);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(rqd->nr_ppas, &pblk->sync_writes);
#endif

	ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);

	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);
	return ret;
}
static unsigned long pblk_end_queued_w_bio(struct pblk *pblk,
					   struct nvm_rq *rqd,
					   struct pblk_c_ctx *c_ctx)
{
	list_del(&c_ctx->list);
	return pblk_end_w_bio(pblk, rqd, c_ctx);
}
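
/*
 * Completions must release ring buffer entries in submission order so the
 * sync pointer only advances over completed data. A completion that arrives
 * out of order is parked on pblk->compl_list and drained once the in-order
 * completion shows up.
 */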
static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
				struct pblk_c_ctx *c_ctx)
{
	struct pblk_c_ctx *c, *r;
	unsigned long flags;
	unsigned long pos;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
#endif
	pblk_up_rq(pblk, c_ctx->lun_bitmap);

	pos = pblk_rb_sync_init(&pblk->rwb, &flags);
	if (pos == c_ctx->sentry) {
		pos = pblk_end_w_bio(pblk, rqd, c_ctx);

retry:
		list_for_each_entry_safe(c, r, &pblk->compl_list, list) {
			rqd = nvm_rq_from_c_ctx(c);
			if (c->sentry == pos) {
				pos = pblk_end_queued_w_bio(pblk, rqd, c);
				goto retry;
			}
		}
	} else {
		WARN_ON(nvm_rq_from_c_ctx(c_ctx) != rqd);
		list_add_tail(&c_ctx->list, &pblk->compl_list);
	}
	pblk_rb_sync_end(&pblk->rwb, &flags);
}
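
/*
 * Write error handling: when a write fails, the remaining sectors of the
 * affected chunk are marked as mapped and invalid so the line can be picked
 * up by write-error garbage collection, and the failed ring buffer entries
 * are queued for resubmission to a new location.
 */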
/* Map remaining sectors in chunk, starting from ppa */
static void pblk_map_remaining(struct pblk *pblk, struct ppa_addr *ppa)
{
	struct pblk_line *line;
	struct ppa_addr map_ppa = *ppa;
	u64 paddr;
	int done = 0;

	line = pblk_ppa_to_line(pblk, *ppa);
	spin_lock(&line->lock);

	while (!done) {
		paddr = pblk_dev_ppa_to_line_addr(pblk, map_ppa);

		if (!test_and_set_bit(paddr, line->map_bitmap))
			line->left_msecs--;

		if (!test_and_set_bit(paddr, line->invalid_bitmap))
			le32_add_cpu(line->vsc, -1);

		done = nvm_next_ppa_in_chk(pblk->dev, &map_ppa);
	}

	line->w_err_gc->has_write_err = 1;
	spin_unlock(&line->lock);
}
static void pblk_prepare_resubmit(struct pblk *pblk, unsigned int sentry,
				  unsigned int nr_entries)
{
	struct pblk_rb *rb = &pblk->rwb;
	struct pblk_rb_entry *entry;
	struct pblk_line *line;
	struct pblk_w_ctx *w_ctx;
	struct ppa_addr ppa_l2p;
	int flags;
	unsigned int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_entries; i++) {
		entry = &rb->entries[pblk_rb_ptr_wrap(rb, sentry, i)];
		w_ctx = &entry->w_ctx;

		/* Check if the lba has been overwritten */
		ppa_l2p = pblk_trans_map_get(pblk, w_ctx->lba);
		if (!pblk_ppa_comp(ppa_l2p, entry->cacheline))
			w_ctx->lba = ADDR_EMPTY;

		/* Mark up the entry as submittable again */
		flags = READ_ONCE(w_ctx->flags);
		flags |= PBLK_WRITTEN_DATA;
		/* Release flags on write context. Protect from writes */
		smp_store_release(&w_ctx->flags, flags);

		/* Decrease the reference count to the line as we will
		 * re-map these entries
		 */
		line = pblk_ppa_to_line(pblk, w_ctx->ppa);
		kref_put(&line->ref, pblk_line_put);
	}
	spin_unlock(&pblk->trans_lock);
}
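
/*
 * Stash the failed request's buffer window on pblk->resubmit_list; the write
 * thread picks it up before accepting new data (see pblk_submit_write).
 */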
static void pblk_queue_resubmit(struct pblk *pblk, struct pblk_c_ctx *c_ctx)
{
	struct pblk_c_ctx *r_ctx;

	r_ctx = kzalloc(sizeof(struct pblk_c_ctx), GFP_KERNEL);
	if (!r_ctx)
		return;

	r_ctx->lun_bitmap = NULL;
	r_ctx->sentry = c_ctx->sentry;
	r_ctx->nr_valid = c_ctx->nr_valid;
	r_ctx->nr_padded = c_ctx->nr_padded;

	spin_lock(&pblk->resubmit_lock);
	list_add_tail(&r_ctx->list, &pblk->resubmit_list);
	spin_unlock(&pblk->resubmit_lock);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(c_ctx->nr_valid, &pblk->recov_writes);
#endif
}
static void pblk_submit_rec(struct work_struct *work)
{
	struct pblk_rec_ctx *recovery =
			container_of(work, struct pblk_rec_ctx, ws_rec);
	struct pblk *pblk = recovery->pblk;
	struct nvm_rq *rqd = recovery->rqd;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	pblk_log_write_err(pblk, rqd);

	pblk_map_remaining(pblk, ppa_list);
	pblk_queue_resubmit(pblk, c_ctx);

	pblk_up_rq(pblk, c_ctx->lun_bitmap);
	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
							c_ctx->nr_padded);
	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);
	mempool_free(recovery, &pblk->rec_pool);

	atomic_dec(&pblk->inflight_io);
	pblk_write_kick(pblk);
}
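
/*
 * Called from the end_io path, which may run in interrupt context: allocate
 * the recovery context atomically and defer the actual recovery work to the
 * close workqueue.
 */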
static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_rec_ctx *recovery;

	recovery = mempool_alloc(&pblk->rec_pool, GFP_ATOMIC);
	if (!recovery) {
		pblk_err(pblk, "could not allocate recovery work\n");
		return;
	}

	recovery->pblk = pblk;
	recovery->rqd = rqd;
	INIT_WORK(&recovery->ws_rec, pblk_submit_rec);
	queue_work(pblk->close_wq, &recovery->ws_rec);
}
static void pblk_end_io_write(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);

	if (rqd->error) {
		pblk_end_w_fail(pblk, rqd);
		return;
	} else {
		if (trace_pblk_chunk_state_enabled())
			pblk_check_chunk_state_update(pblk, rqd);
#ifdef CONFIG_NVM_PBLK_DEBUG
		WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif
	}

	pblk_complete_write(pblk, rqd, c_ctx);
	atomic_dec(&pblk->inflight_io);
}
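
/*
 * Completion for emeta (line close metadata) writes: once all emeta sectors
 * of a line have synced, the line close work is scheduled.
 */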
static void pblk_end_io_write_meta(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_line *line = m_ctx->private;
	struct pblk_emeta *emeta = line->emeta;
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
	int sync;

	pblk_up_chunk(pblk, ppa_list[0]);

	if (rqd->error) {
		pblk_log_write_err(pblk, rqd);
		pblk_err(pblk, "metadata I/O failed. Line %d\n", line->id);
		line->w_err_gc->has_write_err = 1;
	} else {
		if (trace_pblk_chunk_state_enabled())
			pblk_check_chunk_state_update(pblk, rqd);
	}

	sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
	if (sync == emeta->nr_entries)
		pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws,
						GFP_ATOMIC, pblk->close_wq);

	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);

	atomic_dec(&pblk->inflight_io);
}
static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   unsigned int nr_secs, nvm_end_io_fn(*end_io))
{
	/* Setup write request */
	rqd->opcode = NVM_OP_PWRITE;
	rqd->nr_ppas = nr_secs;
	rqd->is_seq = 1;
	rqd->private = pblk;
	rqd->end_io = end_io;

	return pblk_alloc_rqd_meta(pblk, rqd);
}
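
/*
 * Build the write request: allocate the per-request LUN bitmap that tracks
 * the LUNs touched by this request (released via pblk_up_rq on completion)
 * and map the cached entries to physical addresses. While the next line
 * still has blocks pending erase, the erase-aware mapping path is used so an
 * erase can be scheduled alongside the write.
 */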
static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   struct ppa_addr *erase_ppa)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *e_line = pblk_line_get_erase(pblk);
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	unsigned int valid = c_ctx->nr_valid;
	unsigned int padded = c_ctx->nr_padded;
	unsigned int nr_secs = valid + padded;
	unsigned long *lun_bitmap;
	int ret;

	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
	if (!lun_bitmap)
		return -ENOMEM;
	c_ctx->lun_bitmap = lun_bitmap;

	ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write);
	if (ret) {
		kfree(lun_bitmap);
		return ret;
	}

	if (likely(!e_line || !atomic_read(&e_line->left_eblks)))
		pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, valid, 0);
	else
		pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
							valid, erase_ppa);

	return 0;
}
static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
				  unsigned int secs_to_flush)
{
	int secs_to_sync;

	secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush);

#ifdef CONFIG_NVM_PBLK_DEBUG
	if ((!secs_to_sync && secs_to_flush)
			|| (secs_to_sync < 0)
			|| (secs_to_sync > secs_avail && !secs_to_flush)) {
		pblk_err(pblk, "bad sector calculation (a:%d,s:%d,f:%d)\n",
				secs_avail, secs_to_sync, secs_to_flush);
	}
#endif

	return secs_to_sync;
}
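
/*
 * Write out the next chunk of a closed line's emeta. Emeta is written
 * incrementally, min_write_pgs sectors at a time, interleaved with data
 * writes; once the whole emeta buffer has been queued the line is removed
 * from the emeta list and is closed when the last emeta I/O completes.
 */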
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = meta_line->emeta;
	struct ppa_addr *ppa_list;
	struct pblk_g_ctx *m_ctx;
	struct bio *bio;
	struct nvm_rq *rqd;
	void *data;
	u64 paddr;
	int rq_ppas = pblk->min_write_pgs;
	int id = meta_line->id;
	int rq_len;
	int i, j;
	int ret;

	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);

	m_ctx = nvm_rq_to_pdu(rqd);
	m_ctx->private = meta_line;

	rq_len = rq_ppas * geo->csecs;
	data = ((void *)emeta->buf) + emeta->mem;

	bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
					l_mg->emeta_alloc_type, GFP_KERNEL);
	if (IS_ERR(bio)) {
		pblk_err(pblk, "failed to map emeta io\n");
		ret = PTR_ERR(bio);
		goto fail_free_rqd;
	}
	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	rqd->bio = bio;

	ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
	if (ret)
		goto fail_free_bio;

	ppa_list = nvm_rq_to_ppa_list(rqd);
	for (i = 0; i < rqd->nr_ppas; ) {
		spin_lock(&meta_line->lock);
		paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
		spin_unlock(&meta_line->lock);
		for (j = 0; j < rq_ppas; j++, i++, paddr++)
			ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
	}

	spin_lock(&l_mg->close_lock);
	emeta->mem += rq_len;
	if (emeta->mem >= lm->emeta_len[0])
		list_del(&meta_line->list);
	spin_unlock(&l_mg->close_lock);

	pblk_down_chunk(pblk, ppa_list[0]);

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
		goto fail_rollback;
	}

	return NVM_IO_OK;

fail_rollback:
	pblk_up_chunk(pblk, ppa_list[0]);
	spin_lock(&l_mg->close_lock);
	pblk_dealloc_page(pblk, meta_line, rq_ppas);
	/* Put the line back on the emeta list so its metadata is retried */
	list_add(&meta_line->list, &l_mg->emeta_list);
	spin_unlock(&l_mg->close_lock);
fail_free_bio:
	bio_put(bio);
fail_free_rqd:
	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
	return ret;
}
static inline bool pblk_valid_meta_ppa(struct pblk *pblk,
				       struct pblk_line *meta_line,
				       struct nvm_rq *data_rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_c_ctx *data_c_ctx = nvm_rq_to_pdu(data_rqd);
	struct pblk_line *data_line = pblk_line_get_data(pblk);
	struct ppa_addr ppa, ppa_opt;
	u64 paddr;
	int pos_opt;

	/* Schedule a metadata I/O that is half the distance from the data I/O
	 * with regards to the number of LUNs forming the pblk instance. This
	 * balances LUN conflicts across every I/O.
	 *
	 * When the LUN configuration changes (e.g., due to GC), this distance
	 * can align, which would result in metadata and data I/Os colliding.
	 * In this case, modify the distance so it is no longer optimal, but
	 * still moves toward the optimum in the right direction.
	 */
	paddr = pblk_lookup_page(pblk, meta_line);
	ppa = addr_to_gen_ppa(pblk, paddr, 0);
	ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
	pos_opt = pblk_ppa_to_pos(geo, ppa_opt);

	if (test_bit(pos_opt, data_c_ctx->lun_bitmap) ||
				test_bit(pos_opt, data_line->blk_bitmap))
		return true;

	if (unlikely(pblk_ppa_comp(ppa_opt, ppa)))
		data_line->meta_distance--;

	return false;
}
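
/*
 * Decide whether an emeta write should accompany this data write: only if
 * the oldest line on the emeta list still has metadata left to write and the
 * metadata placement does not collide with the LUNs used by the data I/O.
 */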
static struct pblk_line *pblk_should_submit_meta_io(struct pblk *pblk,
						    struct nvm_rq *data_rqd)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *meta_line;

	spin_lock(&l_mg->close_lock);
	if (list_empty(&l_mg->emeta_list)) {
		spin_unlock(&l_mg->close_lock);
		return NULL;
	}
	meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
	if (meta_line->emeta->mem >= lm->emeta_len[0]) {
		spin_unlock(&l_mg->close_lock);
		return NULL;
	}
	spin_unlock(&l_mg->close_lock);

	if (!pblk_valid_meta_ppa(pblk, meta_line, data_rqd))
		return NULL;

	return meta_line;
}
static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct ppa_addr erase_ppa;
	struct pblk_line *meta_line;
	int err;

	pblk_ppa_set_empty(&erase_ppa);

	/* Assign lbas to ppas and populate request structure */
	err = pblk_setup_w_rq(pblk, rqd, &erase_ppa);
	if (err) {
		pblk_err(pblk, "could not setup write request: %d\n", err);
		return NVM_IO_ERR;
	}

	meta_line = pblk_should_submit_meta_io(pblk, rqd);

	/* Submit data write for current data line */
	err = pblk_submit_io(pblk, rqd);
	if (err) {
		pblk_err(pblk, "data I/O submission failed: %d\n", err);
		return NVM_IO_ERR;
	}

	if (!pblk_ppa_empty(erase_ppa)) {
		/* Submit erase for next data line */
		if (pblk_blk_erase_async(pblk, erase_ppa)) {
			struct pblk_line *e_line = pblk_line_get_erase(pblk);
			struct nvm_tgt_dev *dev = pblk->dev;
			struct nvm_geo *geo = &dev->geo;
			int bit;

			atomic_inc(&e_line->left_eblks);
			bit = pblk_ppa_to_pos(geo, erase_ppa);
			WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
		}
	}

	if (meta_line) {
		/* Submit metadata write for previous data line */
		err = pblk_submit_meta_io(pblk, meta_line);
		if (err) {
			pblk_err(pblk, "metadata I/O submission failed: %d\n",
					err);
			return NVM_IO_ERR;
		}
	}

	return NVM_IO_OK;
}
static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = rqd->bio;

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, bio, c_ctx->nr_valid,
							c_ctx->nr_padded);
}
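
/*
 * One iteration of the write thread: resubmit previously failed entries
 * first; otherwise only form a new request once at least min_write_pgs
 * sectors are buffered or a flush point is pending.
 */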
static int pblk_submit_write(struct pblk *pblk)
{
	struct bio *bio;
	struct nvm_rq *rqd;
	unsigned int secs_avail, secs_to_sync, secs_to_com;
	unsigned int secs_to_flush;
	unsigned long pos;
	unsigned int resubmit;

	spin_lock(&pblk->resubmit_lock);
	resubmit = !list_empty(&pblk->resubmit_list);
	spin_unlock(&pblk->resubmit_lock);

	/* Resubmit failed writes first */
	if (resubmit) {
		struct pblk_c_ctx *r_ctx;

		spin_lock(&pblk->resubmit_lock);
		r_ctx = list_first_entry(&pblk->resubmit_list,
					struct pblk_c_ctx, list);
		list_del(&r_ctx->list);
		spin_unlock(&pblk->resubmit_lock);

		secs_avail = r_ctx->nr_valid;
		pos = r_ctx->sentry;
		pblk_prepare_resubmit(pblk, pos, secs_avail);
		secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
				secs_avail);

		kfree(r_ctx);
	} else {
		/* If there are no sectors in the cache,
		 * flushes (bios without data) will be cleared on
		 * the cache threads
		 */
		secs_avail = pblk_rb_read_count(&pblk->rwb);
		if (!secs_avail)
			return 1;

		secs_to_flush = pblk_rb_flush_point_count(&pblk->rwb);
		if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
			return 1;

		secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
					secs_to_flush);
		if (secs_to_sync > pblk->max_write_pgs) {
			pblk_err(pblk, "bad buffer sync calculation\n");
			return 1;
		}

		secs_to_com = (secs_to_sync > secs_avail) ?
			secs_avail : secs_to_sync;
		pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);
	}

	bio = bio_alloc(GFP_KERNEL, secs_to_sync);
	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE);
	rqd->bio = bio;

	if (pblk_rb_read_to_bio(&pblk->rwb, rqd, pos, secs_to_sync,
								secs_avail)) {
		pblk_err(pblk, "corrupted write bio\n");
		goto fail_put_bio;
	}

	if (pblk_submit_io_set(pblk, rqd))
		goto fail_free_bio;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(secs_to_sync, &pblk->sub_writes);
#endif
	return 0;

fail_free_bio:
	pblk_free_write_rqd(pblk, rqd);
fail_put_bio:
	bio_put(bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);
	return 1;
}
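
/* Write thread: drain the write buffer until stopped, sleeping when idle */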
int pblk_write_ts(void *data)
{
	struct pblk *pblk = data;

	while (!kthread_should_stop()) {
		if (!pblk_submit_write(pblk))
			continue;
		set_current_state(TASK_INTERRUPTIBLE);
		io_schedule();
	}

	return 0;
}