[linux.git] / drivers / lightnvm / pblk-core.c
02a1520d 1// SPDX-License-Identifier: GPL-2.0
a4bd217b
JG
2/*
3 * Copyright (C) 2016 CNEX Labs
4 * Initial release: Javier Gonzalez <[email protected]>
5 * Matias Bjorling <[email protected]>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * pblk-core.c - pblk's core functionality
17 *
18 */
19
4c44abf4
HH
20#define CREATE_TRACE_POINTS
21
a4bd217b 22#include "pblk.h"
4c44abf4 23#include "pblk-trace.h"
a4bd217b 24
8bd40020
JG
25static void pblk_line_mark_bb(struct work_struct *work)
26{
27 struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
28 ws);
29 struct pblk *pblk = line_ws->pblk;
30 struct nvm_tgt_dev *dev = pblk->dev;
31 struct ppa_addr *ppa = line_ws->priv;
32 int ret;
33
aff3fb18 34 ret = nvm_set_chunk_meta(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
8bd40020
JG
35 if (ret) {
36 struct pblk_line *line;
37 int pos;
38
cb21665c 39 line = pblk_ppa_to_line(pblk, *ppa);
b1bcfda1 40 pos = pblk_ppa_to_pos(&dev->geo, *ppa);
8bd40020 41
4e495a46 42 pblk_err(pblk, "failed to mark bb, line:%d, pos:%d\n",
8bd40020
JG
43 line->id, pos);
44 }
45
46 kfree(ppa);
b906bbb6 47 mempool_free(line_ws, &pblk->gen_ws_pool);
8bd40020
JG
48}
49
a4bd217b 50static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
32ef9412 51 struct ppa_addr ppa_addr)
a4bd217b
JG
52{
53 struct nvm_tgt_dev *dev = pblk->dev;
54 struct nvm_geo *geo = &dev->geo;
32ef9412
JG
55 struct ppa_addr *ppa;
56 int pos = pblk_ppa_to_pos(geo, ppa_addr);
a4bd217b 57
4e495a46 58 pblk_debug(pblk, "erase failed: line:%d, pos:%d\n", line->id, pos);
a4bd217b
JG
59 atomic_long_inc(&pblk->erase_failed);
60
a44f53fa 61 atomic_dec(&line->blk_in_line);
a4bd217b 62 if (test_and_set_bit(pos, line->blk_bitmap))
4e495a46 63 pblk_err(pblk, "attempted to erase bb: line:%d, pos:%d\n",
a4bd217b
JG
64 line->id, pos);
65
32ef9412
JG
66 /* Not necessary to mark bad blocks on 2.0 spec. */
67 if (geo->version == NVM_OCSSD_SPEC_20)
68 return;
69
70 ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
71 if (!ppa)
72 return;
73
74 *ppa = ppa_addr;
b84ae4a8
JG
75 pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
76 GFP_ATOMIC, pblk->bb_wq);
a4bd217b
JG
77}
78
79static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
80{
32ef9412
JG
81 struct nvm_tgt_dev *dev = pblk->dev;
82 struct nvm_geo *geo = &dev->geo;
83 struct nvm_chk_meta *chunk;
a4bd217b 84 struct pblk_line *line;
32ef9412 85 int pos;
a4bd217b 86
cb21665c 87 line = pblk_ppa_to_line(pblk, rqd->ppa_addr);
32ef9412
JG
88 pos = pblk_ppa_to_pos(geo, rqd->ppa_addr);
89 chunk = &line->chks[pos];
90
a4bd217b
JG
91 atomic_dec(&line->left_seblks);
92
93 if (rqd->error) {
4209c31c
HH
94 trace_pblk_chunk_reset(pblk_disk_name(pblk),
95 &rqd->ppa_addr, PBLK_CHUNK_RESET_FAILED);
96
32ef9412
JG
97 chunk->state = NVM_CHK_ST_OFFLINE;
98 pblk_mark_bb(pblk, line, rqd->ppa_addr);
99 } else {
4209c31c
HH
100 trace_pblk_chunk_reset(pblk_disk_name(pblk),
101 &rqd->ppa_addr, PBLK_CHUNK_RESET_DONE);
102
32ef9412 103 chunk->state = NVM_CHK_ST_FREE;
a4bd217b 104 }
588726d3 105
4c44abf4
HH
106 trace_pblk_chunk_state(pblk_disk_name(pblk), &rqd->ppa_addr,
107 chunk->state);
108
588726d3 109 atomic_dec(&pblk->inflight_io);
a4bd217b
JG
110}
111
112/* Erase completion assumes that only one block is erased at a time */
113static void pblk_end_io_erase(struct nvm_rq *rqd)
114{
115 struct pblk *pblk = rqd->private;
116
a4bd217b 117 __pblk_end_io_erase(pblk, rqd);
b906bbb6 118 mempool_free(rqd, &pblk->e_rq_pool);
a4bd217b
JG
119}
120
32ef9412
JG
121/*
122 * Get information for all chunks from the device.
123 *
090ee26f 124 * The caller is responsible for freeing (with vfree) the returned structure
32ef9412 125 */
aff3fb18 126struct nvm_chk_meta *pblk_get_chunk_meta(struct pblk *pblk)
32ef9412
JG
127{
128 struct nvm_tgt_dev *dev = pblk->dev;
129 struct nvm_geo *geo = &dev->geo;
130 struct nvm_chk_meta *meta;
131 struct ppa_addr ppa;
132 unsigned long len;
133 int ret;
134
135 ppa.ppa = 0;
136
137 len = geo->all_chunks * sizeof(*meta);
090ee26f 138 meta = vzalloc(len);
32ef9412
JG
139 if (!meta)
140 return ERR_PTR(-ENOMEM);
141
aff3fb18 142 ret = nvm_get_chunk_meta(dev, ppa, geo->all_chunks, meta);
32ef9412
JG
143 if (ret) {
144 vfree(meta);
145 return ERR_PTR(-EIO);
146 }
147
148 return meta;
149}
150
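/* Index into the flat chunk-metadata array returned by
 * pblk_get_chunk_meta(): entries are laid out group-major, then
 * parallel unit, then chunk within the unit.
 */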
151struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
152 struct nvm_chk_meta *meta,
153 struct ppa_addr ppa)
154{
155 struct nvm_tgt_dev *dev = pblk->dev;
156 struct nvm_geo *geo = &dev->geo;
157 int ch_off = ppa.m.grp * geo->num_chk * geo->num_lun;
158 int lun_off = ppa.m.pu * geo->num_chk;
159 int chk_off = ppa.m.chk;
160
161 return meta + ch_off + lun_off + chk_off;
162}
163
0880a9aa
JG
164void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
165 u64 paddr)
a4bd217b
JG
166{
167 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
168 struct list_head *move_list = NULL;
169
170 /* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
171 * table is modified with reclaimed sectors, a check is done to ensure
172 * that newer updates are not overwritten.
173 */
174 spin_lock(&line->lock);
d340121e 175 WARN_ON(line->state == PBLK_LINESTATE_FREE);
a4bd217b
JG
176
177 if (test_and_set_bit(paddr, line->invalid_bitmap)) {
178 WARN_ONCE(1, "pblk: double invalidate\n");
179 spin_unlock(&line->lock);
180 return;
181 }
dd2a4343 182 le32_add_cpu(line->vsc, -1);
a4bd217b
JG
183
184 if (line->state == PBLK_LINESTATE_CLOSED)
185 move_list = pblk_line_gc_list(pblk, line);
186 spin_unlock(&line->lock);
187
188 if (move_list) {
189 spin_lock(&l_mg->gc_lock);
190 spin_lock(&line->lock);
191 /* Prevent moving a line that has just been chosen for GC */
d340121e 192 if (line->state == PBLK_LINESTATE_GC) {
a4bd217b
JG
193 spin_unlock(&line->lock);
194 spin_unlock(&l_mg->gc_lock);
195 return;
196 }
197 spin_unlock(&line->lock);
198
199 list_move_tail(&line->list, move_list);
200 spin_unlock(&l_mg->gc_lock);
201 }
202}
203
204void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
205{
206 struct pblk_line *line;
207 u64 paddr;
a4bd217b 208
880eda54 209#ifdef CONFIG_NVM_PBLK_DEBUG
a4bd217b
JG
210 /* Callers must ensure that the ppa points to a device address */
211 BUG_ON(pblk_addr_in_cache(ppa));
212 BUG_ON(pblk_ppa_empty(ppa));
213#endif
214
cb21665c 215 line = pblk_ppa_to_line(pblk, ppa);
a4bd217b
JG
216 paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);
217
218 __pblk_map_invalidate(pblk, line, paddr);
219}
220
a4bd217b
JG
221static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
222 unsigned int nr_secs)
223{
224 sector_t lba;
225
226 spin_lock(&pblk->trans_lock);
227 for (lba = slba; lba < slba + nr_secs; lba++) {
228 struct ppa_addr ppa;
229
230 ppa = pblk_trans_map_get(pblk, lba);
231
232 if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
233 pblk_map_invalidate(pblk, ppa);
234
235 pblk_ppa_set_empty(&ppa);
236 pblk_trans_map_set(pblk, lba, ppa);
237 }
238 spin_unlock(&pblk->trans_lock);
239}
240
45dcf29b
JG
241int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
242{
243 struct nvm_tgt_dev *dev = pblk->dev;
244
245 rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
246 &rqd->dma_meta_list);
247 if (!rqd->meta_list)
248 return -ENOMEM;
249
250 if (rqd->nr_ppas == 1)
251 return 0;
252
253 rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
254 rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;
255
256 return 0;
257}
258
259void pblk_free_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
260{
261 struct nvm_tgt_dev *dev = pblk->dev;
262
263 if (rqd->meta_list)
264 nvm_dev_dma_free(dev->parent, rqd->meta_list,
265 rqd->dma_meta_list);
266}
267
67bf26a3
JG
268/* Caller must guarantee that the request is a valid type */
269struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
a4bd217b
JG
270{
271 mempool_t *pool;
272 struct nvm_rq *rqd;
273 int rq_size;
274
67bf26a3
JG
275 switch (type) {
276 case PBLK_WRITE:
277 case PBLK_WRITE_INT:
b906bbb6 278 pool = &pblk->w_rq_pool;
a4bd217b 279 rq_size = pblk_w_rq_size;
67bf26a3
JG
280 break;
281 case PBLK_READ:
b906bbb6 282 pool = &pblk->r_rq_pool;
084ec9ba 283 rq_size = pblk_g_rq_size;
67bf26a3
JG
284 break;
285 default:
b906bbb6 286 pool = &pblk->e_rq_pool;
67bf26a3 287 rq_size = pblk_g_rq_size;
a4bd217b
JG
288 }
289
290 rqd = mempool_alloc(pool, GFP_KERNEL);
291 memset(rqd, 0, rq_size);
292
293 return rqd;
294}
295
67bf26a3
JG
296/* Typically used on completion path. Cannot guarantee request consistency */
297void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
a4bd217b
JG
298{
299 mempool_t *pool;
300
67bf26a3
JG
301 switch (type) {
302 case PBLK_WRITE:
303 kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
884b031b 304 /* fall through */
67bf26a3 305 case PBLK_WRITE_INT:
b906bbb6 306 pool = &pblk->w_rq_pool;
67bf26a3
JG
307 break;
308 case PBLK_READ:
b906bbb6 309 pool = &pblk->r_rq_pool;
67bf26a3
JG
310 break;
311 case PBLK_ERASE:
b906bbb6 312 pool = &pblk->e_rq_pool;
67bf26a3
JG
313 break;
314 default:
4e495a46 315 pblk_err(pblk, "trying to free unknown rqd type\n");
67bf26a3
JG
316 return;
317 }
a4bd217b 318
45dcf29b 319 pblk_free_rqd_meta(pblk, rqd);
a4bd217b
JG
320 mempool_free(rqd, pool);
321}
322
323void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
324 int nr_pages)
325{
326 struct bio_vec bv;
327 int i;
328
329 WARN_ON(off + nr_pages != bio->bi_vcnt);
330
a4bd217b
JG
331 for (i = off; i < nr_pages + off; i++) {
332 bv = bio->bi_io_vec[i];
b906bbb6 333 mempool_free(bv.bv_page, &pblk->page_bio_pool);
a4bd217b
JG
334 }
335}
336
337int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
338 int nr_pages)
339{
340 struct request_queue *q = pblk->dev->q;
341 struct page *page;
342 int i, ret;
343
344 for (i = 0; i < nr_pages; i++) {
b906bbb6 345 page = mempool_alloc(&pblk->page_bio_pool, flags);
a4bd217b
JG
346
347 ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
348 if (ret != PBLK_EXPOSED_PAGE_SIZE) {
4e495a46 349 pblk_err(pblk, "could not add page to bio\n");
b906bbb6 350 mempool_free(page, &pblk->page_bio_pool);
a4bd217b
JG
351 goto err;
352 }
353 }
354
355 return 0;
356err:
f142ac0b 357 pblk_bio_free_pages(pblk, bio, (bio->bi_vcnt - i), i);
a4bd217b
JG
358 return -1;
359}
360
cc9c9a00 361void pblk_write_kick(struct pblk *pblk)
a4bd217b
JG
362{
363 wake_up_process(pblk->writer_ts);
364 mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
365}
366
87c1d2d3 367void pblk_write_timer_fn(struct timer_list *t)
a4bd217b 368{
87c1d2d3 369 struct pblk *pblk = from_timer(pblk, t, wtimer);
a4bd217b
JG
370
371 /* kick the write thread every tick to flush outstanding data */
372 pblk_write_kick(pblk);
373}
374
375void pblk_write_should_kick(struct pblk *pblk)
376{
377 unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);
378
379 if (secs_avail >= pblk->min_write_pgs)
380 pblk_write_kick(pblk);
381}
382
8bd40020 383static void pblk_wait_for_meta(struct pblk *pblk)
a4bd217b 384{
588726d3
JG
385 do {
386 if (!atomic_read(&pblk->inflight_io))
387 break;
a4bd217b 388
588726d3
JG
389 schedule();
390 } while (1);
391}
a4bd217b 392
588726d3
JG
393static void pblk_flush_writer(struct pblk *pblk)
394{
395 pblk_rb_flush(&pblk->rwb);
396 do {
ee8d5c1a 397 if (!pblk_rb_sync_count(&pblk->rwb))
588726d3 398 break;
a4bd217b 399
ee8d5c1a 400 pblk_write_kick(pblk);
588726d3
JG
401 schedule();
402 } while (1);
a4bd217b
JG
403}
404
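/* Classify a line for garbage collection based on its valid sector
 * count (vsc): write-error lines go to the werr list first, then the
 * full/high/mid/low/empty buckets; an impossible vsc marks the line
 * corrupt. Returns the list to move the line to, or NULL if its GC
 * group is unchanged.
 */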
405struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
406{
407 struct pblk_line_meta *lm = &pblk->lm;
408 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
409 struct list_head *move_list = NULL;
dd2a4343 410 int vsc = le32_to_cpu(*line->vsc);
a4bd217b 411
476118c9
JG
412 lockdep_assert_held(&line->lock);
413
48b8d208
HH
414 if (line->w_err_gc->has_write_err) {
415 if (line->gc_group != PBLK_LINEGC_WERR) {
416 line->gc_group = PBLK_LINEGC_WERR;
417 move_list = &l_mg->gc_werr_list;
418 pblk_rl_werr_line_in(&pblk->rl);
419 }
420 } else if (!vsc) {
a4bd217b
JG
421 if (line->gc_group != PBLK_LINEGC_FULL) {
422 line->gc_group = PBLK_LINEGC_FULL;
423 move_list = &l_mg->gc_full_list;
424 }
b20ba1bc 425 } else if (vsc < lm->high_thrs) {
a4bd217b
JG
426 if (line->gc_group != PBLK_LINEGC_HIGH) {
427 line->gc_group = PBLK_LINEGC_HIGH;
428 move_list = &l_mg->gc_high_list;
429 }
b20ba1bc 430 } else if (vsc < lm->mid_thrs) {
a4bd217b
JG
431 if (line->gc_group != PBLK_LINEGC_MID) {
432 line->gc_group = PBLK_LINEGC_MID;
433 move_list = &l_mg->gc_mid_list;
434 }
dd2a4343 435 } else if (vsc < line->sec_in_line) {
a4bd217b
JG
436 if (line->gc_group != PBLK_LINEGC_LOW) {
437 line->gc_group = PBLK_LINEGC_LOW;
438 move_list = &l_mg->gc_low_list;
439 }
dd2a4343 440 } else if (vsc == line->sec_in_line) {
a4bd217b
JG
441 if (line->gc_group != PBLK_LINEGC_EMPTY) {
442 line->gc_group = PBLK_LINEGC_EMPTY;
443 move_list = &l_mg->gc_empty_list;
444 }
445 } else {
446 line->state = PBLK_LINESTATE_CORRUPT;
f2937232
HH
447 trace_pblk_line_state(pblk_disk_name(pblk), line->id,
448 line->state);
449
a4bd217b
JG
450 line->gc_group = PBLK_LINEGC_NONE;
451 move_list = &l_mg->corrupt_list;
4e495a46 452 pblk_err(pblk, "corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
dd2a4343 453 line->id, vsc,
a4bd217b
JG
454 line->sec_in_line,
455 lm->high_thrs, lm->mid_thrs);
456 }
457
458 return move_list;
459}
460
461void pblk_discard(struct pblk *pblk, struct bio *bio)
462{
463 sector_t slba = pblk_get_lba(bio);
464 sector_t nr_secs = pblk_get_secs(bio);
465
466 pblk_invalidate_range(pblk, slba, nr_secs);
467}
468
a4bd217b
JG
469void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
470{
471 atomic_long_inc(&pblk->write_failed);
880eda54 472#ifdef CONFIG_NVM_PBLK_DEBUG
a4bd217b
JG
473 pblk_print_failed_rqd(pblk, rqd, rqd->error);
474#endif
475}
476
477void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
478{
479 /* Empty page read is not necessarily an error (e.g., L2P recovery) */
480 if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
481 atomic_long_inc(&pblk->read_empty);
482 return;
483 }
484
485 switch (rqd->error) {
486 case NVM_RSP_WARN_HIGHECC:
487 atomic_long_inc(&pblk->read_high_ecc);
488 break;
489 case NVM_RSP_ERR_FAILECC:
490 case NVM_RSP_ERR_FAILCRC:
491 atomic_long_inc(&pblk->read_failed);
492 break;
493 default:
4e495a46 494 pblk_err(pblk, "unknown read error:%d\n", rqd->error);
a4bd217b 495 }
880eda54 496#ifdef CONFIG_NVM_PBLK_DEBUG
a4bd217b
JG
497 pblk_print_failed_rqd(pblk, rqd, rqd->error);
498#endif
499}
500
c2e9f5d4
JG
501void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
502{
503 pblk->sec_per_write = sec_per_write;
504}
505
a4bd217b
JG
506int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
507{
508 struct nvm_tgt_dev *dev = pblk->dev;
509
b6730dd4 510 atomic_inc(&pblk->inflight_io);
a4bd217b 511
880eda54 512#ifdef CONFIG_NVM_PBLK_DEBUG
b6730dd4
JG
513 if (pblk_check_io(pblk, rqd))
514 return NVM_IO_ERR;
1a94b2d4 515#endif
a4bd217b 516
1a94b2d4
JG
517 return nvm_submit_io(dev, rqd);
518}
a4bd217b 519
4c44abf4
HH
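/* Trace chunk state transitions caused by a write: a sector at chunk
 * address 0 opens the chunk, one at the chunk's cnlb boundary closes it.
 */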
520void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd)
521{
522 struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
523
524 int i;
525
526 for (i = 0; i < rqd->nr_ppas; i++) {
527 struct ppa_addr *ppa = &ppa_list[i];
528 struct nvm_chk_meta *chunk = pblk_dev_ppa_to_chunk(pblk, *ppa);
529 u64 caddr = pblk_dev_ppa_to_chunk_addr(pblk, *ppa);
530
531 if (caddr == 0)
532 trace_pblk_chunk_state(pblk_disk_name(pblk),
533 ppa, NVM_CHK_ST_OPEN);
534 else if (caddr == chunk->cnlb)
535 trace_pblk_chunk_state(pblk_disk_name(pblk),
536 ppa, NVM_CHK_ST_CLOSED);
537 }
538}
539
1a94b2d4
JG
540int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
541{
542 struct nvm_tgt_dev *dev = pblk->dev;
4c44abf4 543 int ret;
1a94b2d4 544
b6730dd4 545 atomic_inc(&pblk->inflight_io);
1a94b2d4 546
880eda54 547#ifdef CONFIG_NVM_PBLK_DEBUG
b6730dd4
JG
548 if (pblk_check_io(pblk, rqd))
549 return NVM_IO_ERR;
a4bd217b 550#endif
588726d3 551
4c44abf4
HH
552 ret = nvm_submit_io_sync(dev, rqd);
553
554 if (trace_pblk_chunk_state_enabled() && !ret &&
555 rqd->opcode == NVM_OP_PWRITE)
556 pblk_check_chunk_state_update(pblk, rqd);
557
558 return ret;
a4bd217b
JG
559}
560
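/* Synchronous submission that also takes the per-chunk write semaphore
 * for the first ppa, serializing against writers on the same chunk.
 */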
253babc3
JG
561int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd)
562{
563 struct ppa_addr *ppa_list;
564 int ret;
565
566 ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
567
568 pblk_down_chunk(pblk, ppa_list[0]);
569 ret = pblk_submit_io_sync(pblk, rqd);
570 pblk_up_chunk(pblk, ppa_list[0]);
571
572 return ret;
573}
574
55e836d4
JG
575static void pblk_bio_map_addr_endio(struct bio *bio)
576{
577 bio_put(bio);
578}
579
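/* Build a bio over a metadata buffer. kmalloc'ed buffers can be mapped
 * directly with bio_map_kern(); vmalloc'ed buffers must be added to the
 * bio page by page.
 */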
a4bd217b
JG
580struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
581 unsigned int nr_secs, unsigned int len,
de54e703 582 int alloc_type, gfp_t gfp_mask)
a4bd217b
JG
583{
584 struct nvm_tgt_dev *dev = pblk->dev;
a4bd217b
JG
585 void *kaddr = data;
586 struct page *page;
587 struct bio *bio;
588 int i, ret;
589
de54e703 590 if (alloc_type == PBLK_KMALLOC_META)
a4bd217b
JG
591 return bio_map_kern(dev->q, kaddr, len, gfp_mask);
592
593 bio = bio_kmalloc(gfp_mask, nr_secs);
594 if (!bio)
595 return ERR_PTR(-ENOMEM);
596
597 for (i = 0; i < nr_secs; i++) {
598 page = vmalloc_to_page(kaddr);
599 if (!page) {
4e495a46 600 pblk_err(pblk, "could not map vmalloc bio\n");
a4bd217b
JG
601 bio_put(bio);
602 bio = ERR_PTR(-ENOMEM);
603 goto out;
604 }
605
606 ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
607 if (ret != PAGE_SIZE) {
4e495a46 608 pblk_err(pblk, "could not add page to bio\n");
a4bd217b
JG
609 bio_put(bio);
610 bio = ERR_PTR(-ENOMEM);
611 goto out;
612 }
613
614 kaddr += PAGE_SIZE;
615 }
55e836d4
JG
616
617 bio->bi_end_io = pblk_bio_map_addr_endio;
a4bd217b
JG
618out:
619 return bio;
620}
621
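/* Decide how many sectors to submit in the next write: a full
 * max-sized request when enough data is buffered, otherwise a multiple
 * of the minimum write size, or a single minimum write if a flush is
 * pending.
 */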
622int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
623 unsigned long secs_to_flush)
624{
c2e9f5d4 625 int max = pblk->sec_per_write;
a4bd217b
JG
626 int min = pblk->min_write_pgs;
627 int secs_to_sync = 0;
628
629 if (secs_avail >= max)
630 secs_to_sync = max;
631 else if (secs_avail >= min)
632 secs_to_sync = min * (secs_avail / min);
633 else if (secs_to_flush)
634 secs_to_sync = min;
635
636 return secs_to_sync;
637}
638
dd2a4343
JG
639void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
640{
641 u64 addr;
642 int i;
643
e57903fd 644 spin_lock(&line->lock);
dd2a4343
JG
645 addr = find_next_zero_bit(line->map_bitmap,
646 pblk->lm.sec_per_line, line->cur_sec);
647 line->cur_sec = addr - nr_secs;
648
649 for (i = 0; i < nr_secs; i++, line->cur_sec--)
650 WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
e57903fd 651 spin_unlock(&line->lock);
dd2a4343
JG
652}
653
654u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
a4bd217b
JG
655{
656 u64 addr;
657 int i;
658
476118c9
JG
659 lockdep_assert_held(&line->lock);
660
a4bd217b
JG
661 /* Logic error: ppa out of bounds. Prevent generating a bad address */
662 if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
663 WARN(1, "pblk: page allocation out of bounds\n");
664 nr_secs = pblk->lm.sec_per_line - line->cur_sec;
665 }
666
667 line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
668 pblk->lm.sec_per_line, line->cur_sec);
669 for (i = 0; i < nr_secs; i++, line->cur_sec++)
670 WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));
671
672 return addr;
673}
674
675u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
676{
677 u64 addr;
678
679 /* Lock needed in case a write fails and a recovery needs to remap
680 * failed write buffer entries
681 */
682 spin_lock(&line->lock);
683 addr = __pblk_alloc_page(pblk, line, nr_secs);
684 line->left_msecs -= nr_secs;
685 WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
686 spin_unlock(&line->lock);
687
688 return addr;
689}
690
dd2a4343
JG
691u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
692{
693 u64 paddr;
694
695 spin_lock(&line->lock);
696 paddr = find_next_zero_bit(line->map_bitmap,
697 pblk->lm.sec_per_line, line->cur_sec);
698 spin_unlock(&line->lock);
699
700 return paddr;
701}
702
af3fac16 703u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
a4bd217b
JG
704{
705 struct nvm_tgt_dev *dev = pblk->dev;
706 struct nvm_geo *geo = &dev->geo;
707 struct pblk_line_meta *lm = &pblk->lm;
af3fac16 708 int bit;
a4bd217b 709
af3fac16
JG
710 /* This usually only happens on bad lines */
711 bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
712 if (bit >= lm->blk_per_line)
713 return -1;
a4bd217b 714
af3fac16
JG
715 return bit * geo->ws_opt;
716}
a4bd217b 717
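/* Read the line's start metadata (smeta) back from the first good
 * block, using a synchronous sequential read.
 */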
af3fac16
JG
718int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
719{
720 struct nvm_tgt_dev *dev = pblk->dev;
721 struct pblk_line_meta *lm = &pblk->lm;
722 struct bio *bio;
723 struct nvm_rq rqd;
724 u64 paddr = pblk_line_smeta_start(pblk, line);
725 int i, ret;
63e3809c 726
a4bd217b
JG
727 memset(&rqd, 0, sizeof(struct nvm_rq));
728
af3fac16
JG
729 ret = pblk_alloc_rqd_meta(pblk, &rqd);
730 if (ret)
731 return ret;
a4bd217b 732
af3fac16 733 bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
a4bd217b
JG
734 if (IS_ERR(bio)) {
735 ret = PTR_ERR(bio);
af3fac16 736 goto clear_rqd;
a4bd217b
JG
737 }
738
739 bio->bi_iter.bi_sector = 0; /* internal bio */
af3fac16 740 bio_set_op_attrs(bio, REQ_OP_READ, 0);
a4bd217b
JG
741
742 rqd.bio = bio;
af3fac16
JG
743 rqd.opcode = NVM_OP_PREAD;
744 rqd.nr_ppas = lm->smeta_sec;
745 rqd.is_seq = 1;
a4bd217b 746
af3fac16
JG
747 for (i = 0; i < lm->smeta_sec; i++, paddr++)
748 rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
a4bd217b 749
1a94b2d4 750 ret = pblk_submit_io_sync(pblk, &rqd);
a4bd217b 751 if (ret) {
af3fac16 752 pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
a4bd217b 753 bio_put(bio);
af3fac16 754 goto clear_rqd;
a4bd217b
JG
755 }
756
588726d3 757 atomic_dec(&pblk->inflight_io);
a4bd217b 758
af3fac16
JG
759 if (rqd.error)
760 pblk_log_read_err(pblk, &rqd);
a4bd217b 761
af3fac16
JG
762clear_rqd:
763 pblk_free_rqd_meta(pblk, &rqd);
a4bd217b
JG
764 return ret;
765}
766
af3fac16
JG
767static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
768 u64 paddr)
a4bd217b
JG
769{
770 struct nvm_tgt_dev *dev = pblk->dev;
771 struct pblk_line_meta *lm = &pblk->lm;
772 struct bio *bio;
773 struct nvm_rq rqd;
af3fac16
JG
774 __le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
775 __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
a4bd217b 776 int i, ret;
a4bd217b
JG
777
778 memset(&rqd, 0, sizeof(struct nvm_rq));
779
45dcf29b
JG
780 ret = pblk_alloc_rqd_meta(pblk, &rqd);
781 if (ret)
782 return ret;
63e3809c 783
a4bd217b
JG
784 bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
785 if (IS_ERR(bio)) {
786 ret = PTR_ERR(bio);
45dcf29b 787 goto clear_rqd;
a4bd217b
JG
788 }
789
790 bio->bi_iter.bi_sector = 0; /* internal bio */
af3fac16 791 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
a4bd217b
JG
792
793 rqd.bio = bio;
af3fac16 794 rqd.opcode = NVM_OP_PWRITE;
a4bd217b 795 rqd.nr_ppas = lm->smeta_sec;
af3fac16 796 rqd.is_seq = 1;
a4bd217b
JG
797
798 for (i = 0; i < lm->smeta_sec; i++, paddr++) {
63e3809c
JG
799 struct pblk_sec_meta *meta_list = rqd.meta_list;
800
a4bd217b 801 rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
af3fac16 802 meta_list[i].lba = lba_list[paddr] = addr_empty;
a4bd217b
JG
803 }
804
253babc3 805 ret = pblk_submit_io_sync_sem(pblk, &rqd);
a4bd217b 806 if (ret) {
4e495a46 807 pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
a4bd217b 808 bio_put(bio);
45dcf29b 809 goto clear_rqd;
a4bd217b
JG
810 }
811
588726d3 812 atomic_dec(&pblk->inflight_io);
a4bd217b
JG
813
814 if (rqd.error) {
af3fac16
JG
815 pblk_log_write_err(pblk, &rqd);
816 ret = -EIO;
a4bd217b
JG
817 }
818
45dcf29b
JG
819clear_rqd:
820 pblk_free_rqd_meta(pblk, &rqd);
a4bd217b
JG
821 return ret;
822}
823
af3fac16
JG
824int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
825 void *emeta_buf)
a4bd217b 826{
af3fac16
JG
827 struct nvm_tgt_dev *dev = pblk->dev;
828 struct nvm_geo *geo = &dev->geo;
829 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
830 struct pblk_line_meta *lm = &pblk->lm;
831 void *ppa_list, *meta_list;
832 struct bio *bio;
833 struct nvm_rq rqd;
834 u64 paddr = line->emeta_ssec;
835 dma_addr_t dma_ppa_list, dma_meta_list;
836 int min = pblk->min_write_pgs;
837 int left_ppas = lm->emeta_sec[0];
838 int line_id = line->id;
839 int rq_ppas, rq_len;
840 int i, j;
841 int ret;
a4bd217b 842
af3fac16
JG
843 meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
844 &dma_meta_list);
845 if (!meta_list)
846 return -ENOMEM;
a4bd217b 847
af3fac16
JG
848 ppa_list = meta_list + pblk_dma_meta_size;
849 dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
850
851next_rq:
852 memset(&rqd, 0, sizeof(struct nvm_rq));
853
854 rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
855 rq_len = rq_ppas * geo->csecs;
856
857 bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
858 l_mg->emeta_alloc_type, GFP_KERNEL);
859 if (IS_ERR(bio)) {
860 ret = PTR_ERR(bio);
861 goto free_rqd_dma;
862 }
863
864 bio->bi_iter.bi_sector = 0; /* internal bio */
865 bio_set_op_attrs(bio, REQ_OP_READ, 0);
866
867 rqd.bio = bio;
868 rqd.meta_list = meta_list;
869 rqd.ppa_list = ppa_list;
870 rqd.dma_meta_list = dma_meta_list;
871 rqd.dma_ppa_list = dma_ppa_list;
872 rqd.opcode = NVM_OP_PREAD;
873 rqd.nr_ppas = rq_ppas;
874
875 for (i = 0; i < rqd.nr_ppas; ) {
876 struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, line_id);
877 int pos = pblk_ppa_to_pos(geo, ppa);
878
879 if (pblk_io_aligned(pblk, rq_ppas))
880 rqd.is_seq = 1;
881
882 while (test_bit(pos, line->blk_bitmap)) {
883 paddr += min;
884 if (pblk_boundary_paddr_checks(pblk, paddr)) {
885 bio_put(bio);
886 ret = -EINTR;
887 goto free_rqd_dma;
888 }
889
890 ppa = addr_to_gen_ppa(pblk, paddr, line_id);
891 pos = pblk_ppa_to_pos(geo, ppa);
892 }
893
894 if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
895 bio_put(bio);
896 ret = -EINTR;
897 goto free_rqd_dma;
898 }
899
900 for (j = 0; j < min; j++, i++, paddr++)
901 rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line_id);
902 }
903
904 ret = pblk_submit_io_sync(pblk, &rqd);
905 if (ret) {
906 pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
907 bio_put(bio);
908 goto free_rqd_dma;
909 }
910
911 atomic_dec(&pblk->inflight_io);
912
913 if (rqd.error)
914 pblk_log_read_err(pblk, &rqd);
915
916 emeta_buf += rq_len;
917 left_ppas -= rq_ppas;
918 if (left_ppas)
919 goto next_rq;
920
921free_rqd_dma:
922 nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
923 return ret;
a4bd217b
JG
924}
925
926static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
927 struct ppa_addr ppa)
928{
929 rqd->opcode = NVM_OP_ERASE;
930 rqd->ppa_addr = ppa;
931 rqd->nr_ppas = 1;
d7b68016 932 rqd->is_seq = 1;
a4bd217b
JG
933 rqd->bio = NULL;
934}
935
936static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
937{
4b5d56ed
MB
938 struct nvm_rq rqd = {NULL};
939 int ret;
a4bd217b 940
4209c31c
HH
941 trace_pblk_chunk_reset(pblk_disk_name(pblk), &ppa,
942 PBLK_CHUNK_RESET_START);
943
a4bd217b
JG
944 pblk_setup_e_rq(pblk, &rqd, ppa);
945
a4bd217b
JG
946 /* The write thread schedules erases so that it minimizes disturbances
947 * with writes. Thus, there is no need to take the LUN semaphore.
948 */
1a94b2d4 949 ret = pblk_submit_io_sync(pblk, &rqd);
a4bd217b
JG
950 rqd.private = pblk;
951 __pblk_end_io_erase(pblk, &rqd);
952
588726d3 953 return ret;
a4bd217b
JG
954}
955
956int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
957{
958 struct pblk_line_meta *lm = &pblk->lm;
959 struct ppa_addr ppa;
588726d3 960 int ret, bit = -1;
a4bd217b 961
a44f53fa
JG
962 /* Erase only good blocks, one at a time */
963 do {
964 spin_lock(&line->lock);
965 bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
966 bit + 1);
967 if (bit >= lm->blk_per_line) {
968 spin_unlock(&line->lock);
969 break;
970 }
971
a4bd217b 972 ppa = pblk->luns[bit].bppa; /* set ch and lun */
69471513 973 ppa.a.blk = line->id;
a4bd217b 974
a44f53fa 975 atomic_dec(&line->left_eblks);
a4bd217b 976 WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
a44f53fa 977 spin_unlock(&line->lock);
a4bd217b 978
588726d3
JG
979 ret = pblk_blk_erase_sync(pblk, ppa);
980 if (ret) {
4e495a46 981 pblk_err(pblk, "failed to erase line %d\n", line->id);
588726d3 982 return ret;
a4bd217b 983 }
a44f53fa 984 } while (1);
a4bd217b
JG
985
986 return 0;
987}
988
dd2a4343
JG
989static void pblk_line_setup_metadata(struct pblk_line *line,
990 struct pblk_line_mgmt *l_mg,
991 struct pblk_line_meta *lm)
992{
993 int meta_line;
994
588726d3
JG
995 lockdep_assert_held(&l_mg->free_lock);
996
dd2a4343
JG
997retry_meta:
998 meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
999 if (meta_line == PBLK_DATA_LINES) {
1000 spin_unlock(&l_mg->free_lock);
1001 io_schedule();
1002 spin_lock(&l_mg->free_lock);
1003 goto retry_meta;
1004 }
1005
1006 set_bit(meta_line, &l_mg->meta_bitmap);
1007 line->meta_line = meta_line;
1008
1009 line->smeta = l_mg->sline_meta[meta_line];
1010 line->emeta = l_mg->eline_meta[meta_line];
1011
1012 memset(line->smeta, 0, lm->smeta_len);
1013 memset(line->emeta->buf, 0, lm->emeta_len[0]);
1014
1015 line->emeta->mem = 0;
1016 atomic_set(&line->emeta->sync, 0);
1017}
1018
a4bd217b
JG
1019/* For now lines are always assumed full lines. Thus, smeta former and current
1020 * lun bitmaps are omitted.
1021 */
dd2a4343 1022static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
a4bd217b
JG
1023 struct pblk_line *cur)
1024{
1025 struct nvm_tgt_dev *dev = pblk->dev;
1026 struct nvm_geo *geo = &dev->geo;
1027 struct pblk_line_meta *lm = &pblk->lm;
1028 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
dd2a4343
JG
1029 struct pblk_emeta *emeta = line->emeta;
1030 struct line_emeta *emeta_buf = emeta->buf;
1031 struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
a4bd217b
JG
1032 int nr_blk_line;
1033
1034 /* After erasing the line, new bad blocks might appear and we risk
1035 * having an invalid line
1036 */
1037 nr_blk_line = lm->blk_per_line -
1038 bitmap_weight(line->blk_bitmap, lm->blk_per_line);
1039 if (nr_blk_line < lm->min_blk_line) {
1040 spin_lock(&l_mg->free_lock);
1041 spin_lock(&line->lock);
1042 line->state = PBLK_LINESTATE_BAD;
f2937232
HH
1043 trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1044 line->state);
a4bd217b
JG
1045 spin_unlock(&line->lock);
1046
1047 list_add_tail(&line->list, &l_mg->bad_list);
1048 spin_unlock(&l_mg->free_lock);
1049
4e495a46 1050 pblk_debug(pblk, "line %d is bad\n", line->id);
a4bd217b
JG
1051
1052 return 0;
1053 }
1054
1055 /* Run-time metadata */
dd2a4343 1056 line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);
a4bd217b
JG
1057
1058 /* Mark LUNs allocated in this line (all for now) */
1059 bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);
1060
dd2a4343
JG
1061 smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
1062 memcpy(smeta_buf->header.uuid, pblk->instance_uuid, 16);
1063 smeta_buf->header.id = cpu_to_le32(line->id);
1064 smeta_buf->header.type = cpu_to_le16(line->type);
d0ab0b1a
HH
1065 smeta_buf->header.version_major = SMETA_VERSION_MAJOR;
1066 smeta_buf->header.version_minor = SMETA_VERSION_MINOR;
a4bd217b
JG
1067
1068 /* Start metadata */
dd2a4343 1069 smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
fae7fae4 1070 smeta_buf->window_wr_lun = cpu_to_le32(geo->all_luns);
a4bd217b
JG
1071
1072 /* Fill metadata among lines */
1073 if (cur) {
1074 memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
dd2a4343
JG
1075 smeta_buf->prev_id = cpu_to_le32(cur->id);
1076 cur->emeta->buf->next_id = cpu_to_le32(line->id);
a4bd217b 1077 } else {
dd2a4343 1078 smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
a4bd217b
JG
1079 }
1080
1081 /* All smeta must be set at this point */
dd2a4343
JG
1082 smeta_buf->header.crc = cpu_to_le32(
1083 pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
1084 smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));
a4bd217b
JG
1085
1086 /* End metadata */
dd2a4343
JG
1087 memcpy(&emeta_buf->header, &smeta_buf->header,
1088 sizeof(struct line_header));
d0ab0b1a
HH
1089
1090 emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
1091 emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
1092 emeta_buf->header.crc = cpu_to_le32(
1093 pblk_calc_meta_header_crc(pblk, &emeta_buf->header));
1094
dd2a4343
JG
1095 emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
1096 emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
1097 emeta_buf->nr_valid_lbas = cpu_to_le64(0);
1098 emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
1099 emeta_buf->crc = cpu_to_le32(0);
1100 emeta_buf->prev_id = smeta_buf->prev_id;
a4bd217b
JG
1101
1102 return 1;
1103}
1104
9cfd5a95
JG
1105static int pblk_line_alloc_bitmaps(struct pblk *pblk, struct pblk_line *line)
1106{
1107 struct pblk_line_meta *lm = &pblk->lm;
53d82db6 1108 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
9cfd5a95 1109
53d82db6 1110 line->map_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
9cfd5a95
JG
1111 if (!line->map_bitmap)
1112 return -ENOMEM;
1113
53d82db6
HH
1114 memset(line->map_bitmap, 0, lm->sec_bitmap_len);
1115
9cfd5a95 1116 /* will be initialized using bb info from map_bitmap */
53d82db6 1117 line->invalid_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
9cfd5a95 1118 if (!line->invalid_bitmap) {
53d82db6 1119 mempool_free(line->map_bitmap, l_mg->bitmap_pool);
9cfd5a95
JG
1120 line->map_bitmap = NULL;
1121 return -ENOMEM;
1122 }
1123
1124 return 0;
1125}
1126
a4bd217b
JG
1127/* For now lines are always assumed full lines. Thus, smeta former and current
1128 * lun bitmaps are omitted.
1129 */
1130static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
1131 int init)
1132{
1133 struct nvm_tgt_dev *dev = pblk->dev;
1134 struct nvm_geo *geo = &dev->geo;
1135 struct pblk_line_meta *lm = &pblk->lm;
1136 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
a4bd217b
JG
1137 u64 off;
1138 int bit = -1;
cfe1c9e2 1139 int emeta_secs;
a4bd217b
JG
1140
1141 line->sec_in_line = lm->sec_per_line;
1142
1143 /* Capture bad block information on line mapping bitmaps */
1144 while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
1145 bit + 1)) < lm->blk_per_line) {
e46f4e48 1146 off = bit * geo->ws_opt;
a4bd217b
JG
1147 bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
1148 lm->sec_per_line);
1149 bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
1150 lm->sec_per_line);
e46f4e48 1151 line->sec_in_line -= geo->clba;
a4bd217b
JG
1152 }
1153
1154 /* Mark smeta metadata sectors as bad sectors */
1155 bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
e46f4e48 1156 off = bit * geo->ws_opt;
a4bd217b
JG
1157 bitmap_set(line->map_bitmap, off, lm->smeta_sec);
1158 line->sec_in_line -= lm->smeta_sec;
1159 line->smeta_ssec = off;
1160 line->cur_sec = off + lm->smeta_sec;
1161
af3fac16 1162 if (init && pblk_line_smeta_write(pblk, line, off)) {
4e495a46 1163 pblk_debug(pblk, "line smeta I/O failed. Retry\n");
6cf17a2f 1164 return 0;
a4bd217b
JG
1165 }
1166
1167 bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);
1168
1169 /* Mark emeta metadata sectors as bad sectors. We need to consider bad
1170 * blocks to make sure that there are enough sectors to store emeta
1171 */
cfe1c9e2
HH
1172 emeta_secs = lm->emeta_sec[0];
1173 off = lm->sec_per_line;
1174 while (emeta_secs) {
e46f4e48 1175 off -= geo->ws_opt;
a4bd217b 1176 if (!test_bit(off, line->invalid_bitmap)) {
e46f4e48
JG
1177 bitmap_set(line->invalid_bitmap, off, geo->ws_opt);
1178 emeta_secs -= geo->ws_opt;
a4bd217b
JG
1179 }
1180 }
1181
a4bd217b 1182 line->emeta_ssec = off;
cfe1c9e2 1183 line->sec_in_line -= lm->emeta_sec[0];
dd2a4343 1184 line->nr_valid_lbas = 0;
0880a9aa 1185 line->left_msecs = line->sec_in_line;
dd2a4343 1186 *line->vsc = cpu_to_le32(line->sec_in_line);
a4bd217b
JG
1187
1188 if (lm->sec_per_line - line->sec_in_line !=
1189 bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
1190 spin_lock(&line->lock);
1191 line->state = PBLK_LINESTATE_BAD;
f2937232
HH
1192 trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1193 line->state);
a4bd217b
JG
1194 spin_unlock(&line->lock);
1195
1196 list_add_tail(&line->list, &l_mg->bad_list);
4e495a46 1197 pblk_err(pblk, "unexpected line %d is bad\n", line->id);
a4bd217b
JG
1198
1199 return 0;
1200 }
1201
1202 return 1;
1203}
1204
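/* Count how many chunks in a never-written line still need an erase;
 * chunks the device reports as free are marked in the erase bitmap and
 * skipped.
 */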
32ef9412
JG
1205static int pblk_prepare_new_line(struct pblk *pblk, struct pblk_line *line)
1206{
1207 struct pblk_line_meta *lm = &pblk->lm;
1208 struct nvm_tgt_dev *dev = pblk->dev;
1209 struct nvm_geo *geo = &dev->geo;
1210 int blk_to_erase = atomic_read(&line->blk_in_line);
1211 int i;
1212
1213 for (i = 0; i < lm->blk_per_line; i++) {
1214 struct pblk_lun *rlun = &pblk->luns[i];
1215 int pos = pblk_ppa_to_pos(geo, rlun->bppa);
1216 int state = line->chks[pos].state;
1217
1218 /* Free chunks should not be erased */
1219 if (state & NVM_CHK_ST_FREE) {
1220 set_bit(pblk_ppa_to_pos(geo, rlun->bppa),
1221 line->erase_bitmap);
1222 blk_to_erase--;
1223 }
1224 }
1225
1226 return blk_to_erase;
1227}
1228
a4bd217b
JG
1229static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
1230{
1231 struct pblk_line_meta *lm = &pblk->lm;
1d8b33e0 1232 int blk_in_line = atomic_read(&line->blk_in_line);
9cfd5a95 1233 int blk_to_erase;
a4bd217b 1234
32ef9412
JG
1235 /* Bad blocks do not need to be erased */
1236 bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);
1237
a4bd217b 1238 spin_lock(&line->lock);
32ef9412
JG
1239
1240 /* If we have not written to this line, we need to mark up free chunks
1241 * as already erased
1242 */
1243 if (line->state == PBLK_LINESTATE_NEW) {
1244 blk_to_erase = pblk_prepare_new_line(pblk, line);
1245 line->state = PBLK_LINESTATE_FREE;
f2937232
HH
1246 trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1247 line->state);
32ef9412 1248 } else {
1d8b33e0
JG
1249 blk_to_erase = blk_in_line;
1250 }
1251
1252 if (blk_in_line < lm->min_blk_line) {
9cfd5a95
JG
1253 spin_unlock(&line->lock);
1254 return -EAGAIN;
32ef9412
JG
1255 }
1256
a4bd217b 1257 if (line->state != PBLK_LINESTATE_FREE) {
588726d3
JG
1258 WARN(1, "pblk: corrupted line %d, state %d\n",
1259 line->id, line->state);
9cfd5a95
JG
1260 spin_unlock(&line->lock);
1261 return -EINTR;
a4bd217b 1262 }
588726d3 1263
a4bd217b 1264 line->state = PBLK_LINESTATE_OPEN;
f2937232
HH
1265 trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1266 line->state);
a44f53fa 1267
32ef9412
JG
1268 atomic_set(&line->left_eblks, blk_to_erase);
1269 atomic_set(&line->left_seblks, blk_to_erase);
dd2a4343
JG
1270
1271 line->meta_distance = lm->meta_distance;
a4bd217b
JG
1272 spin_unlock(&line->lock);
1273
a4bd217b
JG
1274 kref_init(&line->ref);
1275
1276 return 0;
1277}
1278
1279int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
1280{
1281 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1282 int ret;
1283
1284 spin_lock(&l_mg->free_lock);
1285 l_mg->data_line = line;
1286 list_del(&line->list);
a4bd217b
JG
1287
1288 ret = pblk_line_prepare(pblk, line);
1289 if (ret) {
1290 list_add(&line->list, &l_mg->free_list);
3dc001f3 1291 spin_unlock(&l_mg->free_lock);
a4bd217b
JG
1292 return ret;
1293 }
3dc001f3 1294 spin_unlock(&l_mg->free_lock);
a4bd217b 1295
9cfd5a95
JG
1296 ret = pblk_line_alloc_bitmaps(pblk, line);
1297 if (ret)
1298 return ret;
a4bd217b
JG
1299
1300 if (!pblk_line_init_bb(pblk, line, 0)) {
1301 list_add(&line->list, &l_mg->free_list);
1302 return -EINTR;
1303 }
1304
9cfd5a95 1305 pblk_rl_free_lines_dec(&pblk->rl, line, true);
a4bd217b
JG
1306 return 0;
1307}
1308
1309void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
1310{
53d82db6
HH
1311 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1312
1313 mempool_free(line->map_bitmap, l_mg->bitmap_pool);
a4bd217b
JG
1314 line->map_bitmap = NULL;
1315 line->smeta = NULL;
1316 line->emeta = NULL;
1317}
1318
9cfd5a95
JG
1319static void pblk_line_reinit(struct pblk_line *line)
1320{
1321 *line->vsc = cpu_to_le32(EMPTY_ENTRY);
1322
1323 line->map_bitmap = NULL;
1324 line->invalid_bitmap = NULL;
1325 line->smeta = NULL;
1326 line->emeta = NULL;
1327}
1328
1329void pblk_line_free(struct pblk_line *line)
1330{
53d82db6
HH
1331 struct pblk *pblk = line->pblk;
1332 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1333
1334 mempool_free(line->map_bitmap, l_mg->bitmap_pool);
1335 mempool_free(line->invalid_bitmap, l_mg->bitmap_pool);
9cfd5a95
JG
1336
1337 pblk_line_reinit(line);
1338}
1339
a4bd217b
JG
1340struct pblk_line *pblk_line_get(struct pblk *pblk)
1341{
1342 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1343 struct pblk_line_meta *lm = &pblk->lm;
588726d3
JG
1344 struct pblk_line *line;
1345 int ret, bit;
a4bd217b
JG
1346
1347 lockdep_assert_held(&l_mg->free_lock);
1348
588726d3 1349retry:
a4bd217b 1350 if (list_empty(&l_mg->free_list)) {
4e495a46 1351 pblk_err(pblk, "no free lines\n");
588726d3 1352 return NULL;
a4bd217b
JG
1353 }
1354
1355 line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
1356 list_del(&line->list);
1357 l_mg->nr_free_lines--;
1358
1359 bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
1360 if (unlikely(bit >= lm->blk_per_line)) {
1361 spin_lock(&line->lock);
1362 line->state = PBLK_LINESTATE_BAD;
f2937232
HH
1363 trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1364 line->state);
a4bd217b
JG
1365 spin_unlock(&line->lock);
1366
1367 list_add_tail(&line->list, &l_mg->bad_list);
1368
4e495a46 1369 pblk_debug(pblk, "line %d is bad\n", line->id);
588726d3 1370 goto retry;
a4bd217b
JG
1371 }
1372
588726d3
JG
1373 ret = pblk_line_prepare(pblk, line);
1374 if (ret) {
1d8b33e0
JG
1375 switch (ret) {
1376 case -EAGAIN:
1377 list_add(&line->list, &l_mg->bad_list);
1378 goto retry;
1379 case -EINTR:
588726d3
JG
1380 list_add(&line->list, &l_mg->corrupt_list);
1381 goto retry;
1d8b33e0 1382 default:
4e495a46 1383 pblk_err(pblk, "failed to prepare line %d\n", line->id);
588726d3
JG
1384 list_add(&line->list, &l_mg->free_list);
1385 l_mg->nr_free_lines++;
1386 return NULL;
1387 }
a4bd217b
JG
1388 }
1389
a4bd217b
JG
1390 return line;
1391}
1392
1393static struct pblk_line *pblk_line_retry(struct pblk *pblk,
1394 struct pblk_line *line)
1395{
1396 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1397 struct pblk_line *retry_line;
1398
588726d3 1399retry:
a4bd217b
JG
1400 spin_lock(&l_mg->free_lock);
1401 retry_line = pblk_line_get(pblk);
1402 if (!retry_line) {
be388d9f 1403 l_mg->data_line = NULL;
a4bd217b
JG
1404 spin_unlock(&l_mg->free_lock);
1405 return NULL;
1406 }
1407
9cfd5a95
JG
1408 retry_line->map_bitmap = line->map_bitmap;
1409 retry_line->invalid_bitmap = line->invalid_bitmap;
a4bd217b
JG
1410 retry_line->smeta = line->smeta;
1411 retry_line->emeta = line->emeta;
1412 retry_line->meta_line = line->meta_line;
a4bd217b 1413
9cfd5a95
JG
1414 pblk_line_reinit(line);
1415
3dc001f3 1416 l_mg->data_line = retry_line;
a4bd217b
JG
1417 spin_unlock(&l_mg->free_lock);
1418
a7689938 1419 pblk_rl_free_lines_dec(&pblk->rl, line, false);
a4bd217b 1420
588726d3
JG
1421 if (pblk_line_erase(pblk, retry_line))
1422 goto retry;
1423
a4bd217b
JG
1424 return retry_line;
1425}
1426
588726d3
JG
1427static void pblk_set_space_limit(struct pblk *pblk)
1428{
1429 struct pblk_rl *rl = &pblk->rl;
1430
1431 atomic_set(&rl->rb_space, 0);
1432}
1433
a4bd217b
JG
1434struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
1435{
1436 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1437 struct pblk_line *line;
a4bd217b
JG
1438
1439 spin_lock(&l_mg->free_lock);
1440 line = pblk_line_get(pblk);
1441 if (!line) {
1442 spin_unlock(&l_mg->free_lock);
1443 return NULL;
1444 }
1445
1446 line->seq_nr = l_mg->d_seq_nr++;
1447 line->type = PBLK_LINETYPE_DATA;
1448 l_mg->data_line = line;
1449
dd2a4343 1450 pblk_line_setup_metadata(line, l_mg, &pblk->lm);
a4bd217b
JG
1451
1452 /* Allocate next line for preparation */
1453 l_mg->data_next = pblk_line_get(pblk);
588726d3
JG
1454 if (!l_mg->data_next) {
1455 /* If we cannot get a new line, we need to stop the pipeline.
1456 * Only allow as many writes in as we can store safely and then
1457 * fail gracefully
1458 */
1459 pblk_set_space_limit(pblk);
1460
1461 l_mg->data_next = NULL;
1462 } else {
a4bd217b
JG
1463 l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
1464 l_mg->data_next->type = PBLK_LINETYPE_DATA;
a4bd217b
JG
1465 }
1466 spin_unlock(&l_mg->free_lock);
1467
9cfd5a95
JG
1468 if (pblk_line_alloc_bitmaps(pblk, line))
1469 return NULL;
1470
588726d3
JG
1471 if (pblk_line_erase(pblk, line)) {
1472 line = pblk_line_retry(pblk, line);
1473 if (!line)
1474 return NULL;
1475 }
1476
a4bd217b 1477retry_setup:
dd2a4343 1478 if (!pblk_line_init_metadata(pblk, line, NULL)) {
a4bd217b
JG
1479 line = pblk_line_retry(pblk, line);
1480 if (!line)
1481 return NULL;
1482
1483 goto retry_setup;
1484 }
1485
1486 if (!pblk_line_init_bb(pblk, line, 1)) {
1487 line = pblk_line_retry(pblk, line);
1488 if (!line)
1489 return NULL;
1490
1491 goto retry_setup;
1492 }
1493
a7689938
JG
1494 pblk_rl_free_lines_dec(&pblk->rl, line, true);
1495
a4bd217b
JG
1496 return line;
1497}
1498
ae14cc04
MB
1499void pblk_ppa_to_line_put(struct pblk *pblk, struct ppa_addr ppa)
1500{
1501 struct pblk_line *line;
1502
cb21665c 1503 line = pblk_ppa_to_line(pblk, ppa);
ae14cc04
MB
1504 kref_put(&line->ref, pblk_line_put_wq);
1505}
1506
1507void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd)
1508{
1509 struct ppa_addr *ppa_list;
1510 int i;
1511
1512 ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
1513
1514 for (i = 0; i < rqd->nr_ppas; i++)
1515 pblk_ppa_to_line_put(pblk, ppa_list[i]);
1516}
1517
588726d3
JG
1518static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
1519{
1520 lockdep_assert_held(&pblk->l_mg.free_lock);
1521
1522 pblk_set_space_limit(pblk);
1523 pblk->state = PBLK_STATE_STOPPING;
1b0dd0bf 1524 trace_pblk_state(pblk_disk_name(pblk), pblk->state);
588726d3
JG
1525}
1526
8bd40020
JG
1527static void pblk_line_close_meta_sync(struct pblk *pblk)
1528{
1529 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1530 struct pblk_line_meta *lm = &pblk->lm;
1531 struct pblk_line *line, *tline;
1532 LIST_HEAD(list);
1533
1534 spin_lock(&l_mg->close_lock);
1535 if (list_empty(&l_mg->emeta_list)) {
1536 spin_unlock(&l_mg->close_lock);
1537 return;
1538 }
1539
1540 list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
1541 spin_unlock(&l_mg->close_lock);
1542
1543 list_for_each_entry_safe(line, tline, &list, list) {
1544 struct pblk_emeta *emeta = line->emeta;
1545
1546 while (emeta->mem < lm->emeta_len[0]) {
1547 int ret;
1548
1549 ret = pblk_submit_meta_io(pblk, line);
1550 if (ret) {
4e495a46 1551 pblk_err(pblk, "sync meta line %d failed (%d)\n",
8bd40020
JG
1552 line->id, ret);
1553 return;
1554 }
1555 }
1556 }
1557
1558 pblk_wait_for_meta(pblk);
1559 flush_workqueue(pblk->close_wq);
1560}
1561
a7c9e910 1562void __pblk_pipeline_flush(struct pblk *pblk)
588726d3
JG
1563{
1564 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1565 int ret;
1566
1567 spin_lock(&l_mg->free_lock);
1568 if (pblk->state == PBLK_STATE_RECOVERING ||
1569 pblk->state == PBLK_STATE_STOPPED) {
1570 spin_unlock(&l_mg->free_lock);
1571 return;
1572 }
1573 pblk->state = PBLK_STATE_RECOVERING;
1b0dd0bf 1574 trace_pblk_state(pblk_disk_name(pblk), pblk->state);
588726d3
JG
1575 spin_unlock(&l_mg->free_lock);
1576
1577 pblk_flush_writer(pblk);
1578 pblk_wait_for_meta(pblk);
1579
1580 ret = pblk_recov_pad(pblk);
1581 if (ret) {
4e495a46 1582 pblk_err(pblk, "could not close data on teardown (%d)\n", ret);
588726d3
JG
1583 return;
1584 }
1585
ee8d5c1a 1586 flush_workqueue(pblk->bb_wq);
588726d3 1587 pblk_line_close_meta_sync(pblk);
a7c9e910
JG
1588}
1589
1590void __pblk_pipeline_stop(struct pblk *pblk)
1591{
1592 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
588726d3
JG
1593
1594 spin_lock(&l_mg->free_lock);
1595 pblk->state = PBLK_STATE_STOPPED;
1b0dd0bf 1596 trace_pblk_state(pblk_disk_name(pblk), pblk->state);
588726d3
JG
1597 l_mg->data_line = NULL;
1598 l_mg->data_next = NULL;
1599 spin_unlock(&l_mg->free_lock);
1600}
1601
a7c9e910
JG
1602void pblk_pipeline_stop(struct pblk *pblk)
1603{
1604 __pblk_pipeline_flush(pblk);
1605 __pblk_pipeline_stop(pblk);
1606}
1607
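/* Switch the active data line to the pre-allocated next line: finish
 * any outstanding erases, initialize its metadata and bad-block
 * bitmaps, and allocate a new data_next for the following switch.
 */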
21d22871 1608struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
a4bd217b 1609{
a4bd217b 1610 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
21d22871 1611 struct pblk_line *cur, *new = NULL;
a4bd217b 1612 unsigned int left_seblks;
a4bd217b 1613
a4bd217b
JG
1614 new = l_mg->data_next;
1615 if (!new)
21d22871 1616 goto out;
a4bd217b 1617
588726d3 1618 spin_lock(&l_mg->free_lock);
44cdbdc6
JG
1619 cur = l_mg->data_line;
1620 l_mg->data_line = new;
1621
588726d3
JG
1622 pblk_line_setup_metadata(new, l_mg, &pblk->lm);
1623 spin_unlock(&l_mg->free_lock);
1624
1625retry_erase:
a4bd217b
JG
1626 left_seblks = atomic_read(&new->left_seblks);
1627 if (left_seblks) {
1628 /* If line is not fully erased, erase it */
a44f53fa 1629 if (atomic_read(&new->left_eblks)) {
a4bd217b 1630 if (pblk_line_erase(pblk, new))
21d22871 1631 goto out;
a4bd217b
JG
1632 } else {
1633 io_schedule();
1634 }
588726d3 1635 goto retry_erase;
a4bd217b
JG
1636 }
1637
9cfd5a95
JG
1638 if (pblk_line_alloc_bitmaps(pblk, new))
1639 return NULL;
1640
a4bd217b 1641retry_setup:
dd2a4343 1642 if (!pblk_line_init_metadata(pblk, new, cur)) {
a4bd217b 1643 new = pblk_line_retry(pblk, new);
f3236cef 1644 if (!new)
21d22871 1645 goto out;
a4bd217b
JG
1646
1647 goto retry_setup;
1648 }
1649
1650 if (!pblk_line_init_bb(pblk, new, 1)) {
1651 new = pblk_line_retry(pblk, new);
1652 if (!new)
21d22871 1653 goto out;
a4bd217b
JG
1654
1655 goto retry_setup;
1656 }
1657
a7689938
JG
1658 pblk_rl_free_lines_dec(&pblk->rl, new, true);
1659
588726d3
JG
1660 /* Allocate next line for preparation */
1661 spin_lock(&l_mg->free_lock);
1662 l_mg->data_next = pblk_line_get(pblk);
1663 if (!l_mg->data_next) {
1664 /* If we cannot get a new line, we need to stop the pipeline.
1665 * Only allow as many writes in as we can store safely and then
1666 * fail gracefully
1667 */
1668 pblk_stop_writes(pblk, new);
1669 l_mg->data_next = NULL;
1670 } else {
1671 l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
1672 l_mg->data_next->type = PBLK_LINETYPE_DATA;
588726d3
JG
1673 }
1674 spin_unlock(&l_mg->free_lock);
1675
21d22871
JG
1676out:
1677 return new;
a4bd217b
JG
1678}
1679
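/* Last reference to a GC'ed line has been dropped: mark it free,
 * return it to the free list and credit it back to the rate limiter.
 */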
7bd4d370 1680static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
a4bd217b 1681{
a4bd217b 1682 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
d6b992f7 1683 struct pblk_gc *gc = &pblk->gc;
a4bd217b
JG
1684
1685 spin_lock(&line->lock);
1686 WARN_ON(line->state != PBLK_LINESTATE_GC);
1687 line->state = PBLK_LINESTATE_FREE;
f2937232
HH
1688 trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1689 line->state);
a4bd217b 1690 line->gc_group = PBLK_LINEGC_NONE;
8e55c07b 1691 pblk_line_free(line);
a4bd217b 1692
48b8d208
HH
1693 if (line->w_err_gc->has_write_err) {
1694 pblk_rl_werr_line_out(&pblk->rl);
1695 line->w_err_gc->has_write_err = 0;
1696 }
1697
1698 spin_unlock(&line->lock);
d6b992f7
HH
1699 atomic_dec(&gc->pipeline_gc);
1700
a4bd217b
JG
1701 spin_lock(&l_mg->free_lock);
1702 list_add_tail(&line->list, &l_mg->free_list);
1703 l_mg->nr_free_lines++;
1704 spin_unlock(&l_mg->free_lock);
1705
1706 pblk_rl_free_lines_inc(&pblk->rl, line);
1707}
1708
7bd4d370
JG
1709static void pblk_line_put_ws(struct work_struct *work)
1710{
1711 struct pblk_line_ws *line_put_ws = container_of(work,
1712 struct pblk_line_ws, ws);
1713 struct pblk *pblk = line_put_ws->pblk;
1714 struct pblk_line *line = line_put_ws->line;
1715
1716 __pblk_line_put(pblk, line);
b906bbb6 1717 mempool_free(line_put_ws, &pblk->gen_ws_pool);
7bd4d370
JG
1718}
1719
1720void pblk_line_put(struct kref *ref)
1721{
1722 struct pblk_line *line = container_of(ref, struct pblk_line, ref);
1723 struct pblk *pblk = line->pblk;
1724
1725 __pblk_line_put(pblk, line);
1726}
1727
1728void pblk_line_put_wq(struct kref *ref)
1729{
1730 struct pblk_line *line = container_of(ref, struct pblk_line, ref);
1731 struct pblk *pblk = line->pblk;
1732 struct pblk_line_ws *line_put_ws;
1733
b906bbb6 1734 line_put_ws = mempool_alloc(&pblk->gen_ws_pool, GFP_ATOMIC);
7bd4d370
JG
1735 if (!line_put_ws)
1736 return;
1737
1738 line_put_ws->pblk = pblk;
1739 line_put_ws->line = line;
1740 line_put_ws->priv = NULL;
1741
1742 INIT_WORK(&line_put_ws->ws, pblk_line_put_ws);
1743 queue_work(pblk->r_end_wq, &line_put_ws->ws);
1744}
1745
a4bd217b
JG
1746int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
1747{
1748 struct nvm_rq *rqd;
1749 int err;
1750
67bf26a3 1751 rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);
a4bd217b
JG
1752
1753 pblk_setup_e_rq(pblk, rqd, ppa);
1754
1755 rqd->end_io = pblk_end_io_erase;
1756 rqd->private = pblk;
1757
4209c31c
HH
1758 trace_pblk_chunk_reset(pblk_disk_name(pblk),
1759 &ppa, PBLK_CHUNK_RESET_START);
1760
a4bd217b
JG
1761 /* The write thread schedules erases so that it minimizes disturbances
1762 * with writes. Thus, there is no need to take the LUN semaphore.
1763 */
1764 err = pblk_submit_io(pblk, rqd);
1765 if (err) {
1766 struct nvm_tgt_dev *dev = pblk->dev;
1767 struct nvm_geo *geo = &dev->geo;
1768
4e495a46 1769 pblk_err(pblk, "could not async erase line:%d,blk:%d\n",
cb21665c 1770 pblk_ppa_to_line_id(ppa),
b1bcfda1 1771 pblk_ppa_to_pos(geo, ppa));
a4bd217b
JG
1772 }
1773
1774 return err;
1775}
1776
1777struct pblk_line *pblk_line_get_data(struct pblk *pblk)
1778{
1779 return pblk->l_mg.data_line;
1780}
1781
d624f371
JG
1782/* For now, always erase next line */
1783struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
a4bd217b
JG
1784{
1785 return pblk->l_mg.data_next;
1786}
1787
1788int pblk_line_is_full(struct pblk_line *line)
1789{
1790 return (line->left_msecs == 0);
1791}
1792
588726d3
JG
1793static void pblk_line_should_sync_meta(struct pblk *pblk)
1794{
1795 if (pblk_rl_is_limit(&pblk->rl))
1796 pblk_line_close_meta_sync(pblk);
1797}
1798
a4bd217b
JG
1799void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
1800{
32ef9412
JG
1801 struct nvm_tgt_dev *dev = pblk->dev;
1802 struct nvm_geo *geo = &dev->geo;
1803 struct pblk_line_meta *lm = &pblk->lm;
a4bd217b
JG
1804 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1805 struct list_head *move_list;
32ef9412 1806 int i;
a4bd217b 1807
880eda54 1808#ifdef CONFIG_NVM_PBLK_DEBUG
dd2a4343 1809 WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
a4bd217b 1810 "pblk: corrupt closed line %d\n", line->id);
a84ebb83 1811#endif
a4bd217b
JG
1812
1813 spin_lock(&l_mg->free_lock);
1814 WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
1815 spin_unlock(&l_mg->free_lock);
1816
1817 spin_lock(&l_mg->gc_lock);
1818 spin_lock(&line->lock);
1819 WARN_ON(line->state != PBLK_LINESTATE_OPEN);
1820 line->state = PBLK_LINESTATE_CLOSED;
1821 move_list = pblk_line_gc_list(pblk, line);
a4bd217b
JG
1822 list_add_tail(&line->list, move_list);
1823
53d82db6 1824 mempool_free(line->map_bitmap, l_mg->bitmap_pool);
a4bd217b
JG
1825 line->map_bitmap = NULL;
1826 line->smeta = NULL;
1827 line->emeta = NULL;
1828
32ef9412
JG
1829 for (i = 0; i < lm->blk_per_line; i++) {
1830 struct pblk_lun *rlun = &pblk->luns[i];
1831 int pos = pblk_ppa_to_pos(geo, rlun->bppa);
1832 int state = line->chks[pos].state;
1833
1834 if (!(state & NVM_CHK_ST_OFFLINE))
1835 line->chks[pos].state = NVM_CHK_ST_CLOSED;
1836 }
1837
a4bd217b
JG
1838 spin_unlock(&line->lock);
1839 spin_unlock(&l_mg->gc_lock);
f2937232
HH
1840
1841 trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1842 line->state);
a4bd217b
JG
1843}
1844
dd2a4343
JG
1845void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
1846{
1847 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1848 struct pblk_line_meta *lm = &pblk->lm;
1849 struct pblk_emeta *emeta = line->emeta;
1850 struct line_emeta *emeta_buf = emeta->buf;
76758390 1851 struct wa_counters *wa = emeta_to_wa(lm, emeta_buf);
dd2a4343 1852
588726d3 1853 /* No need for exact vsc value; avoid a big line lock and take approx. */
dd2a4343
JG
1854 memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
1855 memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);
1856
76758390
HH
1857 wa->user = cpu_to_le64(atomic64_read(&pblk->user_wa));
1858 wa->pad = cpu_to_le64(atomic64_read(&pblk->pad_wa));
1859 wa->gc = cpu_to_le64(atomic64_read(&pblk->gc_wa));
1860
9cc85bc7
JG
1861 if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC) {
1862 emeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
1863 memcpy(emeta_buf->header.uuid, pblk->instance_uuid, 16);
1864 emeta_buf->header.id = cpu_to_le32(line->id);
1865 emeta_buf->header.type = cpu_to_le16(line->type);
1866 emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
1867 emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
1868 emeta_buf->header.crc = cpu_to_le32(
1869 pblk_calc_meta_header_crc(pblk, &emeta_buf->header));
1870 }
1871
dd2a4343
JG
1872 emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
1873 emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));
1874
1875 spin_lock(&l_mg->close_lock);
1876 spin_lock(&line->lock);
48b8d208
HH
1877
1878 /* Update the in-memory start address for emeta, in case it has
1879 * shifted due to write errors
1880 */
1881 if (line->emeta_ssec != line->cur_sec)
1882 line->emeta_ssec = line->cur_sec;
1883
dd2a4343
JG
1884 list_add_tail(&line->list, &l_mg->emeta_list);
1885 spin_unlock(&line->lock);
1886 spin_unlock(&l_mg->close_lock);
588726d3
JG
1887
1888 pblk_line_should_sync_meta(pblk);
48b8d208
HH
1889}
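/*
 * Editor's sketch: the emeta CRC set above covers the whole emeta buffer,
 * with the header carrying its own CRC. A consumer would verify them in the
 * opposite direction, roughly as below; pblk's recovery path has its own
 * checks, so this helper is only an assumption-laden illustration.
 */
static bool __maybe_unused pblk_emeta_crcs_ok_sketch(struct pblk *pblk,
						     struct line_emeta *emeta_buf)
{
	if (le32_to_cpu(emeta_buf->header.crc) !=
	    pblk_calc_meta_header_crc(pblk, &emeta_buf->header))
		return false;

	return le32_to_cpu(emeta_buf->crc) ==
	       pblk_calc_emeta_crc(pblk, emeta_buf);
}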
1890
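/* Snapshot the line's lba list from emeta so that GC can still translate
 * sectors after a write error has invalidated the on-media emeta location.
 */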
1891static void pblk_save_lba_list(struct pblk *pblk, struct pblk_line *line)
1892{
1893 struct pblk_line_meta *lm = &pblk->lm;
1894 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1895 unsigned int lba_list_size = lm->emeta_len[2];
1896 struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
1897 struct pblk_emeta *emeta = line->emeta;
1898
1899 w_err_gc->lba_list = pblk_malloc(lba_list_size,
1900 l_mg->emeta_alloc_type, GFP_KERNEL);
1901 memcpy(w_err_gc->lba_list, emeta_to_lbas(pblk, emeta->buf),
1902 lba_list_size);
dd2a4343
JG
1903}
1904
a4bd217b
JG
1905void pblk_line_close_ws(struct work_struct *work)
1906{
1907 struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
1908 ws);
1909 struct pblk *pblk = line_ws->pblk;
1910 struct pblk_line *line = line_ws->line;
48b8d208
HH
1911 struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
1912
 1913 	/* Write errors make the emeta start address stored in smeta invalid,
1914 * so keep a copy of the lba list until we've gc'd the line
1915 */
1916 if (w_err_gc->has_write_err)
1917 pblk_save_lba_list(pblk, line);
a4bd217b
JG
1918
1919 pblk_line_close(pblk, line);
b906bbb6 1920 mempool_free(line_ws, &pblk->gen_ws_pool);
a4bd217b
JG
1921}
1922
b84ae4a8
JG
1923void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
1924 void (*work)(struct work_struct *), gfp_t gfp_mask,
ef576494 1925 struct workqueue_struct *wq)
a4bd217b
JG
1926{
1927 struct pblk_line_ws *line_ws;
1928
b906bbb6 1929 line_ws = mempool_alloc(&pblk->gen_ws_pool, gfp_mask);
a4bd217b
JG
1930
1931 line_ws->pblk = pblk;
1932 line_ws->line = line;
1933 line_ws->priv = priv;
1934
1935 INIT_WORK(&line_ws->ws, work);
ef576494 1936 queue_work(wq, &line_ws->ws);
a4bd217b
JG
1937}
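/*
 * Editor's usage sketch: callers hand pblk_gen_run_ws() a work function plus
 * an optional private pointer, and it allocates the pblk_line_ws context from
 * gen_ws_pool; the work function must free that context again, as
 * pblk_line_close_ws() above does. A deferred line close might be scheduled
 * roughly like this (pblk->close_wq is assumed here; the real call site lives
 * outside this file):
 */
static void __maybe_unused pblk_sched_line_close_sketch(struct pblk *pblk,
							struct pblk_line *line)
{
	pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws,
			GFP_ATOMIC, pblk->close_wq);
}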
1938
43241cfe 1939static void __pblk_down_chunk(struct pblk *pblk, int pos)
a4bd217b 1940{
3eaa11e2 1941 struct pblk_lun *rlun = &pblk->luns[pos];
a4bd217b
JG
1942 int ret;
1943
1944 /*
1945 * Only send one inflight I/O per LUN. Since we map at a page
 1946 	 * granularity, all ppas in the I/O will map to the same LUN
1947 */
a4bd217b 1948
3eaa11e2 1949 ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
c5493845 1950 if (ret == -ETIME || ret == -EINTR)
4e495a46
MB
1951 pblk_err(pblk, "taking lun semaphore timed out: err %d\n",
1952 -ret);
a4bd217b
JG
1953}
1954
43241cfe 1955void pblk_down_chunk(struct pblk *pblk, struct ppa_addr ppa)
3eaa11e2
JG
1956{
1957 struct nvm_tgt_dev *dev = pblk->dev;
1958 struct nvm_geo *geo = &dev->geo;
43241cfe 1959 int pos = pblk_ppa_to_pos(geo, ppa);
3eaa11e2 1960
43241cfe 1961 __pblk_down_chunk(pblk, pos);
3eaa11e2
JG
1962}
1963
43241cfe 1964void pblk_down_rq(struct pblk *pblk, struct ppa_addr ppa,
3eaa11e2
JG
1965 unsigned long *lun_bitmap)
1966{
1967 struct nvm_tgt_dev *dev = pblk->dev;
1968 struct nvm_geo *geo = &dev->geo;
43241cfe 1969 int pos = pblk_ppa_to_pos(geo, ppa);
3eaa11e2
JG
1970
 1971 	/* If the LUN has been locked for this same request, do not attempt to
1972 * lock it again
1973 */
1974 if (test_and_set_bit(pos, lun_bitmap))
1975 return;
1976
43241cfe 1977 __pblk_down_chunk(pblk, pos);
3eaa11e2
JG
1978}
1979
43241cfe 1980void pblk_up_chunk(struct pblk *pblk, struct ppa_addr ppa)
3eaa11e2
JG
1981{
1982 struct nvm_tgt_dev *dev = pblk->dev;
1983 struct nvm_geo *geo = &dev->geo;
1984 struct pblk_lun *rlun;
43241cfe 1985 int pos = pblk_ppa_to_pos(geo, ppa);
3eaa11e2
JG
1986
1987 rlun = &pblk->luns[pos];
1988 up(&rlun->wr_sem);
1989}
1990
e99e802f 1991void pblk_up_rq(struct pblk *pblk, unsigned long *lun_bitmap)
a4bd217b
JG
1992{
1993 struct nvm_tgt_dev *dev = pblk->dev;
1994 struct nvm_geo *geo = &dev->geo;
1995 struct pblk_lun *rlun;
a40afad9 1996 int num_lun = geo->all_luns;
a4bd217b
JG
1997 int bit = -1;
1998
a40afad9 1999 while ((bit = find_next_bit(lun_bitmap, num_lun, bit + 1)) < num_lun) {
a4bd217b
JG
2000 rlun = &pblk->luns[bit];
2001 up(&rlun->wr_sem);
2002 }
a4bd217b
JG
2003}
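/*
 * Editor's sketch (illustrative only): pblk_down_rq()/pblk_up_rq() bracket a
 * single vectored request. The lun_bitmap remembers which LUN semaphores the
 * request already holds so each one is taken and released exactly once. The
 * bitmap sizing below mirrors the geo->all_luns walk in pblk_up_rq() above;
 * the helper itself and its error handling are assumptions, not pblk code.
 */
static int __maybe_unused pblk_bracket_rq_sketch(struct pblk *pblk,
						 struct ppa_addr *ppa_list,
						 int nr_ppas)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	unsigned long *lun_bitmap;
	int i;

	lun_bitmap = kcalloc(BITS_TO_LONGS(geo->all_luns),
			     sizeof(unsigned long), GFP_KERNEL);
	if (!lun_bitmap)
		return -ENOMEM;

	for (i = 0; i < nr_ppas; i++)
		pblk_down_rq(pblk, ppa_list[i], lun_bitmap);

	/* ... build and submit the request, wait for completion ... */

	pblk_up_rq(pblk, lun_bitmap);
	kfree(lun_bitmap);

	return 0;
}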
2004
2005void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
2006{
9f6cb13b 2007 struct ppa_addr ppa_l2p;
a4bd217b
JG
2008
2009 /* logic error: lba out-of-bounds. Ignore update */
2010 if (!(lba < pblk->rl.nr_secs)) {
2011 WARN(1, "pblk: corrupted L2P map request\n");
2012 return;
2013 }
2014
2015 spin_lock(&pblk->trans_lock);
9f6cb13b 2016 ppa_l2p = pblk_trans_map_get(pblk, lba);
a4bd217b 2017
9f6cb13b
JG
2018 if (!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p))
2019 pblk_map_invalidate(pblk, ppa_l2p);
a4bd217b
JG
2020
2021 pblk_trans_map_set(pblk, lba, ppa);
2022 spin_unlock(&pblk->trans_lock);
2023}
2024
2025void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
2026{
d340121e 2027
880eda54 2028#ifdef CONFIG_NVM_PBLK_DEBUG
a4bd217b
JG
2029 /* Callers must ensure that the ppa points to a cache address */
2030 BUG_ON(!pblk_addr_in_cache(ppa));
2031 BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
2032#endif
2033
2034 pblk_update_map(pblk, lba, ppa);
2035}
2036
9f6cb13b 2037int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
d340121e 2038 struct pblk_line *gc_line, u64 paddr_gc)
a4bd217b 2039{
d340121e 2040 struct ppa_addr ppa_l2p, ppa_gc;
a4bd217b
JG
2041 int ret = 1;
2042
880eda54 2043#ifdef CONFIG_NVM_PBLK_DEBUG
a4bd217b 2044 /* Callers must ensure that the ppa points to a cache address */
9f6cb13b
JG
2045 BUG_ON(!pblk_addr_in_cache(ppa_new));
2046 BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new)));
a4bd217b
JG
2047#endif
2048
2049 /* logic error: lba out-of-bounds. Ignore update */
2050 if (!(lba < pblk->rl.nr_secs)) {
2051 WARN(1, "pblk: corrupted L2P map request\n");
2052 return 0;
2053 }
2054
2055 spin_lock(&pblk->trans_lock);
9f6cb13b 2056 ppa_l2p = pblk_trans_map_get(pblk, lba);
d340121e 2057 ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, gc_line->id);
a4bd217b 2058
d340121e
JG
2059 if (!pblk_ppa_comp(ppa_l2p, ppa_gc)) {
2060 spin_lock(&gc_line->lock);
2061 WARN(!test_bit(paddr_gc, gc_line->invalid_bitmap),
2062 "pblk: corrupted GC update");
2063 spin_unlock(&gc_line->lock);
9f6cb13b 2064
a4bd217b
JG
2065 ret = 0;
2066 goto out;
2067 }
2068
9f6cb13b 2069 pblk_trans_map_set(pblk, lba, ppa_new);
a4bd217b
JG
2070out:
2071 spin_unlock(&pblk->trans_lock);
2072 return ret;
2073}
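/*
 * Editor's usage sketch: the return value of pblk_update_map_gc()
 * distinguishes a stale GC update (0, a user write remapped the lba while GC
 * was moving it) from a successful remap (1). A GC-side caller could account
 * for dropped sectors roughly as below; gc_stale_secs is a hypothetical
 * counter used only for illustration.
 */
static void __maybe_unused pblk_gc_remap_sketch(struct pblk *pblk, sector_t lba,
						struct ppa_addr ppa_cache,
						struct pblk_line *gc_line,
						u64 paddr_gc,
						atomic64_t *gc_stale_secs)
{
	if (!pblk_update_map_gc(pblk, lba, ppa_cache, gc_line, paddr_gc))
		atomic64_inc(gc_stale_secs);	/* the user write won the race */
}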
2074
9f6cb13b
JG
2075void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
2076 struct ppa_addr ppa_mapped, struct ppa_addr ppa_cache)
a4bd217b 2077{
9f6cb13b 2078 struct ppa_addr ppa_l2p;
a4bd217b 2079
880eda54 2080#ifdef CONFIG_NVM_PBLK_DEBUG
a4bd217b 2081 /* Callers must ensure that the ppa points to a device address */
9f6cb13b 2082 BUG_ON(pblk_addr_in_cache(ppa_mapped));
a4bd217b
JG
2083#endif
2084 /* Invalidate and discard padded entries */
2085 if (lba == ADDR_EMPTY) {
76758390 2086 atomic64_inc(&pblk->pad_wa);
880eda54 2087#ifdef CONFIG_NVM_PBLK_DEBUG
a4bd217b
JG
2088 atomic_long_inc(&pblk->padded_wb);
2089#endif
9f6cb13b
JG
2090 if (!pblk_ppa_empty(ppa_mapped))
2091 pblk_map_invalidate(pblk, ppa_mapped);
a4bd217b
JG
2092 return;
2093 }
2094
2095 /* logic error: lba out-of-bounds. Ignore update */
2096 if (!(lba < pblk->rl.nr_secs)) {
2097 WARN(1, "pblk: corrupted L2P map request\n");
2098 return;
2099 }
2100
2101 spin_lock(&pblk->trans_lock);
9f6cb13b 2102 ppa_l2p = pblk_trans_map_get(pblk, lba);
a4bd217b
JG
2103
2104 /* Do not update L2P if the cacheline has been updated. In this case,
2105 * the mapped ppa must be invalidated
2106 */
9f6cb13b
JG
2107 if (!pblk_ppa_comp(ppa_l2p, ppa_cache)) {
2108 if (!pblk_ppa_empty(ppa_mapped))
2109 pblk_map_invalidate(pblk, ppa_mapped);
a4bd217b
JG
2110 goto out;
2111 }
2112
880eda54 2113#ifdef CONFIG_NVM_PBLK_DEBUG
9f6cb13b 2114 WARN_ON(!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p));
a4bd217b
JG
2115#endif
2116
9f6cb13b 2117 pblk_trans_map_set(pblk, lba, ppa_mapped);
a4bd217b
JG
2118out:
2119 spin_unlock(&pblk->trans_lock);
2120}
2121
2122void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
2123 sector_t blba, int nr_secs)
2124{
2125 int i;
2126
2127 spin_lock(&pblk->trans_lock);
7bd4d370
JG
2128 for (i = 0; i < nr_secs; i++) {
2129 struct ppa_addr ppa;
2130
2131 ppa = ppas[i] = pblk_trans_map_get(pblk, blba + i);
2132
2133 /* If the L2P entry maps to a line, the reference is valid */
2134 if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
cb21665c 2135 struct pblk_line *line = pblk_ppa_to_line(pblk, ppa);
7bd4d370
JG
2136
2137 kref_get(&line->ref);
2138 }
2139 }
a4bd217b
JG
2140 spin_unlock(&pblk->trans_lock);
2141}
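/*
 * Editor's sketch: every line reference taken under trans_lock in
 * pblk_lookup_l2p_seq() above must be dropped once the read that used the
 * mapping completes, otherwise the line can never be erased and reused. The
 * release side would look roughly like this; pblk_line_put() is assumed to be
 * the kref release helper used elsewhere in pblk.
 */
static void __maybe_unused pblk_put_line_refs_sketch(struct pblk *pblk,
						     struct ppa_addr *ppas,
						     int nr_secs)
{
	int i;

	for (i = 0; i < nr_secs; i++) {
		struct ppa_addr ppa = ppas[i];

		/* Only device-resident sectors hold a line reference */
		if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa))
			kref_put(&pblk_ppa_to_line(pblk, ppa)->ref,
				 pblk_line_put);
	}
}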
2142
2143void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
2144 u64 *lba_list, int nr_secs)
2145{
d340121e 2146 u64 lba;
a4bd217b
JG
2147 int i;
2148
2149 spin_lock(&pblk->trans_lock);
2150 for (i = 0; i < nr_secs; i++) {
2151 lba = lba_list[i];
d340121e 2152 if (lba != ADDR_EMPTY) {
a4bd217b
JG
2153 /* logic error: lba out-of-bounds. Ignore update */
2154 if (!(lba < pblk->rl.nr_secs)) {
2155 WARN(1, "pblk: corrupted L2P map request\n");
2156 continue;
2157 }
2158 ppas[i] = pblk_trans_map_get(pblk, lba);
2159 }
2160 }
2161 spin_unlock(&pblk->trans_lock);
2162}