// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-gc.c - pblk's garbage collector
 */

#include "pblk.h"
#include "pblk-trace.h"
#include <linux/delay.h>

static void pblk_gc_free_gc_rq(struct pblk_gc_rq *gc_rq)
{
	if (gc_rq->data)
		vfree(gc_rq->data);
	kfree(gc_rq);
}

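/* Drain the GC write list: write each queued GC request to the write cache,
 * drop the line reference it holds and free it. Returns 1 if the list was
 * empty, 0 otherwise.
 */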
static int pblk_gc_write(struct pblk *pblk)
{
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_gc_rq *gc_rq, *tgc_rq;
	LIST_HEAD(w_list);

	spin_lock(&gc->w_lock);
	if (list_empty(&gc->w_list)) {
		spin_unlock(&gc->w_lock);
		return 1;
	}

	list_cut_position(&w_list, &gc->w_list, gc->w_list.prev);
	gc->w_entries = 0;
	spin_unlock(&gc->w_lock);

	list_for_each_entry_safe(gc_rq, tgc_rq, &w_list, list) {
		pblk_write_gc_to_cache(pblk, gc_rq);
		list_del(&gc_rq->list);
		kref_put(&gc_rq->line->ref, pblk_line_put);
		pblk_gc_free_gc_rq(gc_rq);
	}

	return 0;
}

static void pblk_gc_writer_kick(struct pblk_gc *gc)
{
	wake_up_process(gc->gc_writer_ts);
}

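/* Return a line that was taken for GC back to the closed state and put it
 * on the GC group list matching its current number of valid sectors.
 */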
void pblk_put_line_back(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list;

	spin_lock(&l_mg->gc_lock);
	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_GC);
	line->state = PBLK_LINESTATE_CLOSED;
	trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);

	/* Reset gc_group so that pblk_line_gc_list() returns the proper
	 * move_list, since the line is currently not on any of the GC
	 * lists.
	 */
	line->gc_group = PBLK_LINEGC_NONE;
	move_list = pblk_line_gc_list(pblk, line);
	spin_unlock(&line->lock);
	list_add_tail(&line->list, move_list);
	spin_unlock(&l_mg->gc_lock);
}

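/* Read-side work item: read the valid sectors for one GC request from the
 * victim line, then queue the request on the GC write list, waiting while
 * the write list is at its PBLK_GC_RQ_QD limit.
 */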
static void pblk_gc_line_ws(struct work_struct *work)
{
	struct pblk_line_ws *gc_rq_ws = container_of(work,
						struct pblk_line_ws, ws);
	struct pblk *pblk = gc_rq_ws->pblk;
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_line *line = gc_rq_ws->line;
	struct pblk_gc_rq *gc_rq = gc_rq_ws->priv;
	int ret;

	up(&gc->gc_sem);

	/* Read from GC victim block */
	ret = pblk_submit_read_gc(pblk, gc_rq);
	if (ret) {
		line->w_err_gc->has_gc_err = 1;
		goto out;
	}

	if (!gc_rq->secs_to_gc)
		goto out;

retry:
	spin_lock(&gc->w_lock);
	if (gc->w_entries >= PBLK_GC_RQ_QD) {
		spin_unlock(&gc->w_lock);
		pblk_gc_writer_kick(&pblk->gc);
		usleep_range(128, 256);
		goto retry;
	}
	gc->w_entries++;
	list_add_tail(&gc_rq->list, &gc->w_list);
	spin_unlock(&gc->w_lock);

	pblk_gc_writer_kick(&pblk->gc);

	kfree(gc_rq_ws);
	return;

out:
	pblk_gc_free_gc_rq(gc_rq);
	kref_put(&line->ref, pblk_line_put);
	kfree(gc_rq_ws);
}

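/* Read the line's emeta from the device, verify it, and return a copy of
 * its LBA list, or NULL if the read fails or emeta is inconsistent.
 */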
static __le64 *get_lba_list_from_emeta(struct pblk *pblk,
				       struct pblk_line *line)
{
	struct line_emeta *emeta_buf;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	unsigned int lba_list_size = lm->emeta_len[2];
	__le64 *lba_list;
	int ret;

	emeta_buf = pblk_malloc(lm->emeta_len[0],
				l_mg->emeta_alloc_type, GFP_KERNEL);
	if (!emeta_buf)
		return NULL;

	ret = pblk_line_emeta_read(pblk, line, emeta_buf);
	if (ret) {
		pblk_err(pblk, "line %d read emeta failed (%d)\n",
				line->id, ret);
		pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);
		return NULL;
	}

	/* If this check fails, it means that emeta is corrupted.
	 * For now, leave the line untouched.
	 * TODO: Implement a recovery routine that scans and moves
	 * all sectors on the line.
	 */
	ret = pblk_recov_check_emeta(pblk, emeta_buf);
	if (ret) {
		pblk_err(pblk, "inconsistent emeta (line %d)\n",
				line->id);
		pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);
		return NULL;
	}

	lba_list = pblk_malloc(lba_list_size,
			       l_mg->emeta_alloc_type, GFP_KERNEL);
	if (lba_list)
		memcpy(lba_list, emeta_to_lbas(pblk, emeta_buf), lba_list_size);

	pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);

	return lba_list;
}

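/* Prepare a victim line for GC: snapshot its invalid bitmap and LBA list,
 * chop the remaining valid sectors into GC requests of at most
 * max_write_pgs sectors each, and queue every request on the line reader
 * workqueue.
 */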
static void pblk_gc_line_prepare_ws(struct work_struct *work)
{
	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
									ws);
	struct pblk *pblk = line_ws->pblk;
	struct pblk_line *line = line_ws->line;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_line_ws *gc_rq_ws;
	struct pblk_gc_rq *gc_rq;
	__le64 *lba_list;
	unsigned long *invalid_bitmap;
	int sec_left, nr_secs, bit;

	invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!invalid_bitmap)
		goto fail_free_ws;

	if (line->w_err_gc->has_write_err) {
		lba_list = line->w_err_gc->lba_list;
		line->w_err_gc->lba_list = NULL;
	} else {
		lba_list = get_lba_list_from_emeta(pblk, line);
		if (!lba_list) {
			pblk_err(pblk, "could not interpret emeta (line %d)\n",
					line->id);
			goto fail_free_invalid_bitmap;
		}
	}

	spin_lock(&line->lock);
	bitmap_copy(invalid_bitmap, line->invalid_bitmap, lm->sec_per_line);
	sec_left = pblk_line_vsc(line);
	spin_unlock(&line->lock);

	if (sec_left < 0) {
		pblk_err(pblk, "corrupted GC line (%d)\n", line->id);
		goto fail_free_lba_list;
	}

	bit = -1;
next_rq:
	gc_rq = kmalloc(sizeof(struct pblk_gc_rq), GFP_KERNEL);
	if (!gc_rq)
		goto fail_free_lba_list;

	nr_secs = 0;
	do {
		bit = find_next_zero_bit(invalid_bitmap, lm->sec_per_line,
								bit + 1);
		if (bit > line->emeta_ssec)
			break;

		gc_rq->paddr_list[nr_secs] = bit;
		gc_rq->lba_list[nr_secs++] = le64_to_cpu(lba_list[bit]);
	} while (nr_secs < pblk->max_write_pgs);

	if (unlikely(!nr_secs)) {
		kfree(gc_rq);
		goto out;
	}

	gc_rq->nr_secs = nr_secs;
	gc_rq->line = line;

	gc_rq->data = vmalloc(array_size(gc_rq->nr_secs, geo->csecs));
	if (!gc_rq->data)
		goto fail_free_gc_rq;

	gc_rq_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL);
	if (!gc_rq_ws)
		goto fail_free_gc_data;

	gc_rq_ws->pblk = pblk;
	gc_rq_ws->line = line;
	gc_rq_ws->priv = gc_rq;

	/* The write GC path can be much slower than the read GC one due to
	 * the budget imposed by the rate-limiter. Balance in case we get
	 * back pressure from the write GC path.
	 */
	while (down_timeout(&gc->gc_sem, msecs_to_jiffies(30000)))
		io_schedule();

	kref_get(&line->ref);

	INIT_WORK(&gc_rq_ws->ws, pblk_gc_line_ws);
	queue_work(gc->gc_line_reader_wq, &gc_rq_ws->ws);

	sec_left -= nr_secs;
	if (sec_left > 0)
		goto next_rq;

out:
	pblk_mfree(lba_list, l_mg->emeta_alloc_type);
	kfree(line_ws);
	kfree(invalid_bitmap);

	kref_put(&line->ref, pblk_line_put);
	atomic_dec(&gc->read_inflight_gc);

	return;

fail_free_gc_data:
	vfree(gc_rq->data);
fail_free_gc_rq:
	kfree(gc_rq);
fail_free_lba_list:
	pblk_mfree(lba_list, l_mg->emeta_alloc_type);
fail_free_invalid_bitmap:
	kfree(invalid_bitmap);
fail_free_ws:
	kfree(line_ws);

	/* The line goes back to the closed state, so we cannot release the
	 * additional reference on it; that is only done on the GC-to-free
	 * line state transition.
	 */
	pblk_put_line_back(pblk, line);
	atomic_dec(&gc->read_inflight_gc);

	pblk_err(pblk, "failed to GC line %d\n", line->id);
}

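/* Queue a victim line on the GC reader workqueue for preparation. */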
static int pblk_gc_line(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_line_ws *line_ws;

	pblk_debug(pblk, "line '%d' being reclaimed for GC\n", line->id);

	line_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL);
	if (!line_ws)
		return -ENOMEM;

	line_ws->pblk = pblk;
	line_ws->line = line;

	atomic_inc(&gc->pipeline_gc);
	INIT_WORK(&line_ws->ws, pblk_gc_line_prepare_ws);
	queue_work(gc->gc_reader_wq, &line_ws->ws);

	return 0;
}

static void pblk_gc_reader_kick(struct pblk_gc *gc)
{
	wake_up_process(gc->gc_reader_ts);
}

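/* Wake the GC kthreads and, as long as GC is enabled, re-arm the GC timer. */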
static void pblk_gc_kick(struct pblk *pblk)
{
	struct pblk_gc *gc = &pblk->gc;

	pblk_gc_writer_kick(gc);
	pblk_gc_reader_kick(gc);

	/* If we're shutting down GC, let's not start it up again */
	if (gc->gc_enabled) {
		wake_up_process(gc->gc_ts);
		mod_timer(&gc->gc_timer,
			  jiffies + msecs_to_jiffies(GC_TIME_MSECS));
	}
}

static int pblk_gc_read(struct pblk *pblk)
{
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_line *line;

	spin_lock(&gc->r_lock);
	if (list_empty(&gc->r_list)) {
		spin_unlock(&gc->r_lock);
		return 1;
	}

	line = list_first_entry(&gc->r_list, struct pblk_line, list);
	list_del(&line->list);
	spin_unlock(&gc->r_lock);

	pblk_gc_kick(pblk);

	if (pblk_gc_line(pblk, line)) {
		pblk_err(pblk, "failed to GC line %d\n", line->id);
		/* rollback */
		spin_lock(&gc->r_lock);
		list_add_tail(&line->list, &gc->r_list);
		spin_unlock(&gc->r_lock);
	}

	return 0;
}

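/* Select the line with the fewest valid sectors in a group, considering
 * only lines with no sectors pending a map update. Returns NULL if no
 * line in the group qualifies.
 */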
static struct pblk_line *pblk_gc_get_victim_line(struct pblk *pblk,
						 struct list_head *group_list)
{
	struct pblk_line *line, *victim;
	unsigned int line_vsc = ~0x0L, victim_vsc = ~0x0L;

	victim = list_first_entry(group_list, struct pblk_line, list);

	list_for_each_entry(line, group_list, list) {
		if (!atomic_read(&line->sec_to_update))
			line_vsc = le32_to_cpu(*line->vsc);
		if (line_vsc < victim_vsc) {
			victim = line;
			victim_vsc = le32_to_cpu(*victim->vsc);
		}
	}

	if (victim_vsc == ~0x0)
		return NULL;

	return victim;
}

static bool pblk_gc_should_run(struct pblk_gc *gc, struct pblk_rl *rl)
{
	unsigned int nr_blocks_free, nr_blocks_need;
	unsigned int werr_lines = atomic_read(&rl->werr_lines);

	nr_blocks_need = pblk_rl_high_thrs(rl);
	nr_blocks_free = pblk_rl_nr_free_blks(rl);

	/* This is not critical, no need to take lock here */
	return ((werr_lines > 0) ||
		((gc->gc_active) && (nr_blocks_need > nr_blocks_free)));
}

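/* Fully invalidated lines hold no user data: move them from gc_full_list
 * to the GC state and drop the reference that keeps them from being freed.
 */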
void pblk_gc_free_full_lines(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_line *line;

	do {
		spin_lock(&l_mg->gc_lock);
		if (list_empty(&l_mg->gc_full_list)) {
			spin_unlock(&l_mg->gc_lock);
			return;
		}

		line = list_first_entry(&l_mg->gc_full_list,
							struct pblk_line, list);

		spin_lock(&line->lock);
		WARN_ON(line->state != PBLK_LINESTATE_CLOSED);
		line->state = PBLK_LINESTATE_GC;
		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);
		spin_unlock(&line->lock);

		list_del(&line->list);
		spin_unlock(&l_mg->gc_lock);

		atomic_inc(&gc->pipeline_gc);
		kref_put(&line->ref, pblk_line_put);
	} while (1);
}

/*
 * Lines with no valid sectors will be returned to the free list immediately.
 * If GC is activated - either because the free block count is under the
 * configured threshold, or because it is being forced from user space - only
 * lines with a high count of invalid sectors will be recycled.
 */
static void pblk_gc_run(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_line *line;
	struct list_head *group_list;
	bool run_gc;
	int read_inflight_gc, gc_group = 0, prev_group = 0;

	pblk_gc_free_full_lines(pblk);

	run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
	if (!run_gc || (atomic_read(&gc->read_inflight_gc) >= PBLK_GC_L_QD))
		return;

next_gc_group:
	group_list = l_mg->gc_lists[gc_group++];

	do {
		spin_lock(&l_mg->gc_lock);

		line = pblk_gc_get_victim_line(pblk, group_list);
		if (!line) {
			spin_unlock(&l_mg->gc_lock);
			break;
		}

		spin_lock(&line->lock);
		WARN_ON(line->state != PBLK_LINESTATE_CLOSED);
		line->state = PBLK_LINESTATE_GC;
		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);
		spin_unlock(&line->lock);

		list_del(&line->list);
		spin_unlock(&l_mg->gc_lock);

		spin_lock(&gc->r_lock);
		list_add_tail(&line->list, &gc->r_list);
		spin_unlock(&gc->r_lock);

		read_inflight_gc = atomic_inc_return(&gc->read_inflight_gc);
		pblk_gc_reader_kick(gc);

		prev_group = 1;

		/* No need to queue up more GC lines than we can handle */
		run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
		if (!run_gc || read_inflight_gc >= PBLK_GC_L_QD)
			break;
	} while (1);

	if (!prev_group && pblk->rl.rb_state > gc_group &&
						gc_group < PBLK_GC_NR_LISTS)
		goto next_gc_group;
}

static void pblk_gc_timer(struct timer_list *t)
{
	struct pblk *pblk = from_timer(pblk, t, gc.gc_timer);

	pblk_gc_kick(pblk);
}

static int pblk_gc_ts(void *data)
{
	struct pblk *pblk = data;

	while (!kthread_should_stop()) {
		pblk_gc_run(pblk);
		set_current_state(TASK_INTERRUPTIBLE);
		io_schedule();
	}

	return 0;
}

static int pblk_gc_writer_ts(void *data)
{
	struct pblk *pblk = data;

	while (!kthread_should_stop()) {
		if (!pblk_gc_write(pblk))
			continue;
		set_current_state(TASK_INTERRUPTIBLE);
		io_schedule();
	}

	return 0;
}

static int pblk_gc_reader_ts(void *data)
{
	struct pblk *pblk = data;
	struct pblk_gc *gc = &pblk->gc;

	while (!kthread_should_stop()) {
		if (!pblk_gc_read(pblk))
			continue;
		set_current_state(TASK_INTERRUPTIBLE);
		io_schedule();
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	pblk_info(pblk, "flushing gc pipeline, %d lines left\n",
			atomic_read(&gc->pipeline_gc));
#endif

	do {
		if (!atomic_read(&gc->pipeline_gc))
			break;

		schedule();
	} while (1);

	return 0;
}

static void pblk_gc_start(struct pblk *pblk)
{
	pblk->gc.gc_active = 1;
	pblk_debug(pblk, "gc start\n");
}

void pblk_gc_should_start(struct pblk *pblk)
{
	struct pblk_gc *gc = &pblk->gc;

	if (gc->gc_enabled && !gc->gc_active) {
		pblk_gc_start(pblk);
		pblk_gc_kick(pblk);
	}
}

void pblk_gc_should_stop(struct pblk *pblk)
{
	struct pblk_gc *gc = &pblk->gc;

	if (gc->gc_active && !gc->gc_forced)
		gc->gc_active = 0;
}

void pblk_gc_should_kick(struct pblk *pblk)
{
	pblk_rl_update_rates(&pblk->rl);
}

void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
			      int *gc_active)
{
	struct pblk_gc *gc = &pblk->gc;

	spin_lock(&gc->lock);
	*gc_enabled = gc->gc_enabled;
	*gc_active = gc->gc_active;
	spin_unlock(&gc->lock);
}

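/* Sysfs handler: force == 1 forces GC on and enables it; force == 0
 * clears both the forced and enabled flags.
 */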
int pblk_gc_sysfs_force(struct pblk *pblk, int force)
{
	struct pblk_gc *gc = &pblk->gc;

	if (force < 0 || force > 1)
		return -EINVAL;

	spin_lock(&gc->lock);
	gc->gc_forced = force;

	if (force)
		gc->gc_enabled = 1;
	else
		gc->gc_enabled = 0;
	spin_unlock(&gc->lock);

	pblk_gc_should_start(pblk);

	return 0;
}

int pblk_gc_init(struct pblk *pblk)
{
	struct pblk_gc *gc = &pblk->gc;
	int ret;

	gc->gc_ts = kthread_create(pblk_gc_ts, pblk, "pblk-gc-ts");
	if (IS_ERR(gc->gc_ts)) {
		pblk_err(pblk, "could not allocate GC main kthread\n");
		return PTR_ERR(gc->gc_ts);
	}

	gc->gc_writer_ts = kthread_create(pblk_gc_writer_ts, pblk,
							"pblk-gc-writer-ts");
	if (IS_ERR(gc->gc_writer_ts)) {
		pblk_err(pblk, "could not allocate GC writer kthread\n");
		ret = PTR_ERR(gc->gc_writer_ts);
		goto fail_free_main_kthread;
	}

	gc->gc_reader_ts = kthread_create(pblk_gc_reader_ts, pblk,
							"pblk-gc-reader-ts");
	if (IS_ERR(gc->gc_reader_ts)) {
		pblk_err(pblk, "could not allocate GC reader kthread\n");
		ret = PTR_ERR(gc->gc_reader_ts);
		goto fail_free_writer_kthread;
	}

	timer_setup(&gc->gc_timer, pblk_gc_timer, 0);
	mod_timer(&gc->gc_timer, jiffies + msecs_to_jiffies(GC_TIME_MSECS));

	gc->gc_active = 0;
	gc->gc_forced = 0;
	gc->gc_enabled = 1;
	gc->w_entries = 0;
	atomic_set(&gc->read_inflight_gc, 0);
	atomic_set(&gc->pipeline_gc, 0);

	/* Workqueue that reads valid sectors from a line and submits them to
	 * the GC writer to be recycled.
	 */
	gc->gc_line_reader_wq = alloc_workqueue("pblk-gc-line-reader-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_GC_MAX_READERS);
	if (!gc->gc_line_reader_wq) {
		pblk_err(pblk, "could not allocate GC line reader workqueue\n");
		ret = -ENOMEM;
		goto fail_free_reader_kthread;
	}

	/* Workqueue that prepares lines for GC */
	gc->gc_reader_wq = alloc_workqueue("pblk-gc-line_wq",
					WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
	if (!gc->gc_reader_wq) {
		pblk_err(pblk, "could not allocate GC reader workqueue\n");
		ret = -ENOMEM;
		goto fail_free_reader_line_wq;
	}

	spin_lock_init(&gc->lock);
	spin_lock_init(&gc->w_lock);
	spin_lock_init(&gc->r_lock);

	sema_init(&gc->gc_sem, PBLK_GC_RQ_QD);

	INIT_LIST_HEAD(&gc->w_list);
	INIT_LIST_HEAD(&gc->r_list);

	return 0;

fail_free_reader_line_wq:
	destroy_workqueue(gc->gc_line_reader_wq);
fail_free_reader_kthread:
	kthread_stop(gc->gc_reader_ts);
fail_free_writer_kthread:
	kthread_stop(gc->gc_writer_ts);
fail_free_main_kthread:
	kthread_stop(gc->gc_ts);

	return ret;
}

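/* Tear down GC: disable it, stop the timer and the main/reader kthreads,
 * optionally flush outstanding work, then destroy the workqueues and stop
 * the writer kthread.
 */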
void pblk_gc_exit(struct pblk *pblk, bool graceful)
{
	struct pblk_gc *gc = &pblk->gc;

	gc->gc_enabled = 0;
	del_timer_sync(&gc->gc_timer);
	gc->gc_active = 0;

	if (gc->gc_ts)
		kthread_stop(gc->gc_ts);

	if (gc->gc_reader_ts)
		kthread_stop(gc->gc_reader_ts);

	if (graceful) {
		flush_workqueue(gc->gc_reader_wq);
		flush_workqueue(gc->gc_line_reader_wq);
	}

	destroy_workqueue(gc->gc_reader_wq);
	destroy_workqueue(gc->gc_line_reader_wq);

	if (gc->gc_writer_ts)
		kthread_stop(gc->gc_writer_ts);
}