/*
 * Copyright (C) 2016 CNEX Labs
 *
 * Based upon the circular ringbuffer.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-rb.c - pblk's write buffer
 */

#include <linux/circ_buf.h>

#include "pblk.h"

static DECLARE_RWSEM(pblk_rb_lock);

void pblk_rb_data_free(struct pblk_rb *rb)
{
        struct pblk_rb_pages *p, *t;

        down_write(&pblk_rb_lock);
        list_for_each_entry_safe(p, t, &rb->pages, list) {
                free_pages((unsigned long)page_address(p->pages), p->order);
                list_del(&p->list);
                kfree(p);
        }
        up_write(&pblk_rb_lock);
}

/*
 * Initialize ring buffer. The data and metadata buffers must be previously
 * allocated and their size must be a power of two
 * (Documentation/circular-buffers.txt)
 */
int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
                 unsigned int power_size, unsigned int power_seg_sz)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        unsigned int init_entry = 0;
        unsigned int alloc_order = power_size;
        unsigned int max_order = MAX_ORDER - 1;
        unsigned int order, iter;

        down_write(&pblk_rb_lock);
        rb->entries = rb_entry_base;
        rb->seg_size = (1 << power_seg_sz);
        rb->nr_entries = (1 << power_size);
        rb->mem = rb->subm = rb->sync = rb->l2p_update = 0;
        rb->sync_point = EMPTY_ENTRY;

        spin_lock_init(&rb->w_lock);
        spin_lock_init(&rb->s_lock);

        INIT_LIST_HEAD(&rb->pages);

        if (alloc_order >= max_order) {
                order = max_order;
                iter = (1 << (alloc_order - max_order));
        } else {
                order = alloc_order;
                iter = 1;
        }

        do {
                struct pblk_rb_entry *entry;
                struct pblk_rb_pages *page_set;
                void *kaddr;
                unsigned long set_size;
                int i;

                page_set = kmalloc(sizeof(struct pblk_rb_pages), GFP_KERNEL);
                if (!page_set) {
                        up_write(&pblk_rb_lock);
                        return -ENOMEM;
                }

                page_set->order = order;
                page_set->pages = alloc_pages(GFP_KERNEL, order);
                if (!page_set->pages) {
                        kfree(page_set);
                        pblk_rb_data_free(rb);
                        up_write(&pblk_rb_lock);
                        return -ENOMEM;
                }
                kaddr = page_address(page_set->pages);

                entry = &rb->entries[init_entry];
                entry->data = kaddr;
                entry->cacheline = pblk_cacheline_to_addr(init_entry++);
                entry->w_ctx.flags = PBLK_WRITABLE_ENTRY;

                set_size = (1 << order);
                for (i = 1; i < set_size; i++) {
                        entry = &rb->entries[init_entry];
                        entry->cacheline = pblk_cacheline_to_addr(init_entry++);
                        entry->data = kaddr + (i * rb->seg_size);
                        entry->w_ctx.flags = PBLK_WRITABLE_ENTRY;
                        bio_list_init(&entry->w_ctx.bios);
                }

                list_add_tail(&page_set->list, &rb->pages);
                iter--;
        } while (iter > 0);
        up_write(&pblk_rb_lock);

#ifdef CONFIG_NVM_DEBUG
        atomic_set(&rb->inflight_sync_point, 0);
#endif

        /*
         * Initialize rate-limiter, which controls access to the write buffer
         * by user and GC I/O
         */
        pblk_rl_init(&pblk->rl, rb->nr_entries);

        return 0;
}

/*
 * pblk_rb_calculate_size -- calculate the size of the write buffer
 */
unsigned int pblk_rb_calculate_size(unsigned int nr_entries)
{
        /* Alloc a write buffer that can at least fit 128 entries */
        return (1 << max(get_count_order(nr_entries), 7));
}

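/*
 * For illustration: with nr_entries = 100, get_count_order(100) = 7, so the
 * buffer is sized to 1 << max(7, 7) = 128 entries; with nr_entries = 1000,
 * get_count_order(1000) = 10, giving 1024 entries. The max() against 7 is
 * what enforces the 128-entry minimum noted above.
 */
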
void *pblk_rb_entries_ref(struct pblk_rb *rb)
{
        return rb->entries;
}

static void clean_wctx(struct pblk_w_ctx *w_ctx)
{
        int flags;

try:
        flags = READ_ONCE(w_ctx->flags);
        if (!(flags & PBLK_SUBMITTED_ENTRY))
                goto try;

        /* Release flags on context. Protect from writes and reads */
        smp_store_release(&w_ctx->flags, PBLK_WRITABLE_ENTRY);
        pblk_ppa_set_empty(&w_ctx->ppa);
}

#define pblk_rb_ring_count(head, tail, size) CIRC_CNT(head, tail, size)
#define pblk_rb_ring_space(rb, head, tail, size) \
                                        (CIRC_SPACE(head, tail, size))

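/*
 * For illustration: with the power-of-two sizes used here,
 * CIRC_CNT(head, tail, size) evaluates to (head - tail) & (size - 1) and
 * CIRC_SPACE() to size - 1 - CIRC_CNT(), i.e. one slot is always kept free so
 * that head == tail means "empty" rather than "full". For example, with
 * size = 1024, head = 5 and tail = 1020:
 *
 *      pblk_rb_ring_count(5, 1020, 1024)     == (5 - 1020) & 1023 == 9
 *      pblk_rb_ring_space(rb, 5, 1020, 1024) == 1023 - 9          == 1014
 */
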
/*
 * Buffer space is calculated with respect to the back pointer signaling
 * synchronized entries to the media.
 */
static unsigned int pblk_rb_space(struct pblk_rb *rb)
{
        unsigned int mem = READ_ONCE(rb->mem);
        unsigned int sync = READ_ONCE(rb->sync);

        return pblk_rb_ring_space(rb, mem, sync, rb->nr_entries);
}

/*
 * Buffer count is calculated with respect to the submission entry signaling
 * the entries that are available to send to the media.
 */
unsigned int pblk_rb_read_count(struct pblk_rb *rb)
{
        unsigned int mem = READ_ONCE(rb->mem);
        unsigned int subm = READ_ONCE(rb->subm);

        return pblk_rb_ring_count(mem, subm, rb->nr_entries);
}

unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int nr_entries)
{
        unsigned int subm;

        subm = READ_ONCE(rb->subm);
        /* Commit read means updating submission pointer */
        smp_store_release(&rb->subm,
                                (subm + nr_entries) & (rb->nr_entries - 1));

        return subm;
}

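/*
 * For illustration: the mask wraps the submission pointer around the
 * power-of-two ring. With nr_entries = 1024 and subm = 1022, committing four
 * entries moves the pointer to (1022 + 4) & 1023 = 2, and the old position
 * (1022) is handed back to the caller as the start of the committed region.
 */
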
static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int *l2p_upd,
                                unsigned int to_update)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct pblk_line *line;
        struct pblk_rb_entry *entry;
        struct pblk_w_ctx *w_ctx;
        unsigned int i;

        for (i = 0; i < to_update; i++) {
                entry = &rb->entries[*l2p_upd];
                w_ctx = &entry->w_ctx;

                pblk_update_map_dev(pblk, w_ctx->lba, w_ctx->ppa,
                                                        entry->cacheline);

                line = &pblk->lines[pblk_tgt_ppa_to_line(w_ctx->ppa)];
                kref_put(&line->ref, pblk_line_put);
                clean_wctx(w_ctx);
                *l2p_upd = (*l2p_upd + 1) & (rb->nr_entries - 1);
        }

        return 0;
}

/*
 * When we move the l2p_update pointer, we update the l2p table - lookups will
 * point to the physical address instead of to the cacheline in the write buffer
 * from this moment on.
 */
static int pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int nr_entries,
                              unsigned int mem, unsigned int sync)
{
        unsigned int space, count;
        int ret = 0;

        lockdep_assert_held(&rb->w_lock);

        /* Update l2p only as buffer entries are being overwritten */
        space = pblk_rb_ring_space(rb, mem, rb->l2p_update, rb->nr_entries);
        if (space > nr_entries)
                goto out;

        count = nr_entries - space;
        /* l2p_update used exclusively under rb->w_lock */
        ret = __pblk_rb_update_l2p(rb, &rb->l2p_update, count);

out:
        return ret;
}

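/*
 * For illustration: with nr_entries = 1024, mem = 900 and l2p_update = 100,
 * the space up to the l2p_update pointer is 1023 - ((900 - 100) & 1023) = 223
 * entries. An incoming write of 300 entries therefore forces
 * 300 - 223 = 77 l2p mappings to be moved from cachelines to device addresses
 * before those buffer entries can be reused.
 */
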
/*
 * Update the l2p entry for all sectors stored on the write buffer. This means
 * that all future lookups to the l2p table will point to a device address, not
 * to the cacheline in the write buffer.
 */
void pblk_rb_sync_l2p(struct pblk_rb *rb)
{
        unsigned int sync;
        unsigned int to_update;

        spin_lock(&rb->w_lock);

        /* Protect from reads and writes */
        sync = smp_load_acquire(&rb->sync);

        to_update = pblk_rb_ring_count(sync, rb->l2p_update, rb->nr_entries);
        __pblk_rb_update_l2p(rb, &rb->l2p_update, to_update);

        spin_unlock(&rb->w_lock);
}

/*
 * Write @nr_entries to ring buffer from @data buffer if there is enough space.
 * Typically, 4KB data chunks coming from a bio will be copied to the ring
 * buffer, thus the write will fail if not all incoming data can be copied.
 */
static void __pblk_rb_write_entry(struct pblk_rb *rb, void *data,
                                  struct pblk_w_ctx w_ctx,
                                  struct pblk_rb_entry *entry)
{
        memcpy(entry->data, data, rb->seg_size);

        entry->w_ctx.lba = w_ctx.lba;
        entry->w_ctx.ppa = w_ctx.ppa;
}

void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
                              struct pblk_w_ctx w_ctx, unsigned int ring_pos)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct pblk_rb_entry *entry;
        int flags;

        entry = &rb->entries[ring_pos];
        flags = READ_ONCE(entry->w_ctx.flags);
#ifdef CONFIG_NVM_DEBUG
        /* Caller must guarantee that the entry is free */
        BUG_ON(!(flags & PBLK_WRITABLE_ENTRY));
#endif

        __pblk_rb_write_entry(rb, data, w_ctx, entry);

        pblk_update_map_cache(pblk, w_ctx.lba, entry->cacheline);
        flags = w_ctx.flags | PBLK_WRITTEN_DATA;

        /* Release flags on write context. Protect from writes */
        smp_store_release(&entry->w_ctx.flags, flags);
}

void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
                            struct pblk_w_ctx w_ctx, struct pblk_line *gc_line,
                            unsigned int ring_pos)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct pblk_rb_entry *entry;
        int flags;

        entry = &rb->entries[ring_pos];
        flags = READ_ONCE(entry->w_ctx.flags);
#ifdef CONFIG_NVM_DEBUG
        /* Caller must guarantee that the entry is free */
        BUG_ON(!(flags & PBLK_WRITABLE_ENTRY));
#endif

        __pblk_rb_write_entry(rb, data, w_ctx, entry);

        if (!pblk_update_map_gc(pblk, w_ctx.lba, entry->cacheline, gc_line))
                entry->w_ctx.lba = ADDR_EMPTY;

        flags = w_ctx.flags | PBLK_WRITTEN_DATA;

        /* Release flags on write context. Protect from writes */
        smp_store_release(&entry->w_ctx.flags, flags);
}

static int pblk_rb_sync_point_set(struct pblk_rb *rb, struct bio *bio,
                                  unsigned int pos)
{
        struct pblk_rb_entry *entry;
        unsigned int subm, sync_point;
        int flags;

        subm = READ_ONCE(rb->subm);

#ifdef CONFIG_NVM_DEBUG
        atomic_inc(&rb->inflight_sync_point);
#endif

        if (pos == subm)
                return 0;

        sync_point = (pos == 0) ? (rb->nr_entries - 1) : (pos - 1);
        entry = &rb->entries[sync_point];

        flags = READ_ONCE(entry->w_ctx.flags);
        flags |= PBLK_FLUSH_ENTRY;

        /* Release flags on context. Protect from writes */
        smp_store_release(&entry->w_ctx.flags, flags);

        /* Protect syncs */
        smp_store_release(&rb->sync_point, sync_point);

        spin_lock_irq(&rb->s_lock);
        bio_list_add(&entry->w_ctx.bios, bio);
        spin_unlock_irq(&rb->s_lock);

        return 1;
}

static int __pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
                               unsigned int *pos)
{
        unsigned int mem;
        unsigned int sync;

        sync = READ_ONCE(rb->sync);
        mem = READ_ONCE(rb->mem);

        if (pblk_rb_ring_space(rb, mem, sync, rb->nr_entries) < nr_entries)
                return 0;

        if (pblk_rb_update_l2p(rb, nr_entries, mem, sync))
                return 0;

        *pos = mem;

        return 1;
}

static int pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
                             unsigned int *pos)
{
        if (!__pblk_rb_may_write(rb, nr_entries, pos))
                return 0;

        /* Protect from read count */
        smp_store_release(&rb->mem, (*pos + nr_entries) & (rb->nr_entries - 1));
        return 1;
}

static int pblk_rb_may_write_flush(struct pblk_rb *rb, unsigned int nr_entries,
                                   unsigned int *pos, struct bio *bio,
                                   int *io_ret)
{
        unsigned int mem;

        if (!__pblk_rb_may_write(rb, nr_entries, pos))
                return 0;

        mem = (*pos + nr_entries) & (rb->nr_entries - 1);
        *io_ret = NVM_IO_DONE;

        if (bio->bi_opf & REQ_PREFLUSH) {
                struct pblk *pblk = container_of(rb, struct pblk, rwb);

#ifdef CONFIG_NVM_DEBUG
                atomic_long_inc(&pblk->nr_flush);
#endif
                if (pblk_rb_sync_point_set(&pblk->rwb, bio, mem))
                        *io_ret = NVM_IO_OK;
        }

        /* Protect from read count */
        smp_store_release(&rb->mem, mem);
        return 1;
}

/*
 * Atomically check that (i) there is space on the write buffer for the
 * incoming I/O, and (ii) the current I/O type has enough budget in the write
 * buffer (rate-limiter).
 */
int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
                           unsigned int nr_entries, unsigned int *pos)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        int flush_done;

        spin_lock(&rb->w_lock);
        if (!pblk_rl_user_may_insert(&pblk->rl, nr_entries)) {
                spin_unlock(&rb->w_lock);
                return NVM_IO_REQUEUE;
        }

        if (!pblk_rb_may_write_flush(rb, nr_entries, pos, bio, &flush_done)) {
                spin_unlock(&rb->w_lock);
                return NVM_IO_REQUEUE;
        }

        pblk_rl_user_in(&pblk->rl, nr_entries);
        spin_unlock(&rb->w_lock);

        return flush_done;
}

/*
 * See the pblk_rb_may_write_user() comment.
 */
int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
                         unsigned int *pos)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);

        spin_lock(&rb->w_lock);
        if (!pblk_rl_gc_may_insert(&pblk->rl, nr_entries)) {
                spin_unlock(&rb->w_lock);
                return 0;
        }

        if (!pblk_rb_may_write(rb, nr_entries, pos)) {
                spin_unlock(&rb->w_lock);
                return 0;
        }

        pblk_rl_gc_in(&pblk->rl, nr_entries);
        spin_unlock(&rb->w_lock);

        return 1;
}

/*
 * The caller of this function must ensure that the backpointer will not
 * overwrite the entries passed on the list.
 */
unsigned int pblk_rb_read_to_bio_list(struct pblk_rb *rb, struct bio *bio,
                                      struct list_head *list,
                                      unsigned int max)
{
        struct pblk_rb_entry *entry, *tentry;
        struct page *page;
        unsigned int read = 0;
        int ret;

        list_for_each_entry_safe(entry, tentry, list, index) {
                if (read > max) {
                        pr_err("pblk: too many entries on list\n");
                        goto out;
                }

                page = virt_to_page(entry->data);
                if (!page) {
                        pr_err("pblk: could not allocate write bio page\n");
                        goto out;
                }

                ret = bio_add_page(bio, page, rb->seg_size, 0);
                if (ret != rb->seg_size) {
                        pr_err("pblk: could not add page to write bio\n");
                        goto out;
                }

                list_del(&entry->index);
                read++;
        }

out:
        return read;
}

/*
 * Read available entries on rb and add them to the given bio. To avoid a
 * memory copy, a page reference to the write buffer is added to the bio
 * instead.
 *
 * This function is used by the write thread to form the write bio that will
 * persist data on the write buffer to the media.
 */
unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct bio *bio,
                                 struct pblk_c_ctx *c_ctx,
                                 unsigned int pos,
                                 unsigned int nr_entries,
                                 unsigned int count)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct pblk_rb_entry *entry;
        struct page *page;
        unsigned int pad = 0, read = 0, to_read = nr_entries;
        unsigned int user_io = 0, gc_io = 0;
        unsigned int i;
        int flags;
        int ret;

        if (count < nr_entries) {
                pad = nr_entries - count;
                to_read = count;
        }

        c_ctx->sentry = pos;
        c_ctx->nr_valid = to_read;
        c_ctx->nr_padded = pad;

        for (i = 0; i < to_read; i++) {
                entry = &rb->entries[pos];

                /* A write has been allowed into the buffer, but data is still
                 * being copied to it. It is ok to busy wait.
                 */
try:
                flags = READ_ONCE(entry->w_ctx.flags);
                if (!(flags & PBLK_WRITTEN_DATA))
                        goto try;

                if (flags & PBLK_IOTYPE_USER)
                        user_io++;
                else if (flags & PBLK_IOTYPE_GC)
                        gc_io++;
                else
                        WARN(1, "pblk: unknown IO type\n");

                page = virt_to_page(entry->data);
                if (!page) {
                        pr_err("pblk: could not allocate write bio page\n");
                        flags &= ~PBLK_WRITTEN_DATA;
                        flags |= PBLK_SUBMITTED_ENTRY;
                        /* Release flags on context. Protect from writes */
                        smp_store_release(&entry->w_ctx.flags, flags);
                        goto out;
                }

                ret = bio_add_page(bio, page, rb->seg_size, 0);
                if (ret != rb->seg_size) {
                        pr_err("pblk: could not add page to write bio\n");
                        flags &= ~PBLK_WRITTEN_DATA;
                        flags |= PBLK_SUBMITTED_ENTRY;
                        /* Release flags on context. Protect from writes */
                        smp_store_release(&entry->w_ctx.flags, flags);
                        goto out;
                }

                if (flags & PBLK_FLUSH_ENTRY) {
                        unsigned int sync_point;

                        sync_point = READ_ONCE(rb->sync_point);
                        if (sync_point == pos) {
                                /* Protect syncs */
                                smp_store_release(&rb->sync_point, EMPTY_ENTRY);
                        }

                        flags &= ~PBLK_FLUSH_ENTRY;
#ifdef CONFIG_NVM_DEBUG
                        atomic_dec(&rb->inflight_sync_point);
#endif
                }

                flags &= ~PBLK_WRITTEN_DATA;
                flags |= PBLK_SUBMITTED_ENTRY;

                /* Release flags on context. Protect from writes */
                smp_store_release(&entry->w_ctx.flags, flags);

                pos = (pos + 1) & (rb->nr_entries - 1);
        }

        read = to_read;
        pblk_rl_out(&pblk->rl, user_io, gc_io);
#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(pad, &((struct pblk *)
                        (container_of(rb, struct pblk, rwb)))->padded_writes);
#endif
out:
        return read;
}

/*
 * Copy to bio only if the lba matches the one on the given cache entry.
 * Otherwise, it means that the entry has been overwritten, and the bio should
 * be directed to disk.
 */
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
                        u64 pos, int bio_iter)
{
        struct pblk_rb_entry *entry;
        struct pblk_w_ctx *w_ctx;
        void *data;
        int flags;
        int ret = 1;

        spin_lock(&rb->w_lock);

#ifdef CONFIG_NVM_DEBUG
        /* Caller must ensure that the access will not cause an overflow */
        BUG_ON(pos >= rb->nr_entries);
#endif
        entry = &rb->entries[pos];
        w_ctx = &entry->w_ctx;
        flags = READ_ONCE(w_ctx->flags);

        /* Check if the entry has been overwritten or is scheduled to be */
        if (w_ctx->lba != lba || flags & PBLK_WRITABLE_ENTRY) {
                ret = 0;
                goto out;
        }

        /* Only advance the bio if it hasn't been advanced already. If advanced,
         * this bio is at least a partial bio (i.e., it has partially been
         * filled with data from the cache). If part of the data resides on the
         * media, we will read it later on.
         */
        if (unlikely(!bio->bi_iter.bi_idx))
                bio_advance(bio, bio_iter * PBLK_EXPOSED_PAGE_SIZE);

        data = bio_data(bio);
        memcpy(data, entry->data, rb->seg_size);

out:
        spin_unlock(&rb->w_lock);
        return ret;
}

struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos)
{
        unsigned int entry = pos & (rb->nr_entries - 1);

        return &rb->entries[entry].w_ctx;
}

unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags)
        __acquires(&rb->s_lock)
{
        if (flags)
                spin_lock_irqsave(&rb->s_lock, *flags);
        else
                spin_lock_irq(&rb->s_lock);

        return rb->sync;
}

void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags)
        __releases(&rb->s_lock)
{
        lockdep_assert_held(&rb->s_lock);

        if (flags)
                spin_unlock_irqrestore(&rb->s_lock, *flags);
        else
                spin_unlock_irq(&rb->s_lock);
}

unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries)
{
        unsigned int sync;
        unsigned int i;

        lockdep_assert_held(&rb->s_lock);

        sync = READ_ONCE(rb->sync);

        for (i = 0; i < nr_entries; i++)
                sync = (sync + 1) & (rb->nr_entries - 1);

        /* Protect from counts */
        smp_store_release(&rb->sync, sync);

        return sync;
}

unsigned int pblk_rb_sync_point_count(struct pblk_rb *rb)
{
        unsigned int subm, sync_point;
        unsigned int count;

        /* Protect syncs */
        sync_point = smp_load_acquire(&rb->sync_point);
        if (sync_point == EMPTY_ENTRY)
                return 0;

        subm = READ_ONCE(rb->subm);

        /* The sync point itself counts as a sector to sync */
        count = pblk_rb_ring_count(sync_point, subm, rb->nr_entries) + 1;

        return count;
}

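/*
 * For illustration: with nr_entries = 1024, subm = 10 and a sync point set at
 * entry 13, the result is ((13 - 10) & 1023) + 1 = 4, i.e. entries 10, 11, 12
 * and the flush entry 13 itself still have to be submitted before the flush
 * can complete.
 */
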
/*
 * Scan from the current position of the sync pointer to find the entry that
 * corresponds to the given ppa. This is necessary since write requests can be
 * completed out of order. The assumption is that the ppa is close to the sync
 * pointer thus the search will not take long.
 *
 * The caller of this function must guarantee that the sync pointer will not
 * reach the entry while it is using the metadata associated with it. With this
 * assumption in mind, there is no need to take the sync lock.
 */
struct pblk_rb_entry *pblk_rb_sync_scan_entry(struct pblk_rb *rb,
                                              struct ppa_addr *ppa)
{
        unsigned int sync, subm, count;
        unsigned int i;

        sync = READ_ONCE(rb->sync);
        subm = READ_ONCE(rb->subm);
        count = pblk_rb_ring_count(subm, sync, rb->nr_entries);

        for (i = 0; i < count; i++)
                sync = (sync + 1) & (rb->nr_entries - 1);

        return NULL;
}

int pblk_rb_tear_down_check(struct pblk_rb *rb)
{
        struct pblk_rb_entry *entry;
        int i;
        int ret = 0;

        spin_lock(&rb->w_lock);
        spin_lock_irq(&rb->s_lock);

        if ((rb->mem == rb->subm) && (rb->subm == rb->sync) &&
                                (rb->sync == rb->l2p_update) &&
                                (rb->sync_point == EMPTY_ENTRY)) {
                goto out;
        }

        if (!rb->entries) {
                ret = 1;
                goto out;
        }

        for (i = 0; i < rb->nr_entries; i++) {
                entry = &rb->entries[i];

                if (!entry->data) {
                        ret = 1;
                        goto out;
                }
        }

out:
        spin_unlock(&rb->w_lock);
        spin_unlock_irq(&rb->s_lock);

        return ret;
}

unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos)
{
        return (pos & (rb->nr_entries - 1));
}

int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos)
{
        return (pos >= rb->nr_entries);
}

ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct pblk_c_ctx *c;
        ssize_t offset;
        int queued_entries = 0;

        spin_lock_irq(&rb->s_lock);
        list_for_each_entry(c, &pblk->compl_list, list)
                queued_entries++;
        spin_unlock_irq(&rb->s_lock);

        if (rb->sync_point != EMPTY_ENTRY)
                offset = scnprintf(buf, PAGE_SIZE,
                        "%u\t%u\t%u\t%u\t%u\t%u\t%u - %u/%u/%u - %d\n",
                        rb->nr_entries,
                        rb->mem,
                        rb->subm,
                        rb->sync,
                        rb->l2p_update,
#ifdef CONFIG_NVM_DEBUG
                        atomic_read(&rb->inflight_sync_point),
#else
                        0,
#endif
                        rb->sync_point,
                        pblk_rb_read_count(rb),
                        pblk_rb_space(rb),
                        pblk_rb_sync_point_count(rb),
                        queued_entries);
        else
                offset = scnprintf(buf, PAGE_SIZE,
                        "%u\t%u\t%u\t%u\t%u\t%u\tNULL - %u/%u/%u - %d\n",
                        rb->nr_entries,
                        rb->mem,
                        rb->subm,
                        rb->sync,
                        rb->l2p_update,
#ifdef CONFIG_NVM_DEBUG
                        atomic_read(&rb->inflight_sync_point),
#else
                        0,
#endif
                        pblk_rb_read_count(rb),
                        pblk_rb_space(rb),
                        pblk_rb_sync_point_count(rb),
                        queued_entries);

        return offset;
}