/*
 * fs/logfs/dev_bdev.c	- Device access methods for block devices
 *
 * As should be obvious for Linux kernel code, license is GPLv2
 */
#include "logfs.h"
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/prefetch.h>

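/* Byte offset of ofs within its page. */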
#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))

static void request_complete(struct bio *bio, int err)
{
	complete((struct completion *)bio->bi_private);
}

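/*
 * Synchronously read or write a single page.  The bio and its single
 * bio_vec live on the stack, which is assumed safe here because we wait
 * for the request to complete before returning.
 */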
static int sync_request(struct page *page, struct block_device *bdev, int rw)
{
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;

	bio_init(&bio);
	bio.bi_max_vecs = 1;
	bio.bi_io_vec = &bio_vec;
	bio_vec.bv_page = page;
	bio_vec.bv_len = PAGE_SIZE;
	bio_vec.bv_offset = 0;
	bio.bi_vcnt = 1;
	bio.bi_size = PAGE_SIZE;
	bio.bi_bdev = bdev;
	bio.bi_sector = page->index * (PAGE_SIZE >> 9);
	init_completion(&complete);
	bio.bi_private = &complete;
	bio.bi_end_io = request_complete;

	submit_bio(rw, &bio);
	wait_for_completion(&complete);
	return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO;
}

static int bdev_readpage(void *_sb, struct page *page)
{
	struct super_block *sb = _sb;
	struct block_device *bdev = logfs_super(sb)->s_bdev;
	int err;

	err = sync_request(page, bdev, READ);
	if (err) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		SetPageUptodate(page);
		ClearPageError(page);
	}
	unlock_page(page);
	return err;
}

static DECLARE_WAIT_QUEUE_HEAD(wq);

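/*
 * Asynchronous writes are counted in super->s_pending_writes; the
 * completion handlers decrement it and wake this queue, which
 * bdev_sync() sleeps on until the count reaches zero.
 */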
static void writeseg_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct super_block *sb = bio->bi_private;
	struct logfs_super *super = logfs_super(sb);
	struct page *page;

	BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
	BUG_ON(err);
	BUG_ON(bio->bi_vcnt == 0);
	do {
		page = bvec->bv_page;
		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		end_page_writeback(page);
		page_cache_release(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
	if (atomic_dec_and_test(&super->s_pending_writes))
		wake_up(&wq);
}

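/*
 * Walk the page cache pages backing [index, index + nr_pages) and
 * submit them as one or more bios of at most max_pages vecs each;
 * whenever a bio fills up it is submitted and a fresh one is allocated
 * for the rest of the range.
 */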
static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
		size_t nr_pages)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	struct bio *bio;
	struct page *page;
	unsigned int max_pages;
	int i;

	max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev));

	bio = bio_alloc(GFP_NOFS, max_pages);
	BUG_ON(!bio);

	for (i = 0; i < nr_pages; i++) {
		if (i >= max_pages) {
			/* Block layer cannot split bios :( */
			bio->bi_vcnt = i;
			bio->bi_size = i * PAGE_SIZE;
			bio->bi_bdev = super->s_bdev;
			bio->bi_sector = ofs >> 9;
			bio->bi_private = sb;
			bio->bi_end_io = writeseg_end_io;
			atomic_inc(&super->s_pending_writes);
			submit_bio(WRITE, bio);

			ofs += i * PAGE_SIZE;
			index += i;
			nr_pages -= i;
			i = 0;

			bio = bio_alloc(GFP_NOFS, max_pages);
			BUG_ON(!bio);
		}
		page = find_lock_page(mapping, index + i);
		BUG_ON(!page);
		bio->bi_io_vec[i].bv_page = page;
		bio->bi_io_vec[i].bv_len = PAGE_SIZE;
		bio->bi_io_vec[i].bv_offset = 0;

		BUG_ON(PageWriteback(page));
		set_page_writeback(page);
		unlock_page(page);
	}
	bio->bi_vcnt = nr_pages;
	bio->bi_size = nr_pages * PAGE_SIZE;
	bio->bi_bdev = super->s_bdev;
	bio->bi_sector = ofs >> 9;
	bio->bi_private = sb;
	bio->bi_end_io = writeseg_end_io;
	atomic_inc(&super->s_pending_writes);
	submit_bio(WRITE, bio);
	return 0;
}

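/*
 * Callers may pass byte ranges that are not page-aligned; round the
 * start down and the length up to whole pages, which should be safe
 * since the mapping inode's page cache holds complete pages.
 */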
static void bdev_writeseg(struct super_block *sb, u64 ofs, size_t len)
{
	struct logfs_super *super = logfs_super(sb);
	int head;

	BUG_ON(super->s_flags & LOGFS_SB_FLAG_RO);

	if (len == 0) {
		/* This can happen when the object fit perfectly into a
		 * segment, the segment gets written per sync and subsequently
		 * closed. */
		return;
	}
	head = ofs & (PAGE_SIZE - 1);
	if (head) {
		ofs -= head;
		len += head;
	}
	len = PAGE_ALIGN(len);
	__bdev_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
}

static void erase_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct super_block *sb = bio->bi_private;
	struct logfs_super *super = logfs_super(sb);

	BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
	BUG_ON(err);
	BUG_ON(bio->bi_vcnt == 0);
	bio_put(bio);
	if (atomic_dec_and_test(&super->s_pending_writes))
		wake_up(&wq);
}

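/*
 * Emulate an erase by writing super->s_erase_page (presumably a page of
 * 0xff bytes, matching freshly-erased flash) over every page of the
 * range, using the same bio-chunking scheme as __bdev_writeseg().
 */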
static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
		size_t nr_pages)
{
	struct logfs_super *super = logfs_super(sb);
	struct bio *bio;
	unsigned int max_pages;
	int i;

	max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev));

	bio = bio_alloc(GFP_NOFS, max_pages);
	BUG_ON(!bio);

	for (i = 0; i < nr_pages; i++) {
		if (i >= max_pages) {
			/* Block layer cannot split bios :( */
			bio->bi_vcnt = i;
			bio->bi_size = i * PAGE_SIZE;
			bio->bi_bdev = super->s_bdev;
			bio->bi_sector = ofs >> 9;
			bio->bi_private = sb;
			bio->bi_end_io = erase_end_io;
			atomic_inc(&super->s_pending_writes);
			submit_bio(WRITE, bio);

			ofs += i * PAGE_SIZE;
			index += i;
			nr_pages -= i;
			i = 0;

			bio = bio_alloc(GFP_NOFS, max_pages);
			BUG_ON(!bio);
		}
		bio->bi_io_vec[i].bv_page = super->s_erase_page;
		bio->bi_io_vec[i].bv_len = PAGE_SIZE;
		bio->bi_io_vec[i].bv_offset = 0;
	}
	bio->bi_vcnt = nr_pages;
	bio->bi_size = nr_pages * PAGE_SIZE;
	bio->bi_bdev = super->s_bdev;
	bio->bi_sector = ofs >> 9;
	bio->bi_private = sb;
	bio->bi_end_io = erase_end_io;
	atomic_inc(&super->s_pending_writes);
	submit_bio(WRITE, bio);
	return 0;
}

static int bdev_erase(struct super_block *sb, loff_t to, size_t len,
		int ensure_write)
{
	struct logfs_super *super = logfs_super(sb);

	BUG_ON(to & (PAGE_SIZE - 1));
	BUG_ON(len & (PAGE_SIZE - 1));

	if (super->s_flags & LOGFS_SB_FLAG_RO)
		return -EROFS;

	if (ensure_write) {
		/*
		 * Object store doesn't care whether erases happen or not.
		 * But for the journal they are required.  Otherwise a scan
		 * can find an old commit entry and assume it is the current
		 * one, travelling back in time.
		 */
		do_erase(sb, to, to >> PAGE_SHIFT, len >> PAGE_SHIFT);
	}

	return 0;
}

static void bdev_sync(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);

	wait_event(wq, atomic_read(&super->s_pending_writes) == 0);
}

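/*
 * The first superblock sits at offset 0; the last one occupies the
 * final 4KiB-aligned block of the device, hence the ~0xfffULL mask
 * below.
 */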
static struct page *bdev_find_first_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = bdev_readpage;

	*ofs = 0;
	return read_cache_page(mapping, 0, filler, sb);
}

static struct page *bdev_find_last_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = bdev_readpage;
	u64 pos = (super->s_bdev->bd_inode->i_size & ~0xfffULL) - 0x1000;
	pgoff_t index = pos >> PAGE_SHIFT;

	*ofs = pos;
	return read_cache_page(mapping, index, filler, sb);
}

static int bdev_write_sb(struct super_block *sb, struct page *page)
{
	struct block_device *bdev = logfs_super(sb)->s_bdev;

	/* Nothing special to do for block devices. */
	return sync_request(page, bdev, WRITE);
}

static void bdev_put_device(struct logfs_super *s)
{
	blkdev_put(s->s_bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

static int bdev_can_write_buf(struct super_block *sb, u64 ofs)
{
	/* Block devices have no write buffer, so never. */
	return 0;
}

static const struct logfs_device_ops bd_devops = {
	.find_first_sb	= bdev_find_first_sb,
	.find_last_sb	= bdev_find_last_sb,
	.write_sb	= bdev_write_sb,
	.readpage	= bdev_readpage,
	.writeseg	= bdev_writeseg,
	.erase		= bdev_erase,
	.can_write_buf	= bdev_can_write_buf,
	.sync		= bdev_sync,
	.put_device	= bdev_put_device,
};

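/*
 * If devname actually names an mtdblock device, drop the block device
 * and hand over to the MTD backend, which can perform real erases
 * instead of the emulation above.
 */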
int logfs_get_sb_bdev(struct logfs_super *p, struct file_system_type *type,
		const char *devname)
{
	struct block_device *bdev;

	bdev = blkdev_get_by_path(devname, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				  type);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
		int mtdnr = MINOR(bdev->bd_dev);
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
		return logfs_get_sb_mtd(p, mtdnr);
	}

	p->s_bdev = bdev;
	p->s_mtd = NULL;
	p->s_devops = &bd_devops;
	return 0;
}