/*
 * btnode.c - NILFS B-tree node cache
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * This file was originally written by Seiji Kihara <[email protected]>
 * and fully revised by Ryusuke Konishi <[email protected]> for
 * stabilization and simplification.
 *
 */

#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include "nilfs.h"
#include "mdt.h"
#include "dat.h"
#include "page.h"
#include "btnode.h"

void nilfs_btnode_cache_init_once(struct address_space *btnc)
{
	memset(btnc, 0, sizeof(*btnc));
	INIT_RADIX_TREE(&btnc->page_tree, GFP_ATOMIC);
	spin_lock_init(&btnc->tree_lock);
	INIT_LIST_HEAD(&btnc->private_list);
	spin_lock_init(&btnc->private_lock);

	spin_lock_init(&btnc->i_mmap_lock);
	INIT_RAW_PRIO_TREE_ROOT(&btnc->i_mmap);
	INIT_LIST_HEAD(&btnc->i_mmap_nonlinear);
}
50 | ||
7f09410b | 51 | static const struct address_space_operations def_btnode_aops = { |
fa032744 RK |
52 | .sync_page = block_sync_page, |
53 | }; | |
a60be987 | 54 | |
a53b4751 RK |
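/*
 * nilfs_btnode_cache_init - set up a b-tree node cache for use
 * @btnc: address_space to be initialized
 * @bdi: backing device info of the device holding the filesystem
 *
 * Unlike nilfs_btnode_cache_init_once(), this resets the per-use fields
 * each time the cache is set up for an inode: it forces GFP_NOFS page
 * allocations so memory reclaim cannot recurse into the filesystem, and
 * installs the node cache address_space operations.
 */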
void nilfs_btnode_cache_init(struct address_space *btnc,
			     struct backing_dev_info *bdi)
{
	btnc->host = NULL;  /* can safely set to host inode ? */
	btnc->flags = 0;
	mapping_set_gfp_mask(btnc, GFP_NOFS);
	btnc->assoc_mapping = NULL;
	btnc->backing_dev_info = bdi;
	btnc->a_ops = &def_btnode_aops;
}

void nilfs_btnode_cache_clear(struct address_space *btnc)
{
	invalidate_mapping_pages(btnc, 0, -1);
	truncate_inode_pages(btnc, 0);
}

struct buffer_head *
nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
{
	struct inode *inode = NILFS_BTNC_I(btnc);
	struct buffer_head *bh;

	bh = nilfs_grab_buffer(inode, btnc, blocknr, 1 << BH_NILFS_Node);
	if (unlikely(!bh))
		return NULL;

	if (unlikely(buffer_mapped(bh) || buffer_uptodate(bh) ||
		     buffer_dirty(bh))) {
		brelse(bh);
		BUG();
	}
	memset(bh->b_data, 0, 1 << inode->i_blkbits);
	bh->b_bdev = NILFS_I_NILFS(inode)->ns_bdev;
	bh->b_blocknr = blocknr;
	set_buffer_mapped(bh);
	set_buffer_uptodate(bh);

	unlock_page(bh->b_page);
	page_cache_release(bh->b_page);
	return bh;
}

int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
			      sector_t pblocknr, struct buffer_head **pbh)
{
	struct buffer_head *bh;
	struct inode *inode = NILFS_BTNC_I(btnc);
	int err;

	bh = nilfs_grab_buffer(inode, btnc, blocknr, 1 << BH_NILFS_Node);
	if (unlikely(!bh))
		return -ENOMEM;

	err = -EEXIST; /* internal code */

	if (buffer_uptodate(bh) || buffer_dirty(bh))
		goto found;

	if (pblocknr == 0) {
		pblocknr = blocknr;
		if (inode->i_ino != NILFS_DAT_INO) {
			struct inode *dat =
				nilfs_dat_inode(NILFS_I_NILFS(inode));

			/* blocknr is a virtual block number */
			err = nilfs_dat_translate(dat, blocknr, &pblocknr);
			if (unlikely(err)) {
				brelse(bh);
				goto out_locked;
			}
		}
	}
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		err = -EEXIST; /* internal code */
		goto found;
	}
	set_buffer_mapped(bh);
	bh->b_bdev = NILFS_I_NILFS(inode)->ns_bdev;
	bh->b_blocknr = pblocknr; /* set block address for read */
	bh->b_end_io = end_buffer_read_sync;
	get_bh(bh);
	submit_bh(READ, bh);
	bh->b_blocknr = blocknr; /* set back to the given block address */
	err = 0;
found:
	*pbh = bh;

out_locked:
	unlock_page(bh->b_page);
	page_cache_release(bh->b_page);
	return err;
}

/**
 * nilfs_btnode_delete - delete B-tree node buffer
 * @bh: buffer to be deleted
 *
 * nilfs_btnode_delete() invalidates the specified buffer and deletes the
 * page containing it once the page is no longer busy.
 */
void nilfs_btnode_delete(struct buffer_head *bh)
{
	struct address_space *mapping;
	struct page *page = bh->b_page;
	pgoff_t index = page_index(page);
	int still_dirty;

	page_cache_get(page);
	lock_page(page);
	wait_on_page_writeback(page);

	nilfs_forget_buffer(bh);
	still_dirty = PageDirty(page);
	mapping = page->mapping;
	unlock_page(page);
	page_cache_release(page);

	if (!still_dirty && mapping)
		invalidate_inode_pages2_range(mapping, index, index);
}

/**
 * nilfs_btnode_prepare_change_key - prepare to change the key of a node block
 * @btnc: b-tree node cache
 * @ctxt: change-key context holding the old key, the new key and the buffer
 *
 * Prepares to move the contents of the block for the old key to the new
 * key.  The old buffer is not removed, but may be reused as the new
 * buffer.  May return -ENOMEM on memory allocation failure and -EIO on
 * disk read error.
 */
int nilfs_btnode_prepare_change_key(struct address_space *btnc,
				    struct nilfs_btnode_chkey_ctxt *ctxt)
{
	struct buffer_head *obh, *nbh;
	struct inode *inode = NILFS_BTNC_I(btnc);
	__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
	int err;

	if (oldkey == newkey)
		return 0;

	obh = ctxt->bh;
	ctxt->newbh = NULL;

	if (inode->i_blkbits == PAGE_CACHE_SHIFT) {
		lock_page(obh->b_page);
		/*
		 * We cannot call radix_tree_preload for the kernels older
		 * than 2.6.23, because it is not exported for modules.
		 */
retry:
		err = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
		if (err)
			goto failed_unlock;
		/* BUG_ON(oldkey != obh->b_page->index); */
		if (unlikely(oldkey != obh->b_page->index))
			NILFS_PAGE_BUG(obh->b_page,
				       "invalid oldkey %lld (newkey=%lld)",
				       (unsigned long long)oldkey,
				       (unsigned long long)newkey);

		spin_lock_irq(&btnc->tree_lock);
		err = radix_tree_insert(&btnc->page_tree, newkey, obh->b_page);
		spin_unlock_irq(&btnc->tree_lock);
		/*
		 * Note: page->index will not change to newkey until
		 * nilfs_btnode_commit_change_key() is called.  To protect
		 * the page in this intermediate state, the page lock
		 * is held.
		 */
		radix_tree_preload_end();
		if (!err)
			return 0;
		else if (err != -EEXIST)
			goto failed_unlock;

		err = invalidate_inode_pages2_range(btnc, newkey, newkey);
		if (!err)
			goto retry;
		/* fallback to copy mode */
		unlock_page(obh->b_page);
	}

	nbh = nilfs_btnode_create_block(btnc, newkey);
	if (!nbh)
		return -ENOMEM;

	BUG_ON(nbh == obh);
	ctxt->newbh = nbh;
	return 0;

failed_unlock:
	unlock_page(obh->b_page);
	return err;
}

/**
 * nilfs_btnode_commit_change_key - commit the change of a node block's key
 * @btnc: b-tree node cache
 * @ctxt: change-key context prepared by nilfs_btnode_prepare_change_key()
 *
 * Commits the change-key operation prepared by
 * nilfs_btnode_prepare_change_key().
 */
void nilfs_btnode_commit_change_key(struct address_space *btnc,
				    struct nilfs_btnode_chkey_ctxt *ctxt)
{
	struct buffer_head *obh = ctxt->bh, *nbh = ctxt->newbh;
	__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
	struct page *opage;

	if (oldkey == newkey)
		return;

	if (nbh == NULL) {	/* blocksize == pagesize */
		opage = obh->b_page;
		if (unlikely(oldkey != opage->index))
			NILFS_PAGE_BUG(opage,
				       "invalid oldkey %lld (newkey=%lld)",
				       (unsigned long long)oldkey,
				       (unsigned long long)newkey);
		nilfs_btnode_mark_dirty(obh);

		spin_lock_irq(&btnc->tree_lock);
		radix_tree_delete(&btnc->page_tree, oldkey);
		radix_tree_tag_set(&btnc->page_tree, newkey,
				   PAGECACHE_TAG_DIRTY);
		spin_unlock_irq(&btnc->tree_lock);

		opage->index = obh->b_blocknr = newkey;
		unlock_page(opage);
	} else {
		nilfs_copy_buffer(nbh, obh);
		nilfs_btnode_mark_dirty(nbh);

		nbh->b_blocknr = newkey;
		ctxt->bh = nbh;
		nilfs_btnode_delete(obh); /* will decrement bh->b_count */
	}
}

/**
 * nilfs_btnode_abort_change_key - abort the change of a node block's key
 * @btnc: b-tree node cache
 * @ctxt: change-key context prepared by nilfs_btnode_prepare_change_key()
 *
 * Aborts the change-key operation prepared by
 * nilfs_btnode_prepare_change_key().
 */
void nilfs_btnode_abort_change_key(struct address_space *btnc,
				   struct nilfs_btnode_chkey_ctxt *ctxt)
{
	struct buffer_head *nbh = ctxt->newbh;
	__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;

	if (oldkey == newkey)
		return;

	if (nbh == NULL) {	/* blocksize == pagesize */
		spin_lock_irq(&btnc->tree_lock);
		radix_tree_delete(&btnc->page_tree, newkey);
		spin_unlock_irq(&btnc->tree_lock);
		unlock_page(ctxt->bh->b_page);
	} else
		brelse(nbh);
}