Commit | Line | Data |
---|---|---|
85e174ba RL |
1 | /* |
2 | * pNFS functions to call and manage layout drivers. | |
3 | * | |
4 | * Copyright (c) 2002 [year of first publication] | |
5 | * The Regents of the University of Michigan | |
6 | * All Rights Reserved | |
7 | * | |
8 | * Dean Hildebrand <[email protected]> | |
9 | * | |
10 | * Permission is granted to use, copy, create derivative works, and | |
11 | * redistribute this software and such derivative works for any purpose, | |
12 | * so long as the name of the University of Michigan is not used in | |
13 | * any advertising or publicity pertaining to the use or distribution | |
14 | * of this software without specific, written prior authorization. If | |
15 | * the above copyright notice or any other identification of the | |
16 | * University of Michigan is included in any copy of any portion of | |
17 | * this software, then the disclaimer below must also be included. | |
18 | * | |
19 | * This software is provided as is, without representation or warranty | |
20 | * of any kind either express or implied, including without limitation | |
21 | * the implied warranties of merchantability, fitness for a particular | |
22 | * purpose, or noninfringement. The Regents of the University of | |
23 | * Michigan shall not be liable for any damages, including special, | |
24 | * indirect, incidental, or consequential damages, with respect to any | |
25 | * claim arising out of or in connection with the use of the software, | |
26 | * even if it has been or is hereafter advised of the possibility of | |
27 | * such damages. | |
28 | */ | |
29 | ||
30 | #include <linux/nfs_fs.h> | |
493292dd | 31 | #include <linux/nfs_page.h> |
143cb494 | 32 | #include <linux/module.h> |
974cec8c | 33 | #include "internal.h" |
85e174ba | 34 | #include "pnfs.h" |
64419a9b | 35 | #include "iostat.h" |
cc668ab3 | 36 | #include "nfs4trace.h" |
40dd4b7a | 37 | #include "delegation.h" |
85e174ba RL |
38 | |
39 | #define NFSDBG_FACILITY NFSDBG_PNFS | |
25c75333 | 40 | #define PNFS_LAYOUTGET_RETRY_TIMEOUT (120*HZ) |
85e174ba | 41 | |
02c35fca FI |
42 | /* Locking: |
43 | * | |
44 | * pnfs_spinlock: | |
45 | * protects pnfs_modules_tbl. | |
46 | */ | |
47 | static DEFINE_SPINLOCK(pnfs_spinlock); | |
48 | ||
49 | /* | |
50 | * pnfs_modules_tbl holds all pnfs modules | |
51 | */ | |
52 | static LIST_HEAD(pnfs_modules_tbl); | |
53 | ||
aa1e0e3a PT |
54 | static int |
55 | pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, nfs4_stateid stateid, | |
6c16605d | 56 | enum pnfs_iomode iomode, bool sync); |
aa1e0e3a | 57 | |
02c35fca FI |
58 | /* Return the registered pnfs layout driver module matching given id */ |
59 | static struct pnfs_layoutdriver_type * | |
60 | find_pnfs_driver_locked(u32 id) | |
61 | { | |
62 | struct pnfs_layoutdriver_type *local; | |
63 | ||
64 | list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid) | |
65 | if (local->id == id) | |
66 | goto out; | |
67 | local = NULL; | |
68 | out: | |
69 | dprintk("%s: Searching for id %u, found %p\n", __func__, id, local); | |
70 | return local; | |
71 | } | |
72 | ||
85e174ba RL |
73 | static struct pnfs_layoutdriver_type * |
74 | find_pnfs_driver(u32 id) | |
75 | { | |
02c35fca FI |
76 | struct pnfs_layoutdriver_type *local; |
77 | ||
78 | spin_lock(&pnfs_spinlock); | |
79 | local = find_pnfs_driver_locked(id); | |
0a9c63fa TM |
80 | if (local != NULL && !try_module_get(local->owner)) { |
81 | dprintk("%s: Could not grab reference on module\n", __func__); | |
82 | local = NULL; | |
83 | } | |
02c35fca FI |
84 | spin_unlock(&pnfs_spinlock); |
85 | return local; | |
85e174ba RL |
86 | } |
87 | ||
88 | void | |
89 | unset_pnfs_layoutdriver(struct nfs_server *nfss) | |
90 | { | |
738fd0f3 BH |
91 | if (nfss->pnfs_curr_ld) { |
92 | if (nfss->pnfs_curr_ld->clear_layoutdriver) | |
93 | nfss->pnfs_curr_ld->clear_layoutdriver(nfss); | |
2a4c8994 TM |
94 | /* Decrement the MDS count. Purge the deviceid cache if zero */ |
95 | if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count)) | |
96 | nfs4_deviceid_purge_client(nfss->nfs_client); | |
02c35fca | 97 | module_put(nfss->pnfs_curr_ld->owner); |
738fd0f3 | 98 | } |
85e174ba RL |
99 | nfss->pnfs_curr_ld = NULL; |
100 | } | |
101 | ||
102 | /* | |
103 | * Try to set the server's pnfs module to the pnfs layout type specified by id. | |
104 | * Currently only one pNFS layout driver per filesystem is supported. | |
105 | * | |
106 | * @id layout type. Zero (illegal layout type) indicates pNFS not in use. | |
107 | */ | |
108 | void | |
738fd0f3 BH |
109 | set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh, |
110 | u32 id) | |
85e174ba RL |
111 | { |
112 | struct pnfs_layoutdriver_type *ld_type = NULL; | |
113 | ||
114 | if (id == 0) | |
115 | goto out_no_driver; | |
116 | if (!(server->nfs_client->cl_exchange_flags & | |
117 | (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) { | |
a030889a WAA |
118 | printk(KERN_ERR "NFS: %s: id %u cl_exchange_flags 0x%x\n", |
119 | __func__, id, server->nfs_client->cl_exchange_flags); | |
85e174ba RL |
120 | goto out_no_driver; |
121 | } | |
122 | ld_type = find_pnfs_driver(id); | |
123 | if (!ld_type) { | |
124 | request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id); | |
125 | ld_type = find_pnfs_driver(id); | |
126 | if (!ld_type) { | |
127 | dprintk("%s: No pNFS module found for %u.\n", | |
128 | __func__, id); | |
129 | goto out_no_driver; | |
130 | } | |
131 | } | |
132 | server->pnfs_curr_ld = ld_type; | |
738fd0f3 BH |
133 | if (ld_type->set_layoutdriver |
134 | && ld_type->set_layoutdriver(server, mntfh)) { | |
a030889a WAA |
135 | printk(KERN_ERR "NFS: %s: Error initializing pNFS layout " |
136 | "driver %u.\n", __func__, id); | |
738fd0f3 BH |
137 | module_put(ld_type->owner); |
138 | goto out_no_driver; | |
139 | } | |
2a4c8994 TM |
140 | /* Bump the MDS count */ |
141 | atomic_inc(&server->nfs_client->cl_mds_count); | |
ea8eecdd | 142 | |
85e174ba RL |
143 | dprintk("%s: pNFS module for %u set\n", __func__, id); |
144 | return; | |
145 | ||
146 | out_no_driver: | |
147 | dprintk("%s: Using NFSv4 I/O\n", __func__); | |
148 | server->pnfs_curr_ld = NULL; | |
149 | } | |
02c35fca FI |
150 | |
151 | int | |
152 | pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type) | |
153 | { | |
154 | int status = -EINVAL; | |
155 | struct pnfs_layoutdriver_type *tmp; | |
156 | ||
157 | if (ld_type->id == 0) { | |
a030889a | 158 | printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__); |
02c35fca FI |
159 | return status; |
160 | } | |
b1f69b75 | 161 | if (!ld_type->alloc_lseg || !ld_type->free_lseg) { |
a030889a | 162 | printk(KERN_ERR "NFS: %s Layout driver must provide " |
b1f69b75 AA |
163 | "alloc_lseg and free_lseg.\n", __func__); |
164 | return status; | |
165 | } | |
02c35fca FI |
166 | |
167 | spin_lock(&pnfs_spinlock); | |
168 | tmp = find_pnfs_driver_locked(ld_type->id); | |
169 | if (!tmp) { | |
170 | list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl); | |
171 | status = 0; | |
172 | dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id, | |
173 | ld_type->name); | |
174 | } else { | |
a030889a | 175 | printk(KERN_ERR "NFS: %s Module with id %d already loaded!\n", |
02c35fca FI |
176 | __func__, ld_type->id); |
177 | } | |
178 | spin_unlock(&pnfs_spinlock); | |
179 | ||
180 | return status; | |
181 | } | |
182 | EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver); | |
183 | ||
184 | void | |
185 | pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type) | |
186 | { | |
187 | dprintk("%s Deregistering id:%u\n", __func__, ld_type->id); | |
188 | spin_lock(&pnfs_spinlock); | |
189 | list_del(&ld_type->pnfs_tblid); | |
190 | spin_unlock(&pnfs_spinlock); | |
191 | } | |
192 | EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver); | |
e5e94017 | 193 | |
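The registration path above only enforces two things: the driver's `id` must be non-zero and it must supply both `alloc_lseg` and `free_lseg`. Below is a hypothetical sketch of a layout driver module registering itself; the `alloc_lseg` signature and the id value are assumptions based on in-tree drivers of this era (this file does not define them), and the hook bodies are placeholders — real drivers embed the generic `pnfs_layout_segment` in a larger driver-specific structure.

```c
#include <linux/module.h>
#include <linux/slab.h>
#include "pnfs.h"		/* struct pnfs_layoutdriver_type */

/* Placeholder hooks: real drivers allocate a driver-specific segment. */
static struct pnfs_layout_segment *
example_alloc_lseg(struct pnfs_layout_hdr *lo,
		   struct nfs4_layoutget_res *lgr, gfp_t gfp_flags)
{
	return kzalloc(sizeof(struct pnfs_layout_segment), gfp_flags);
}

static void example_free_lseg(struct pnfs_layout_segment *lseg)
{
	kfree(lseg);
}

static struct pnfs_layoutdriver_type example_layoutdriver = {
	.id		= LAYOUT_NFSV4_1_FILES,	/* placeholder layout type id */
	.name		= "example",
	.owner		= THIS_MODULE,
	.alloc_lseg	= example_alloc_lseg,
	.free_lseg	= example_free_lseg,
};

static int __init example_init(void)
{
	/* -EINVAL if id is 0, a hook is missing, or the id is already taken */
	return pnfs_register_layoutdriver(&example_layoutdriver);
}

static void __exit example_exit(void)
{
	pnfs_unregister_layoutdriver(&example_layoutdriver);
}
module_init(example_init);
module_exit(example_exit);
```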
b1f69b75 AA |
194 | /* |
195 | * pNFS client layout cache | |
196 | */ | |
197 | ||
cc6e5340 | 198 | /* Need to hold i_lock if caller does not already hold reference */ |
43f1b3da | 199 | void |
70c3bd2b | 200 | pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo) |
e5e94017 | 201 | { |
cc6e5340 | 202 | atomic_inc(&lo->plh_refcount); |
e5e94017 BH |
203 | } |
204 | ||
636fb9c8 BH |
205 | static struct pnfs_layout_hdr * |
206 | pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags) | |
207 | { | |
208 | struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld; | |
57934278 | 209 | return ld->alloc_layout_hdr(ino, gfp_flags); |
636fb9c8 BH |
210 | } |
211 | ||
212 | static void | |
213 | pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo) | |
214 | { | |
9c626381 TM |
215 | struct nfs_server *server = NFS_SERVER(lo->plh_inode); |
216 | struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld; | |
217 | ||
218 | if (!list_empty(&lo->plh_layouts)) { | |
219 | struct nfs_client *clp = server->nfs_client; | |
220 | ||
221 | spin_lock(&clp->cl_lock); | |
222 | list_del_init(&lo->plh_layouts); | |
223 | spin_unlock(&clp->cl_lock); | |
224 | } | |
9fa40758 | 225 | put_rpccred(lo->plh_lc_cred); |
57934278 | 226 | return ld->free_layout_hdr(lo); |
636fb9c8 BH |
227 | } |
228 | ||
e5e94017 | 229 | static void |
6622c3ea | 230 | pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo) |
e5e94017 | 231 | { |
bb346f63 | 232 | struct nfs_inode *nfsi = NFS_I(lo->plh_inode); |
cc6e5340 | 233 | dprintk("%s: freeing layout cache %p\n", __func__, lo); |
bb346f63 TM |
234 | nfsi->layout = NULL; |
235 | /* Reset MDS Threshold I/O counters */ | |
236 | nfsi->write_io = 0; | |
237 | nfsi->read_io = 0; | |
e5e94017 BH |
238 | } |
239 | ||
b1f69b75 | 240 | void |
70c3bd2b | 241 | pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo) |
974cec8c | 242 | { |
cc6e5340 FI |
243 | struct inode *inode = lo->plh_inode; |
244 | ||
245 | if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) { | |
566f8737 PT |
246 | if (!list_empty(&lo->plh_segs)) |
247 | WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n"); | |
6622c3ea | 248 | pnfs_detach_layout_hdr(lo); |
cc6e5340 | 249 | spin_unlock(&inode->i_lock); |
6622c3ea | 250 | pnfs_free_layout_hdr(lo); |
cc6e5340 | 251 | } |
974cec8c AA |
252 | } |
253 | ||
b9e028fd TM |
254 | static int |
255 | pnfs_iomode_to_fail_bit(u32 iomode) | |
256 | { | |
257 | return iomode == IOMODE_RW ? | |
258 | NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED; | |
259 | } | |
260 | ||
261 | static void | |
3e621214 | 262 | pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit) |
b9e028fd | 263 | { |
25c75333 | 264 | lo->plh_retry_timestamp = jiffies; |
39e88fcf | 265 | if (!test_and_set_bit(fail_bit, &lo->plh_flags)) |
3e621214 TM |
266 | atomic_inc(&lo->plh_refcount); |
267 | } | |
268 | ||
269 | static void | |
270 | pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit) | |
271 | { | |
272 | if (test_and_clear_bit(fail_bit, &lo->plh_flags)) | |
273 | atomic_dec(&lo->plh_refcount); | |
274 | } | |
275 | ||
276 | static void | |
277 | pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode) | |
278 | { | |
279 | struct inode *inode = lo->plh_inode; | |
115ce575 TM |
280 | struct pnfs_layout_range range = { |
281 | .iomode = iomode, | |
282 | .offset = 0, | |
283 | .length = NFS4_MAX_UINT64, | |
284 | }; | |
285 | LIST_HEAD(head); | |
3e621214 TM |
286 | |
287 | spin_lock(&inode->i_lock); | |
288 | pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode)); | |
115ce575 | 289 | pnfs_mark_matching_lsegs_invalid(lo, &head, &range); |
3e621214 | 290 | spin_unlock(&inode->i_lock); |
115ce575 | 291 | pnfs_free_lseg_list(&head); |
b9e028fd TM |
292 | dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__, |
293 | iomode == IOMODE_RW ? "RW" : "READ"); | |
294 | } | |
295 | ||
296 | static bool | |
297 | pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode) | |
298 | { | |
25c75333 | 299 | unsigned long start, end; |
3e621214 TM |
300 | int fail_bit = pnfs_iomode_to_fail_bit(iomode); |
301 | ||
302 | if (test_bit(fail_bit, &lo->plh_flags) == 0) | |
25c75333 TM |
303 | return false; |
304 | end = jiffies; | |
305 | start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT; | |
306 | if (!time_in_range(lo->plh_retry_timestamp, start, end)) { | |
307 | /* It is time to retry the failed layoutgets */ | |
3e621214 | 308 | pnfs_layout_clear_fail_bit(lo, fail_bit); |
25c75333 TM |
309 | return false; |
310 | } | |
311 | return true; | |
b9e028fd TM |
312 | } |
313 | ||
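pnfs_layout_set_fail_bit() records jiffies in plh_retry_timestamp, and pnfs_layout_io_test_failed() above honours the fail bit only while that timestamp is less than PNFS_LAYOUTGET_RETRY_TIMEOUT (120 seconds) old; after that the bit is cleared and LAYOUTGET may be retried. A standalone userspace sketch of the same window test, using plain seconds instead of jiffies and time_in_range():

```c
#include <stdbool.h>
#include <stdio.h>

#define RETRY_TIMEOUT 120			/* mirrors 120*HZ, in seconds */

/* Keep treating the layout as failed only while the failure is recent. */
static bool io_test_failed(unsigned long retry_timestamp, unsigned long now)
{
	unsigned long start = now - RETRY_TIMEOUT;

	return retry_timestamp >= start && retry_timestamp <= now;
}

int main(void)
{
	/* failure recorded at t = 1000s */
	printf("%d\n", io_test_failed(1000, 1060));	/* 1: stay on MDS I/O      */
	printf("%d\n", io_test_failed(1000, 1200));	/* 0: retry LAYOUTGET now  */
	return 0;
}
```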
974cec8c AA |
314 | static void |
315 | init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg) | |
316 | { | |
566052c5 | 317 | INIT_LIST_HEAD(&lseg->pls_list); |
a9bae566 | 318 | INIT_LIST_HEAD(&lseg->pls_lc_list); |
4541d16c FI |
319 | atomic_set(&lseg->pls_refcount, 1); |
320 | smp_mb(); | |
321 | set_bit(NFS_LSEG_VALID, &lseg->pls_flags); | |
566052c5 | 322 | lseg->pls_layout = lo; |
974cec8c AA |
323 | } |
324 | ||
905ca191 | 325 | static void pnfs_free_lseg(struct pnfs_layout_segment *lseg) |
974cec8c | 326 | { |
b7edfaa1 | 327 | struct inode *ino = lseg->pls_layout->plh_inode; |
974cec8c | 328 | |
b1f69b75 | 329 | NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg); |
974cec8c AA |
330 | } |
331 | ||
d684d2ae | 332 | static void |
57036a37 TM |
333 | pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo, |
334 | struct pnfs_layout_segment *lseg) | |
d684d2ae | 335 | { |
57036a37 | 336 | struct inode *inode = lo->plh_inode; |
d684d2ae | 337 | |
d20581aa | 338 | WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags)); |
d684d2ae | 339 | list_del_init(&lseg->pls_list); |
8f0d27dc TM |
340 | /* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */ |
341 | atomic_dec(&lo->plh_refcount); | |
173f77e9 TM |
342 | if (list_empty(&lo->plh_segs)) |
343 | clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags); | |
d684d2ae FI |
344 | rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq); |
345 | } | |
346 | ||
aa1e0e3a PT |
347 | /* Return true if layoutreturn is needed */ |
348 | static bool | |
349 | pnfs_layout_need_return(struct pnfs_layout_hdr *lo, | |
27b6f539 | 350 | struct pnfs_layout_segment *lseg) |
aa1e0e3a PT |
351 | { |
352 | struct pnfs_layout_segment *s; | |
353 | ||
354 | if (!test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)) | |
355 | return false; | |
356 | ||
357 | list_for_each_entry(s, &lo->plh_segs, pls_list) | |
27b6f539 | 358 | if (s != lseg && test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags)) |
aa1e0e3a PT |
359 | return false; |
360 | ||
aa1e0e3a PT |
361 | return true; |
362 | } | |
363 | ||
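In other words, the layoutreturn issued from pnfs_layoutreturn_before_put_lseg() below is sent only when the segment being released is the last one in the layout still carrying NFS_LSEG_LAYOUTRETURN; while another marked segment remains, the return is deferred until that one is dropped.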
4ef2e4f8 TM |
364 | static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg, |
365 | struct pnfs_layout_hdr *lo, struct inode *inode) | |
27b6f539 | 366 | { |
27b6f539 PT |
367 | lo = lseg->pls_layout; |
368 | inode = lo->plh_inode; | |
369 | ||
370 | spin_lock(&inode->i_lock); | |
371 | if (pnfs_layout_need_return(lo, lseg)) { | |
372 | nfs4_stateid stateid; | |
373 | enum pnfs_iomode iomode; | |
374 | ||
375 | stateid = lo->plh_stateid; | |
376 | iomode = lo->plh_return_iomode; | |
377 | /* decreased in pnfs_send_layoutreturn() */ | |
378 | lo->plh_block_lgets++; | |
379 | lo->plh_return_iomode = 0; | |
380 | spin_unlock(&inode->i_lock); | |
4ef2e4f8 | 381 | pnfs_get_layout_hdr(lo); |
27b6f539 | 382 | |
4ef2e4f8 TM |
383 | /* Send an async layoutreturn so we dont deadlock */ |
384 | pnfs_send_layoutreturn(lo, stateid, iomode, false); | |
27b6f539 | 385 | } else |
4ef2e4f8 | 386 | spin_unlock(&inode->i_lock); |
27b6f539 PT |
387 | } |
388 | ||
bae724ef | 389 | void |
9369a431 | 390 | pnfs_put_lseg(struct pnfs_layout_segment *lseg) |
974cec8c | 391 | { |
57036a37 | 392 | struct pnfs_layout_hdr *lo; |
d684d2ae FI |
393 | struct inode *inode; |
394 | ||
395 | if (!lseg) | |
396 | return; | |
397 | ||
4541d16c FI |
398 | dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg, |
399 | atomic_read(&lseg->pls_refcount), | |
400 | test_bit(NFS_LSEG_VALID, &lseg->pls_flags)); | |
4ef2e4f8 TM |
401 | |
402 | /* Handle the case where refcount != 1 */ | |
403 | if (atomic_add_unless(&lseg->pls_refcount, -1, 1)) | |
404 | return; | |
405 | ||
57036a37 TM |
406 | lo = lseg->pls_layout; |
407 | inode = lo->plh_inode; | |
4ef2e4f8 TM |
408 | /* Do we need a layoutreturn? */ |
409 | if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)) | |
410 | pnfs_layoutreturn_before_put_lseg(lseg, lo, inode); | |
411 | ||
d684d2ae | 412 | if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) { |
8f0d27dc | 413 | pnfs_get_layout_hdr(lo); |
4ef2e4f8 TM |
414 | pnfs_layout_remove_lseg(lo, lseg); |
415 | spin_unlock(&inode->i_lock); | |
416 | pnfs_free_lseg(lseg); | |
417 | pnfs_put_layout_hdr(lo); | |
4541d16c | 418 | } |
4541d16c | 419 | } |
9369a431 | 420 | EXPORT_SYMBOL_GPL(pnfs_put_lseg); |
974cec8c | 421 | |
6543f803 | 422 | static void pnfs_free_lseg_async_work(struct work_struct *work) |
e6cf82d1 WAA |
423 | { |
424 | struct pnfs_layout_segment *lseg; | |
6543f803 | 425 | struct pnfs_layout_hdr *lo; |
e6cf82d1 WAA |
426 | |
427 | lseg = container_of(work, struct pnfs_layout_segment, pls_work); | |
6543f803 | 428 | lo = lseg->pls_layout; |
e6cf82d1 | 429 | |
6543f803 TM |
430 | pnfs_free_lseg(lseg); |
431 | pnfs_put_layout_hdr(lo); | |
e6cf82d1 WAA |
432 | } |
433 | ||
6543f803 | 434 | static void pnfs_free_lseg_async(struct pnfs_layout_segment *lseg) |
e6cf82d1 | 435 | { |
6543f803 | 436 | INIT_WORK(&lseg->pls_work, pnfs_free_lseg_async_work); |
e6cf82d1 WAA |
437 | schedule_work(&lseg->pls_work); |
438 | } | |
6543f803 TM |
439 | |
440 | void | |
441 | pnfs_put_lseg_locked(struct pnfs_layout_segment *lseg) | |
442 | { | |
443 | if (!lseg) | |
444 | return; | |
445 | ||
446 | assert_spin_locked(&lseg->pls_layout->plh_inode->i_lock); | |
447 | ||
448 | dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg, | |
449 | atomic_read(&lseg->pls_refcount), | |
450 | test_bit(NFS_LSEG_VALID, &lseg->pls_flags)); | |
451 | if (atomic_dec_and_test(&lseg->pls_refcount)) { | |
452 | struct pnfs_layout_hdr *lo = lseg->pls_layout; | |
453 | pnfs_get_layout_hdr(lo); | |
454 | pnfs_layout_remove_lseg(lo, lseg); | |
455 | pnfs_free_lseg_async(lseg); | |
456 | } | |
457 | } | |
458 | EXPORT_SYMBOL_GPL(pnfs_put_lseg_locked); | |
e6cf82d1 | 459 | |
3cb2df17 | 460 | static u64 |
fb3296eb BH |
461 | end_offset(u64 start, u64 len) |
462 | { | |
463 | u64 end; | |
464 | ||
465 | end = start + len; | |
466 | return end >= start ? end : NFS4_MAX_UINT64; | |
467 | } | |
468 | ||
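end_offset() computes the half-open end of a range and clamps to NFS4_MAX_UINT64 on overflow, so an "all ones" length (a whole-file layout) keeps meaning "to end of file" in the range helpers below. A quick standalone check of the clamp:

```c
#include <stdint.h>
#include <stdio.h>

#define NFS4_MAX_UINT64 (~(uint64_t)0)

/* Overflow-safe end of [start, start + len), as in end_offset() above. */
static uint64_t end_offset(uint64_t start, uint64_t len)
{
	uint64_t end = start + len;

	return end >= start ? end : NFS4_MAX_UINT64;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)end_offset(4096, 8192));		/* 12288 */
	printf("%llx\n",
	       (unsigned long long)end_offset(4096, NFS4_MAX_UINT64));		/* ffffffffffffffff */
	return 0;
}
```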
fb3296eb BH |
469 | /* |
470 | * is l2 fully contained in l1? | |
471 | * start1 end1 | |
472 | * [----------------------------------) | |
473 | * start2 end2 | |
474 | * [----------------) | |
475 | */ | |
3cb2df17 | 476 | static bool |
7dc0ac70 | 477 | pnfs_lseg_range_contained(const struct pnfs_layout_range *l1, |
3cb2df17 | 478 | const struct pnfs_layout_range *l2) |
fb3296eb BH |
479 | { |
480 | u64 start1 = l1->offset; | |
481 | u64 end1 = end_offset(start1, l1->length); | |
482 | u64 start2 = l2->offset; | |
483 | u64 end2 = end_offset(start2, l2->length); | |
484 | ||
485 | return (start1 <= start2) && (end1 >= end2); | |
486 | } | |
487 | ||
488 | /* | |
489 | * do l1 and l2 intersect? | |
490 | * start1 end1 | |
491 | * [----------------------------------) | |
492 | * start2 end2 | |
493 | * [----------------) | |
494 | */ | |
3cb2df17 | 495 | static bool |
7dc0ac70 | 496 | pnfs_lseg_range_intersecting(const struct pnfs_layout_range *l1, |
3cb2df17 | 497 | const struct pnfs_layout_range *l2) |
fb3296eb BH |
498 | { |
499 | u64 start1 = l1->offset; | |
500 | u64 end1 = end_offset(start1, l1->length); | |
501 | u64 start2 = l2->offset; | |
502 | u64 end2 = end_offset(start2, l2->length); | |
503 | ||
504 | return (end1 == NFS4_MAX_UINT64 || end1 > start2) && | |
505 | (end2 == NFS4_MAX_UINT64 || end2 > start1); | |
506 | } | |
507 | ||
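Both helpers treat a range as the half-open interval [offset, offset + length), with a length of NFS4_MAX_UINT64 standing in for "to end of file". For example, {0, EOF} both contains and intersects {4096, 8192}; {0, 4096} and {4096, 4096} merely touch, so they neither contain nor intersect; and {0, 8192} intersects {4096, 8192} without containing it.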
4541d16c | 508 | static bool |
3cb2df17 TM |
509 | should_free_lseg(const struct pnfs_layout_range *lseg_range, |
510 | const struct pnfs_layout_range *recall_range) | |
4541d16c | 511 | { |
778b5502 BH |
512 | return (recall_range->iomode == IOMODE_ANY || |
513 | lseg_range->iomode == recall_range->iomode) && | |
7dc0ac70 | 514 | pnfs_lseg_range_intersecting(lseg_range, recall_range); |
974cec8c AA |
515 | } |
516 | ||
24956804 TM |
517 | static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg, |
518 | struct list_head *tmp_list) | |
519 | { | |
520 | if (!atomic_dec_and_test(&lseg->pls_refcount)) | |
521 | return false; | |
522 | pnfs_layout_remove_lseg(lseg->pls_layout, lseg); | |
523 | list_add(&lseg->pls_list, tmp_list); | |
524 | return true; | |
525 | } | |
526 | ||
4541d16c FI |
527 | /* Returns 1 if lseg is removed from list, 0 otherwise */ |
528 | static int mark_lseg_invalid(struct pnfs_layout_segment *lseg, | |
529 | struct list_head *tmp_list) | |
530 | { | |
531 | int rv = 0; | |
532 | ||
533 | if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) { | |
534 | /* Remove the reference keeping the lseg in the | |
535 | * list. It will now be removed when all | |
536 | * outstanding io is finished. | |
537 | */ | |
d684d2ae FI |
538 | dprintk("%s: lseg %p ref %d\n", __func__, lseg, |
539 | atomic_read(&lseg->pls_refcount)); | |
24956804 | 540 | if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list)) |
d684d2ae | 541 | rv = 1; |
4541d16c FI |
542 | } |
543 | return rv; | |
544 | } | |
545 | ||
546 | /* Returns count of number of matching invalid lsegs remaining in list | |
547 | * after call. | |
548 | */ | |
43f1b3da | 549 | int |
49a85061 | 550 | pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo, |
4541d16c | 551 | struct list_head *tmp_list, |
778b5502 | 552 | struct pnfs_layout_range *recall_range) |
974cec8c AA |
553 | { |
554 | struct pnfs_layout_segment *lseg, *next; | |
4541d16c | 555 | int invalid = 0, removed = 0; |
974cec8c AA |
556 | |
557 | dprintk("%s:Begin lo %p\n", __func__, lo); | |
558 | ||
8006bfba | 559 | if (list_empty(&lo->plh_segs)) |
38511722 | 560 | return 0; |
4541d16c | 561 | list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list) |
778b5502 BH |
562 | if (!recall_range || |
563 | should_free_lseg(&lseg->pls_range, recall_range)) { | |
4541d16c FI |
564 | dprintk("%s: freeing lseg %p iomode %d " |
565 | "offset %llu length %llu\n", __func__, | |
566 | lseg, lseg->pls_range.iomode, lseg->pls_range.offset, | |
567 | lseg->pls_range.length); | |
568 | invalid++; | |
569 | removed += mark_lseg_invalid(lseg, tmp_list); | |
570 | } | |
571 | dprintk("%s:Return %i\n", __func__, invalid - removed); | |
572 | return invalid - removed; | |
974cec8c AA |
573 | } |
574 | ||
f49f9baa | 575 | /* note free_me must contain lsegs from a single layout_hdr */ |
43f1b3da | 576 | void |
4541d16c | 577 | pnfs_free_lseg_list(struct list_head *free_me) |
974cec8c | 578 | { |
4541d16c | 579 | struct pnfs_layout_segment *lseg, *tmp; |
f49f9baa FI |
580 | |
581 | if (list_empty(free_me)) | |
582 | return; | |
583 | ||
4541d16c | 584 | list_for_each_entry_safe(lseg, tmp, free_me, pls_list) { |
566052c5 | 585 | list_del(&lseg->pls_list); |
905ca191 | 586 | pnfs_free_lseg(lseg); |
974cec8c AA |
587 | } |
588 | } | |
589 | ||
e5e94017 BH |
590 | void |
591 | pnfs_destroy_layout(struct nfs_inode *nfsi) | |
592 | { | |
593 | struct pnfs_layout_hdr *lo; | |
974cec8c | 594 | LIST_HEAD(tmp_list); |
e5e94017 BH |
595 | |
596 | spin_lock(&nfsi->vfs_inode.i_lock); | |
597 | lo = nfsi->layout; | |
598 | if (lo) { | |
38511722 | 599 | lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */ |
49a85061 | 600 | pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL); |
3e621214 TM |
601 | pnfs_get_layout_hdr(lo); |
602 | pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED); | |
603 | pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED); | |
c829013d | 604 | pnfs_clear_retry_layoutget(lo); |
3e621214 TM |
605 | spin_unlock(&nfsi->vfs_inode.i_lock); |
606 | pnfs_free_lseg_list(&tmp_list); | |
607 | pnfs_put_layout_hdr(lo); | |
608 | } else | |
609 | spin_unlock(&nfsi->vfs_inode.i_lock); | |
974cec8c | 610 | } |
041245c8 | 611 | EXPORT_SYMBOL_GPL(pnfs_destroy_layout); |
974cec8c | 612 | |
fd9a8d71 TM |
613 | static bool |
614 | pnfs_layout_add_bulk_destroy_list(struct inode *inode, | |
615 | struct list_head *layout_list) | |
974cec8c AA |
616 | { |
617 | struct pnfs_layout_hdr *lo; | |
fd9a8d71 | 618 | bool ret = false; |
974cec8c | 619 | |
fd9a8d71 TM |
620 | spin_lock(&inode->i_lock); |
621 | lo = NFS_I(inode)->layout; | |
622 | if (lo != NULL && list_empty(&lo->plh_bulk_destroy)) { | |
623 | pnfs_get_layout_hdr(lo); | |
624 | list_add(&lo->plh_bulk_destroy, layout_list); | |
625 | ret = true; | |
626 | } | |
627 | spin_unlock(&inode->i_lock); | |
628 | return ret; | |
629 | } | |
630 | ||
631 | /* Caller must hold rcu_read_lock and clp->cl_lock */ | |
632 | static int | |
633 | pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp, | |
634 | struct nfs_server *server, | |
635 | struct list_head *layout_list) | |
636 | { | |
637 | struct pnfs_layout_hdr *lo, *next; | |
638 | struct inode *inode; | |
639 | ||
640 | list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) { | |
641 | inode = igrab(lo->plh_inode); | |
642 | if (inode == NULL) | |
643 | continue; | |
644 | list_del_init(&lo->plh_layouts); | |
645 | if (pnfs_layout_add_bulk_destroy_list(inode, layout_list)) | |
646 | continue; | |
647 | rcu_read_unlock(); | |
648 | spin_unlock(&clp->cl_lock); | |
649 | iput(inode); | |
650 | spin_lock(&clp->cl_lock); | |
651 | rcu_read_lock(); | |
652 | return -EAGAIN; | |
653 | } | |
654 | return 0; | |
655 | } | |
656 | ||
657 | static int | |
658 | pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list, | |
659 | bool is_bulk_recall) | |
660 | { | |
661 | struct pnfs_layout_hdr *lo; | |
662 | struct inode *inode; | |
663 | struct pnfs_layout_range range = { | |
664 | .iomode = IOMODE_ANY, | |
665 | .offset = 0, | |
666 | .length = NFS4_MAX_UINT64, | |
667 | }; | |
668 | LIST_HEAD(lseg_list); | |
669 | int ret = 0; | |
670 | ||
671 | while (!list_empty(layout_list)) { | |
672 | lo = list_entry(layout_list->next, struct pnfs_layout_hdr, | |
673 | plh_bulk_destroy); | |
674 | dprintk("%s freeing layout for inode %lu\n", __func__, | |
675 | lo->plh_inode->i_ino); | |
676 | inode = lo->plh_inode; | |
7c5d1875 CH |
677 | |
678 | pnfs_layoutcommit_inode(inode, false); | |
679 | ||
fd9a8d71 TM |
680 | spin_lock(&inode->i_lock); |
681 | list_del_init(&lo->plh_bulk_destroy); | |
682 | lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */ | |
683 | if (is_bulk_recall) | |
684 | set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags); | |
685 | if (pnfs_mark_matching_lsegs_invalid(lo, &lseg_list, &range)) | |
686 | ret = -EAGAIN; | |
687 | spin_unlock(&inode->i_lock); | |
688 | pnfs_free_lseg_list(&lseg_list); | |
689 | pnfs_put_layout_hdr(lo); | |
690 | iput(inode); | |
691 | } | |
692 | return ret; | |
693 | } | |
694 | ||
695 | int | |
696 | pnfs_destroy_layouts_byfsid(struct nfs_client *clp, | |
697 | struct nfs_fsid *fsid, | |
698 | bool is_recall) | |
699 | { | |
700 | struct nfs_server *server; | |
701 | LIST_HEAD(layout_list); | |
c47abcf8 | 702 | |
974cec8c | 703 | spin_lock(&clp->cl_lock); |
6382a441 | 704 | rcu_read_lock(); |
fd9a8d71 | 705 | restart: |
6382a441 | 706 | list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { |
fd9a8d71 TM |
707 | if (memcmp(&server->fsid, fsid, sizeof(*fsid)) != 0) |
708 | continue; | |
709 | if (pnfs_layout_bulk_destroy_byserver_locked(clp, | |
710 | server, | |
711 | &layout_list) != 0) | |
712 | goto restart; | |
6382a441 WAA |
713 | } |
714 | rcu_read_unlock(); | |
974cec8c AA |
715 | spin_unlock(&clp->cl_lock); |
716 | ||
fd9a8d71 TM |
717 | if (list_empty(&layout_list)) |
718 | return 0; | |
719 | return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall); | |
720 | } | |
721 | ||
722 | int | |
723 | pnfs_destroy_layouts_byclid(struct nfs_client *clp, | |
724 | bool is_recall) | |
725 | { | |
726 | struct nfs_server *server; | |
727 | LIST_HEAD(layout_list); | |
728 | ||
729 | spin_lock(&clp->cl_lock); | |
730 | rcu_read_lock(); | |
731 | restart: | |
732 | list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { | |
733 | if (pnfs_layout_bulk_destroy_byserver_locked(clp, | |
734 | server, | |
735 | &layout_list) != 0) | |
736 | goto restart; | |
974cec8c | 737 | } |
fd9a8d71 TM |
738 | rcu_read_unlock(); |
739 | spin_unlock(&clp->cl_lock); | |
740 | ||
741 | if (list_empty(&layout_list)) | |
742 | return 0; | |
743 | return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall); | |
744 | } | |
745 | ||
746 | /* | |
747 | * Called by the state manager to remove all layouts established under an | |
748 | * expired lease. | |
749 | */ | |
750 | void | |
751 | pnfs_destroy_all_layouts(struct nfs_client *clp) | |
752 | { | |
753 | nfs4_deviceid_mark_client_invalid(clp); | |
754 | nfs4_deviceid_purge_client(clp); | |
755 | ||
756 | pnfs_destroy_layouts_byclid(clp, false); | |
e5e94017 BH |
757 | } |
758 | ||
5a65503f TM |
759 | /* |
760 | * Compare 2 layout stateid sequence ids, to see which is newer, | |
761 | * taking into account wraparound issues. | |
762 | */ | |
763 | static bool pnfs_seqid_is_newer(u32 s1, u32 s2) | |
764 | { | |
2c64c57d | 765 | return (s32)(s1 - s2) > 0; |
5a65503f TM |
766 | } |
767 | ||
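pnfs_seqid_is_newer() compares the two sequence ids through a signed 32-bit difference, so the ordering still comes out right after a seqid wraps past 2^32. A standalone illustration:

```c
#include <stdint.h>
#include <stdio.h>

/* Same test as pnfs_seqid_is_newer(): signed view of the unsigned difference. */
static int seqid_is_newer(uint32_t s1, uint32_t s2)
{
	return (int32_t)(s1 - s2) > 0;
}

int main(void)
{
	printf("%d\n", seqid_is_newer(5, 4));			/* 1: ordinary case         */
	printf("%d\n", seqid_is_newer(2, 0xfffffffe));		/* 1: s1 wrapped past zero  */
	printf("%d\n", seqid_is_newer(0xfffffffe, 2));		/* 0: s2 is the newer one   */
	return 0;
}
```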
fd6002e9 | 768 | /* update lo->plh_stateid with new if is more recent */ |
43f1b3da FI |
769 | void |
770 | pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new, | |
771 | bool update_barrier) | |
b1f69b75 | 772 | { |
22aaf714 TM |
773 | u32 oldseq, newseq, new_barrier; |
774 | int empty = list_empty(&lo->plh_segs); | |
b1f69b75 | 775 | |
2d2f24ad TM |
776 | oldseq = be32_to_cpu(lo->plh_stateid.seqid); |
777 | newseq = be32_to_cpu(new->seqid); | |
22aaf714 | 778 | if (empty || pnfs_seqid_is_newer(newseq, oldseq)) { |
f597c537 | 779 | nfs4_stateid_copy(&lo->plh_stateid, new); |
43f1b3da | 780 | if (update_barrier) { |
22aaf714 | 781 | new_barrier = be32_to_cpu(new->seqid); |
43f1b3da FI |
782 | } else { |
783 | /* Because of wraparound, we want to keep the barrier | |
22aaf714 | 784 | * "close" to the current seqids. |
43f1b3da | 785 | */ |
22aaf714 | 786 | new_barrier = newseq - atomic_read(&lo->plh_outstanding); |
43f1b3da | 787 | } |
22aaf714 TM |
788 | if (empty || pnfs_seqid_is_newer(new_barrier, lo->plh_barrier)) |
789 | lo->plh_barrier = new_barrier; | |
43f1b3da | 790 | } |
b1f69b75 AA |
791 | } |
792 | ||
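Concretely, when update_barrier is not set: if the newest LAYOUTGET reply carries seqid 12 while three LAYOUTGETs are still outstanding, the barrier becomes 9 (assuming that is newer than the previous barrier), and pnfs_layout_stateid_blocked() below will then treat any reply whose seqid is 9 or lower as stale.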
cf7d63f1 | 793 | static bool |
19c54aba TM |
794 | pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo, |
795 | const nfs4_stateid *stateid) | |
43f1b3da | 796 | { |
19c54aba | 797 | u32 seqid = be32_to_cpu(stateid->seqid); |
25a1a621 | 798 | |
19c54aba TM |
799 | return !pnfs_seqid_is_newer(seqid, lo->plh_barrier); |
800 | } | |
801 | ||
ce6ab4f2 PT |
802 | static bool |
803 | pnfs_layout_returning(const struct pnfs_layout_hdr *lo, | |
804 | struct pnfs_layout_range *range) | |
805 | { | |
806 | return test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags) && | |
807 | (lo->plh_return_iomode == IOMODE_ANY || | |
808 | lo->plh_return_iomode == range->iomode); | |
809 | } | |
810 | ||
19c54aba TM |
811 | /* lget is set to 1 if called from inside send_layoutget call chain */ |
812 | static bool | |
ce6ab4f2 PT |
813 | pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo, |
814 | struct pnfs_layout_range *range, int lget) | |
19c54aba | 815 | { |
f7e8917a FI |
816 | return lo->plh_block_lgets || |
817 | test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) || | |
43f1b3da | 818 | (list_empty(&lo->plh_segs) && |
ce6ab4f2 PT |
819 | (atomic_read(&lo->plh_outstanding) > lget)) || |
820 | pnfs_layout_returning(lo, range); | |
cf7d63f1 FI |
821 | } |
822 | ||
fd6002e9 FI |
823 | int |
824 | pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo, | |
ce6ab4f2 | 825 | struct pnfs_layout_range *range, |
fd6002e9 | 826 | struct nfs4_state *open_state) |
b1f69b75 | 827 | { |
fd6002e9 | 828 | int status = 0; |
974cec8c | 829 | |
b1f69b75 | 830 | dprintk("--> %s\n", __func__); |
fd6002e9 | 831 | spin_lock(&lo->plh_inode->i_lock); |
ce6ab4f2 | 832 | if (pnfs_layoutgets_blocked(lo, range, 1)) { |
cf7d63f1 | 833 | status = -EAGAIN; |
5d422301 TM |
834 | } else if (!nfs4_valid_open_stateid(open_state)) { |
835 | status = -EBADF; | |
47abadef CH |
836 | } else if (list_empty(&lo->plh_segs) || |
837 | test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) { | |
fd6002e9 FI |
838 | int seq; |
839 | ||
840 | do { | |
841 | seq = read_seqbegin(&open_state->seqlock); | |
f597c537 | 842 | nfs4_stateid_copy(dst, &open_state->stateid); |
fd6002e9 FI |
843 | } while (read_seqretry(&open_state->seqlock, seq)); |
844 | } else | |
f597c537 | 845 | nfs4_stateid_copy(dst, &lo->plh_stateid); |
fd6002e9 | 846 | spin_unlock(&lo->plh_inode->i_lock); |
b1f69b75 | 847 | dprintk("<-- %s\n", __func__); |
fd6002e9 | 848 | return status; |
b1f69b75 AA |
849 | } |
850 | ||
851 | /* | |
852 | * Get layout from server. | |
853 | * for now, assume that whole file layouts are requested. | |
854 | * arg->offset: 0 | |
855 | * arg->length: all ones | |
856 | */ | |
e5e94017 BH |
857 | static struct pnfs_layout_segment * |
858 | send_layoutget(struct pnfs_layout_hdr *lo, | |
859 | struct nfs_open_context *ctx, | |
fb3296eb | 860 | struct pnfs_layout_range *range, |
a75b9df9 | 861 | gfp_t gfp_flags) |
e5e94017 | 862 | { |
b7edfaa1 | 863 | struct inode *ino = lo->plh_inode; |
b1f69b75 AA |
864 | struct nfs_server *server = NFS_SERVER(ino); |
865 | struct nfs4_layoutget *lgp; | |
a0b0a6e3 | 866 | struct pnfs_layout_segment *lseg; |
b1f69b75 AA |
867 | |
868 | dprintk("--> %s\n", __func__); | |
e5e94017 | 869 | |
a75b9df9 | 870 | lgp = kzalloc(sizeof(*lgp), gfp_flags); |
cf7d63f1 | 871 | if (lgp == NULL) |
b1f69b75 | 872 | return NULL; |
35124a09 | 873 | |
fb3296eb BH |
874 | lgp->args.minlength = PAGE_CACHE_SIZE; |
875 | if (lgp->args.minlength > range->length) | |
876 | lgp->args.minlength = range->length; | |
b1f69b75 | 877 | lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE; |
fb3296eb | 878 | lgp->args.range = *range; |
b1f69b75 AA |
879 | lgp->args.type = server->pnfs_curr_ld->id; |
880 | lgp->args.inode = ino; | |
881 | lgp->args.ctx = get_nfs_open_context(ctx); | |
a75b9df9 | 882 | lgp->gfp_flags = gfp_flags; |
6ab59344 | 883 | lgp->cred = lo->plh_lc_cred; |
b1f69b75 AA |
884 | |
885 | /* Synchronously retrieve layout information from server and | |
886 | * store in lseg. | |
887 | */ | |
a0b0a6e3 TM |
888 | lseg = nfs4_proc_layoutget(lgp, gfp_flags); |
889 | if (IS_ERR(lseg)) { | |
890 | switch (PTR_ERR(lseg)) { | |
891 | case -ENOMEM: | |
892 | case -ERESTARTSYS: | |
893 | break; | |
894 | default: | |
895 | /* remember that LAYOUTGET failed and suspend trying */ | |
b9e028fd | 896 | pnfs_layout_io_set_failed(lo, range->iomode); |
a0b0a6e3 TM |
897 | } |
898 | return NULL; | |
d67ae825 TH |
899 | } else |
900 | pnfs_layout_clear_fail_bit(lo, | |
901 | pnfs_iomode_to_fail_bit(range->iomode)); | |
35124a09 | 902 | |
974cec8c AA |
903 | return lseg; |
904 | } | |
905 | ||
24956804 TM |
906 | static void pnfs_clear_layoutcommit(struct inode *inode, |
907 | struct list_head *head) | |
908 | { | |
909 | struct nfs_inode *nfsi = NFS_I(inode); | |
910 | struct pnfs_layout_segment *lseg, *tmp; | |
911 | ||
912 | if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) | |
913 | return; | |
914 | list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) { | |
915 | if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) | |
916 | continue; | |
917 | pnfs_lseg_dec_and_remove_zero(lseg, head); | |
918 | } | |
919 | } | |
920 | ||
d67ae825 TH |
921 | void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo) |
922 | { | |
923 | clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags); | |
924 | smp_mb__after_atomic(); | |
925 | wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN); | |
926 | } | |
927 | ||
f40eb5d0 PT |
928 | static int |
929 | pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, nfs4_stateid stateid, | |
6c16605d | 930 | enum pnfs_iomode iomode, bool sync) |
f40eb5d0 PT |
931 | { |
932 | struct inode *ino = lo->plh_inode; | |
933 | struct nfs4_layoutreturn *lrp; | |
934 | int status = 0; | |
935 | ||
e4af440a | 936 | lrp = kzalloc(sizeof(*lrp), GFP_NOFS); |
f40eb5d0 PT |
937 | if (unlikely(lrp == NULL)) { |
938 | status = -ENOMEM; | |
939 | spin_lock(&ino->i_lock); | |
940 | lo->plh_block_lgets--; | |
d67ae825 | 941 | pnfs_clear_layoutreturn_waitbit(lo); |
193e3aa2 | 942 | rpc_wake_up(&NFS_SERVER(ino)->roc_rpcwaitq); |
f40eb5d0 PT |
943 | spin_unlock(&ino->i_lock); |
944 | pnfs_put_layout_hdr(lo); | |
945 | goto out; | |
946 | } | |
947 | ||
948 | lrp->args.stateid = stateid; | |
949 | lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id; | |
950 | lrp->args.inode = ino; | |
15eb67c1 PT |
951 | lrp->args.range.iomode = iomode; |
952 | lrp->args.range.offset = 0; | |
953 | lrp->args.range.length = NFS4_MAX_UINT64; | |
f40eb5d0 PT |
954 | lrp->args.layout = lo; |
955 | lrp->clp = NFS_SERVER(ino)->nfs_client; | |
956 | lrp->cred = lo->plh_lc_cred; | |
957 | ||
6c16605d | 958 | status = nfs4_proc_layoutreturn(lrp, sync); |
f40eb5d0 PT |
959 | out: |
960 | dprintk("<-- %s status: %d\n", __func__, status); | |
961 | return status; | |
962 | } | |
963 | ||
293b3b06 AA |
964 | /* |
965 | * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr | |
966 | * when the layout segment list is empty. | |
967 | * | |
968 | * Note that a pnfs_layout_hdr can exist with an empty layout segment | |
969 | * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the | |
970 | * deviceid is marked invalid. | |
971 | */ | |
cbe82603 BH |
972 | int |
973 | _pnfs_return_layout(struct inode *ino) | |
974 | { | |
975 | struct pnfs_layout_hdr *lo = NULL; | |
976 | struct nfs_inode *nfsi = NFS_I(ino); | |
977 | LIST_HEAD(tmp_list); | |
cbe82603 | 978 | nfs4_stateid stateid; |
293b3b06 | 979 | int status = 0, empty; |
cbe82603 | 980 | |
366d5052 | 981 | dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino); |
cbe82603 BH |
982 | |
983 | spin_lock(&ino->i_lock); | |
984 | lo = nfsi->layout; | |
e5929f3c | 985 | if (!lo) { |
cbe82603 | 986 | spin_unlock(&ino->i_lock); |
293b3b06 AA |
987 | dprintk("NFS: %s no layout to return\n", __func__); |
988 | goto out; | |
cbe82603 BH |
989 | } |
990 | stateid = nfsi->layout->plh_stateid; | |
991 | /* Reference matched in nfs4_layoutreturn_release */ | |
70c3bd2b | 992 | pnfs_get_layout_hdr(lo); |
293b3b06 | 993 | empty = list_empty(&lo->plh_segs); |
24956804 | 994 | pnfs_clear_layoutcommit(ino, &tmp_list); |
49a85061 | 995 | pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL); |
c88953d8 CH |
996 | |
997 | if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) { | |
998 | struct pnfs_layout_range range = { | |
999 | .iomode = IOMODE_ANY, | |
1000 | .offset = 0, | |
1001 | .length = NFS4_MAX_UINT64, | |
1002 | }; | |
1003 | NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, &range); | |
1004 | } | |
1005 | ||
293b3b06 AA |
1006 | /* Don't send a LAYOUTRETURN if list was initially empty */ |
1007 | if (empty) { | |
1008 | spin_unlock(&ino->i_lock); | |
70c3bd2b | 1009 | pnfs_put_layout_hdr(lo); |
293b3b06 AA |
1010 | dprintk("NFS: %s no layout segments to return\n", __func__); |
1011 | goto out; | |
1012 | } | |
47abadef CH |
1013 | |
1014 | set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags); | |
ea0ded74 | 1015 | lo->plh_block_lgets++; |
cbe82603 BH |
1016 | spin_unlock(&ino->i_lock); |
1017 | pnfs_free_lseg_list(&tmp_list); | |
1018 | ||
6c16605d | 1019 | status = pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true); |
cbe82603 BH |
1020 | out: |
1021 | dprintk("<-- %s status: %d\n", __func__, status); | |
1022 | return status; | |
1023 | } | |
0a57cdac | 1024 | EXPORT_SYMBOL_GPL(_pnfs_return_layout); |
cbe82603 | 1025 | |
24028672 TM |
1026 | int |
1027 | pnfs_commit_and_return_layout(struct inode *inode) | |
1028 | { | |
1029 | struct pnfs_layout_hdr *lo; | |
1030 | int ret; | |
1031 | ||
1032 | spin_lock(&inode->i_lock); | |
1033 | lo = NFS_I(inode)->layout; | |
1034 | if (lo == NULL) { | |
1035 | spin_unlock(&inode->i_lock); | |
1036 | return 0; | |
1037 | } | |
1038 | pnfs_get_layout_hdr(lo); | |
1039 | /* Block new layoutgets and read/write to ds */ | |
1040 | lo->plh_block_lgets++; | |
1041 | spin_unlock(&inode->i_lock); | |
1042 | filemap_fdatawait(inode->i_mapping); | |
1043 | ret = pnfs_layoutcommit_inode(inode, true); | |
1044 | if (ret == 0) | |
1045 | ret = _pnfs_return_layout(inode); | |
1046 | spin_lock(&inode->i_lock); | |
1047 | lo->plh_block_lgets--; | |
1048 | spin_unlock(&inode->i_lock); | |
1049 | pnfs_put_layout_hdr(lo); | |
1050 | return ret; | |
1051 | } | |
1052 | ||
f7e8917a FI |
1053 | bool pnfs_roc(struct inode *ino) |
1054 | { | |
40dd4b7a TM |
1055 | struct nfs_inode *nfsi = NFS_I(ino); |
1056 | struct nfs_open_context *ctx; | |
1057 | struct nfs4_state *state; | |
f7e8917a FI |
1058 | struct pnfs_layout_hdr *lo; |
1059 | struct pnfs_layout_segment *lseg, *tmp; | |
193e3aa2 | 1060 | nfs4_stateid stateid; |
f7e8917a | 1061 | LIST_HEAD(tmp_list); |
193e3aa2 | 1062 | bool found = false, layoutreturn = false; |
f7e8917a FI |
1063 | |
1064 | spin_lock(&ino->i_lock); | |
40dd4b7a | 1065 | lo = nfsi->layout; |
f7e8917a FI |
1066 | if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) || |
1067 | test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) | |
40dd4b7a TM |
1068 | goto out_noroc; |
1069 | ||
1070 | /* Don't return layout if we hold a delegation */ | |
1071 | if (nfs4_check_delegation(ino, FMODE_READ)) | |
1072 | goto out_noroc; | |
1073 | ||
1074 | list_for_each_entry(ctx, &nfsi->open_files, list) { | |
1075 | state = ctx->state; | |
1076 | /* Don't return layout if there is open file state */ | |
1077 | if (state != NULL && state->state != 0) | |
1078 | goto out_noroc; | |
1079 | } | |
1080 | ||
c829013d | 1081 | pnfs_clear_retry_layoutget(lo); |
f7e8917a FI |
1082 | list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list) |
1083 | if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) { | |
1084 | mark_lseg_invalid(lseg, &tmp_list); | |
1085 | found = true; | |
1086 | } | |
1087 | if (!found) | |
40dd4b7a | 1088 | goto out_noroc; |
f7e8917a | 1089 | lo->plh_block_lgets++; |
70c3bd2b | 1090 | pnfs_get_layout_hdr(lo); /* matched in pnfs_roc_release */ |
f7e8917a FI |
1091 | spin_unlock(&ino->i_lock); |
1092 | pnfs_free_lseg_list(&tmp_list); | |
7140171e | 1093 | pnfs_layoutcommit_inode(ino, true); |
f7e8917a FI |
1094 | return true; |
1095 | ||
40dd4b7a | 1096 | out_noroc: |
193e3aa2 PT |
1097 | if (lo) { |
1098 | stateid = lo->plh_stateid; | |
1099 | layoutreturn = | |
1100 | test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, | |
1101 | &lo->plh_flags); | |
1102 | if (layoutreturn) { | |
1103 | lo->plh_block_lgets++; | |
1104 | pnfs_get_layout_hdr(lo); | |
1105 | } | |
1106 | } | |
f7e8917a | 1107 | spin_unlock(&ino->i_lock); |
7140171e TM |
1108 | if (layoutreturn) { |
1109 | pnfs_layoutcommit_inode(ino, true); | |
27b6f539 | 1110 | pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true); |
7140171e | 1111 | } |
f7e8917a FI |
1112 | return false; |
1113 | } | |
1114 | ||
1115 | void pnfs_roc_release(struct inode *ino) | |
1116 | { | |
1117 | struct pnfs_layout_hdr *lo; | |
1118 | ||
1119 | spin_lock(&ino->i_lock); | |
1120 | lo = NFS_I(ino)->layout; | |
1121 | lo->plh_block_lgets--; | |
6622c3ea TM |
1122 | if (atomic_dec_and_test(&lo->plh_refcount)) { |
1123 | pnfs_detach_layout_hdr(lo); | |
1124 | spin_unlock(&ino->i_lock); | |
1125 | pnfs_free_layout_hdr(lo); | |
1126 | } else | |
1127 | spin_unlock(&ino->i_lock); | |
f7e8917a FI |
1128 | } |
1129 | ||
1130 | void pnfs_roc_set_barrier(struct inode *ino, u32 barrier) | |
1131 | { | |
1132 | struct pnfs_layout_hdr *lo; | |
1133 | ||
1134 | spin_lock(&ino->i_lock); | |
1135 | lo = NFS_I(ino)->layout; | |
0f35ad6f | 1136 | if (pnfs_seqid_is_newer(barrier, lo->plh_barrier)) |
f7e8917a FI |
1137 | lo->plh_barrier = barrier; |
1138 | spin_unlock(&ino->i_lock); | |
1139 | } | |
1140 | ||
7fdab069 | 1141 | bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task) |
f7e8917a FI |
1142 | { |
1143 | struct nfs_inode *nfsi = NFS_I(ino); | |
7fdab069 | 1144 | struct pnfs_layout_hdr *lo; |
f7e8917a | 1145 | struct pnfs_layout_segment *lseg; |
193e3aa2 | 1146 | nfs4_stateid stateid; |
7fdab069 | 1147 | u32 current_seqid; |
193e3aa2 | 1148 | bool found = false, layoutreturn = false; |
f7e8917a FI |
1149 | |
1150 | spin_lock(&ino->i_lock); | |
1151 | list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list) | |
1152 | if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) { | |
7fdab069 | 1153 | rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL); |
f7e8917a | 1154 | found = true; |
7fdab069 | 1155 | goto out; |
f7e8917a | 1156 | } |
7fdab069 TM |
1157 | lo = nfsi->layout; |
1158 | current_seqid = be32_to_cpu(lo->plh_stateid.seqid); | |
f7e8917a | 1159 | |
7fdab069 TM |
1160 | /* Since close does not return a layout stateid for use as |
1161 | * a barrier, we choose the worst-case barrier. | |
1162 | */ | |
1163 | *barrier = current_seqid + atomic_read(&lo->plh_outstanding); | |
1164 | out: | |
193e3aa2 PT |
1165 | if (!found) { |
1166 | stateid = lo->plh_stateid; | |
1167 | layoutreturn = | |
1168 | test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, | |
1169 | &lo->plh_flags); | |
1170 | if (layoutreturn) { | |
1171 | lo->plh_block_lgets++; | |
1172 | pnfs_get_layout_hdr(lo); | |
1173 | } | |
1174 | } | |
f7e8917a | 1175 | spin_unlock(&ino->i_lock); |
193e3aa2 PT |
1176 | if (layoutreturn) { |
1177 | rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL); | |
27b6f539 | 1178 | pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, false); |
193e3aa2 | 1179 | } |
f7e8917a FI |
1180 | return found; |
1181 | } | |
1182 | ||
b1f69b75 AA |
1183 | /* |
1184 | * Compare two layout segments for sorting into layout cache. | |
1185 | * We want to preferentially return RW over RO layouts, so ensure those | |
1186 | * are seen first. | |
1187 | */ | |
1188 | static s64 | |
7dc0ac70 | 1189 | pnfs_lseg_range_cmp(const struct pnfs_layout_range *l1, |
3cb2df17 | 1190 | const struct pnfs_layout_range *l2) |
b1f69b75 | 1191 | { |
fb3296eb BH |
1192 | s64 d; |
1193 | ||
1194 | /* high offset > low offset */ | |
1195 | d = l1->offset - l2->offset; | |
1196 | if (d) | |
1197 | return d; | |
1198 | ||
1199 | /* short length > long length */ | |
1200 | d = l2->length - l1->length; | |
1201 | if (d) | |
1202 | return d; | |
1203 | ||
b1f69b75 | 1204 | /* read > read/write */ |
fb3296eb | 1205 | return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ); |
b1f69b75 AA |
1206 | } |
1207 | ||
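pnfs_layout_insert_lseg() below uses this comparator to keep plh_segs sorted by ascending offset, then by descending length, then RW before READ. For example, a whole-file RW segment at offset 0 sorts ahead of a 4 KB RW segment at offset 0, which in turn sorts ahead of the equivalent READ segment, so pnfs_find_lseg() walks the preferred segments first.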
974cec8c | 1208 | static void |
57036a37 | 1209 | pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo, |
974cec8c AA |
1210 | struct pnfs_layout_segment *lseg) |
1211 | { | |
b1f69b75 | 1212 | struct pnfs_layout_segment *lp; |
b1f69b75 | 1213 | |
974cec8c AA |
1214 | dprintk("%s:Begin\n", __func__); |
1215 | ||
b7edfaa1 | 1216 | list_for_each_entry(lp, &lo->plh_segs, pls_list) { |
7dc0ac70 | 1217 | if (pnfs_lseg_range_cmp(&lseg->pls_range, &lp->pls_range) > 0) |
b1f69b75 | 1218 | continue; |
566052c5 | 1219 | list_add_tail(&lseg->pls_list, &lp->pls_list); |
b1f69b75 AA |
1220 | dprintk("%s: inserted lseg %p " |
1221 | "iomode %d offset %llu length %llu before " | |
1222 | "lp %p iomode %d offset %llu length %llu\n", | |
566052c5 FI |
1223 | __func__, lseg, lseg->pls_range.iomode, |
1224 | lseg->pls_range.offset, lseg->pls_range.length, | |
1225 | lp, lp->pls_range.iomode, lp->pls_range.offset, | |
1226 | lp->pls_range.length); | |
fb3296eb | 1227 | goto out; |
974cec8c | 1228 | } |
fb3296eb BH |
1229 | list_add_tail(&lseg->pls_list, &lo->plh_segs); |
1230 | dprintk("%s: inserted lseg %p " | |
1231 | "iomode %d offset %llu length %llu at tail\n", | |
1232 | __func__, lseg, lseg->pls_range.iomode, | |
1233 | lseg->pls_range.offset, lseg->pls_range.length); | |
1234 | out: | |
70c3bd2b | 1235 | pnfs_get_layout_hdr(lo); |
974cec8c AA |
1236 | |
1237 | dprintk("%s:Return\n", __func__); | |
e5e94017 BH |
1238 | } |
1239 | ||
1240 | static struct pnfs_layout_hdr * | |
9fa40758 PT |
1241 | alloc_init_layout_hdr(struct inode *ino, |
1242 | struct nfs_open_context *ctx, | |
1243 | gfp_t gfp_flags) | |
e5e94017 BH |
1244 | { |
1245 | struct pnfs_layout_hdr *lo; | |
1246 | ||
636fb9c8 | 1247 | lo = pnfs_alloc_layout_hdr(ino, gfp_flags); |
e5e94017 BH |
1248 | if (!lo) |
1249 | return NULL; | |
cc6e5340 | 1250 | atomic_set(&lo->plh_refcount, 1); |
b7edfaa1 FI |
1251 | INIT_LIST_HEAD(&lo->plh_layouts); |
1252 | INIT_LIST_HEAD(&lo->plh_segs); | |
fd9a8d71 | 1253 | INIT_LIST_HEAD(&lo->plh_bulk_destroy); |
b7edfaa1 | 1254 | lo->plh_inode = ino; |
5cc2216d | 1255 | lo->plh_lc_cred = get_rpccred(ctx->cred); |
e5e94017 BH |
1256 | return lo; |
1257 | } | |
1258 | ||
1259 | static struct pnfs_layout_hdr * | |
9fa40758 PT |
1260 | pnfs_find_alloc_layout(struct inode *ino, |
1261 | struct nfs_open_context *ctx, | |
1262 | gfp_t gfp_flags) | |
e5e94017 BH |
1263 | { |
1264 | struct nfs_inode *nfsi = NFS_I(ino); | |
1265 | struct pnfs_layout_hdr *new = NULL; | |
1266 | ||
1267 | dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout); | |
1268 | ||
251ec410 TM |
1269 | if (nfsi->layout != NULL) |
1270 | goto out_existing; | |
e5e94017 | 1271 | spin_unlock(&ino->i_lock); |
9fa40758 | 1272 | new = alloc_init_layout_hdr(ino, ctx, gfp_flags); |
e5e94017 BH |
1273 | spin_lock(&ino->i_lock); |
1274 | ||
251ec410 | 1275 | if (likely(nfsi->layout == NULL)) { /* Won the race? */ |
e5e94017 | 1276 | nfsi->layout = new; |
251ec410 | 1277 | return new; |
7175fe90 YN |
1278 | } else if (new != NULL) |
1279 | pnfs_free_layout_hdr(new); | |
251ec410 TM |
1280 | out_existing: |
1281 | pnfs_get_layout_hdr(nfsi->layout); | |
e5e94017 BH |
1282 | return nfsi->layout; |
1283 | } | |
1284 | ||
b1f69b75 AA |
1285 | /* |
1286 | * iomode matching rules: | |
1287 | * iomode lseg match | |
1288 | * ----- ----- ----- | |
1289 | * ANY READ true | |
1290 | * ANY RW true | |
1291 | * RW READ false | |
1292 | * RW RW true | |
1293 | * READ READ true | |
1294 | * READ RW true | |
1295 | */ | |
3cb2df17 | 1296 | static bool |
7dc0ac70 | 1297 | pnfs_lseg_range_match(const struct pnfs_layout_range *ls_range, |
3cb2df17 | 1298 | const struct pnfs_layout_range *range) |
b1f69b75 | 1299 | { |
fb3296eb BH |
1300 | struct pnfs_layout_range range1; |
1301 | ||
1302 | if ((range->iomode == IOMODE_RW && | |
1303 | ls_range->iomode != IOMODE_RW) || | |
7dc0ac70 | 1304 | !pnfs_lseg_range_intersecting(ls_range, range)) |
fb3296eb BH |
1305 | return 0; |
1306 | ||
1307 | /* range1 covers only the first byte in the range */ | |
1308 | range1 = *range; | |
1309 | range1.length = 1; | |
7dc0ac70 | 1310 | return pnfs_lseg_range_contained(ls_range, &range1); |
b1f69b75 AA |
1311 | } |
1312 | ||
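The table above reduces to a single test in pnfs_lseg_range_match(): a cached segment is rejected only when the request is RW but the segment is read-only (plus the byte-range intersection and containment checks). A standalone sketch of just that iomode rule, with enum values chosen to mirror enum pnfs_iomode:

```c
#include <stdbool.h>
#include <stdio.h>

enum iomode { IOMODE_READ = 1, IOMODE_RW = 2, IOMODE_ANY = 3 };

/* true if a segment with iomode 'lseg' can serve a request with iomode 'req' */
static bool iomode_matches(enum iomode lseg, enum iomode req)
{
	return !(req == IOMODE_RW && lseg != IOMODE_RW);
}

int main(void)
{
	printf("%d\n", iomode_matches(IOMODE_READ, IOMODE_RW));	/* 0: RW request, RO lseg */
	printf("%d\n", iomode_matches(IOMODE_RW, IOMODE_READ));	/* 1 */
	printf("%d\n", iomode_matches(IOMODE_READ, IOMODE_ANY));	/* 1 */
	return 0;
}
```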
1313 | /* | |
1314 | * lookup range in layout | |
1315 | */ | |
e5e94017 | 1316 | static struct pnfs_layout_segment * |
fb3296eb BH |
1317 | pnfs_find_lseg(struct pnfs_layout_hdr *lo, |
1318 | struct pnfs_layout_range *range) | |
e5e94017 | 1319 | { |
b1f69b75 AA |
1320 | struct pnfs_layout_segment *lseg, *ret = NULL; |
1321 | ||
1322 | dprintk("%s:Begin\n", __func__); | |
1323 | ||
b7edfaa1 | 1324 | list_for_each_entry(lseg, &lo->plh_segs, pls_list) { |
4541d16c | 1325 | if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) && |
ce6ab4f2 | 1326 | !test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags) && |
7dc0ac70 | 1327 | pnfs_lseg_range_match(&lseg->pls_range, range)) { |
9369a431 | 1328 | ret = pnfs_get_lseg(lseg); |
b1f69b75 AA |
1329 | break; |
1330 | } | |
d771e3a4 | 1331 | if (lseg->pls_range.offset > range->offset) |
b1f69b75 AA |
1332 | break; |
1333 | } | |
1334 | ||
1335 | dprintk("%s:Return lseg %p ref %d\n", | |
4541d16c | 1336 | __func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0); |
b1f69b75 | 1337 | return ret; |
e5e94017 BH |
1338 | } |
1339 | ||
d23d61c8 AA |
1340 | /* |
1341 | * Use mdsthreshold hints set at each OPEN to determine if I/O should go | |
1342 | * to the MDS or over pNFS | |
1343 | * | |
1344 | * The nfs_inode read_io and write_io fields are cumulative counters reset | |
1345 | * when there are no layout segments. Note that in pnfs_update_layout iomode | |
1346 | * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a | |
1347 | * WRITE request. | |
1348 | * | |
1349 | * A return of true means use MDS I/O. | |
1350 | * | |
1351 | * From rfc 5661: | |
1352 | * If a file's size is smaller than the file size threshold, data accesses | |
1353 | * SHOULD be sent to the metadata server. If an I/O request has a length that | |
1354 | * is below the I/O size threshold, the I/O SHOULD be sent to the metadata | |
1355 | * server. If both file size and I/O size are provided, the client SHOULD | |
1356 | * reach or exceed both thresholds before sending its read or write | |
1357 | * requests to the data server. | |
1358 | */ | |
1359 | static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx, | |
1360 | struct inode *ino, int iomode) | |
1361 | { | |
1362 | struct nfs4_threshold *t = ctx->mdsthreshold; | |
1363 | struct nfs_inode *nfsi = NFS_I(ino); | |
1364 | loff_t fsize = i_size_read(ino); | |
1365 | bool size = false, size_set = false, io = false, io_set = false, ret = false; | |
1366 | ||
1367 | if (t == NULL) | |
1368 | return ret; | |
1369 | ||
1370 | dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n", | |
1371 | __func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz); | |
1372 | ||
1373 | switch (iomode) { | |
1374 | case IOMODE_READ: | |
1375 | if (t->bm & THRESHOLD_RD) { | |
1376 | dprintk("%s fsize %llu\n", __func__, fsize); | |
1377 | size_set = true; | |
1378 | if (fsize < t->rd_sz) | |
1379 | size = true; | |
1380 | } | |
1381 | if (t->bm & THRESHOLD_RD_IO) { | |
1382 | dprintk("%s nfsi->read_io %llu\n", __func__, | |
1383 | nfsi->read_io); | |
1384 | io_set = true; | |
1385 | if (nfsi->read_io < t->rd_io_sz) | |
1386 | io = true; | |
1387 | } | |
1388 | break; | |
1389 | case IOMODE_RW: | |
1390 | if (t->bm & THRESHOLD_WR) { | |
1391 | dprintk("%s fsize %llu\n", __func__, fsize); | |
1392 | size_set = true; | |
1393 | if (fsize < t->wr_sz) | |
1394 | size = true; | |
1395 | } | |
1396 | if (t->bm & THRESHOLD_WR_IO) { | |
1397 | dprintk("%s nfsi->write_io %llu\n", __func__, | |
1398 | nfsi->write_io); | |
1399 | io_set = true; | |
1400 | if (nfsi->write_io < t->wr_io_sz) | |
1401 | io = true; | |
1402 | } | |
1403 | break; | |
1404 | } | |
1405 | if (size_set && io_set) { | |
1406 | if (size && io) | |
1407 | ret = true; | |
1408 | } else if (size || io) | |
1409 | ret = true; | |
1410 | ||
1411 | dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret); | |
1412 | return ret; | |
1413 | } | |
1414 | ||
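A worked case with hypothetical hint values: suppose an OPEN returned THRESHOLD_RD | THRESHOLD_RD_IO with rd_sz = 1 MB and rd_io_sz = 4 MB. A READ against a 64 KB file that has accumulated 100 KB of read I/O is below both thresholds, so the function returns true and the I/O goes to the MDS; once either the file size or the cumulative read I/O reaches its threshold, it returns false and the read is issued over pNFS.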
aa8a45ee PT |
1415 | /* stop waiting if someone clears NFS_LAYOUT_RETRY_LAYOUTGET bit. */ |
1416 | static int pnfs_layoutget_retry_bit_wait(struct wait_bit_key *key) | |
1417 | { | |
1418 | if (!test_bit(NFS_LAYOUT_RETRY_LAYOUTGET, key->flags)) | |
1419 | return 1; | |
1420 | return nfs_wait_bit_killable(key); | |
1421 | } | |
1422 | ||
1423 | static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo) | |
1424 | { | |
1425 | /* | |
1426 | * send layoutcommit as it can hold up layoutreturn due to lseg | |
1427 | * reference | |
1428 | */ | |
1429 | pnfs_layoutcommit_inode(lo->plh_inode, false); | |
1430 | return !wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN, | |
1431 | pnfs_layoutget_retry_bit_wait, | |
1432 | TASK_UNINTERRUPTIBLE); | |
1433 | } | |
1434 | ||
d67ae825 TH |
1435 | static void pnfs_clear_first_layoutget(struct pnfs_layout_hdr *lo) |
1436 | { | |
1437 | unsigned long *bitlock = &lo->plh_flags; | |
1438 | ||
1439 | clear_bit_unlock(NFS_LAYOUT_FIRST_LAYOUTGET, bitlock); | |
1440 | smp_mb__after_atomic(); | |
1441 | wake_up_bit(bitlock, NFS_LAYOUT_FIRST_LAYOUTGET); | |
1442 | } | |
1443 | ||
e5e94017 BH |
1444 | /* |
1445 | * Layout segment is retrieved from the server if not cached. | |
1446 | * The appropriate layout segment is referenced and returned to the caller. | |
1447 | */ | |
7c24d948 | 1448 | struct pnfs_layout_segment * |
e5e94017 BH |
1449 | pnfs_update_layout(struct inode *ino, |
1450 | struct nfs_open_context *ctx, | |
fb3296eb BH |
1451 | loff_t pos, |
1452 | u64 count, | |
a75b9df9 TM |
1453 | enum pnfs_iomode iomode, |
1454 | gfp_t gfp_flags) | |
e5e94017 | 1455 | { |
fb3296eb BH |
1456 | struct pnfs_layout_range arg = { |
1457 | .iomode = iomode, | |
1458 | .offset = pos, | |
1459 | .length = count, | |
1460 | }; | |
707ed5fd | 1461 | unsigned pg_offset; |
6382a441 WAA |
1462 | struct nfs_server *server = NFS_SERVER(ino); |
1463 | struct nfs_client *clp = server->nfs_client; | |
e5e94017 BH |
1464 | struct pnfs_layout_hdr *lo; |
1465 | struct pnfs_layout_segment *lseg = NULL; | |
30005121 | 1466 | bool first; |
e5e94017 BH |
1467 | |
1468 | if (!pnfs_enabled_sb(NFS_SERVER(ino))) | |
f86bbcf8 | 1469 | goto out; |
d23d61c8 AA |
1470 | |
1471 | if (pnfs_within_mdsthreshold(ctx, ino, iomode)) | |
f86bbcf8 | 1472 | goto out; |
d23d61c8 | 1473 | |
9bf87482 PT |
1474 | lookup_again: |
1475 | first = false; | |
e5e94017 | 1476 | spin_lock(&ino->i_lock); |
9fa40758 | 1477 | lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags); |
830ffb56 TM |
1478 | if (lo == NULL) { |
1479 | spin_unlock(&ino->i_lock); | |
1480 | goto out; | |
1481 | } | |
e5e94017 | 1482 | |
43f1b3da | 1483 | /* Do we even need to bother with this? */ |
a59c30ac | 1484 | if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) { |
43f1b3da | 1485 | dprintk("%s matches recall, use MDS\n", __func__); |
e5e94017 BH |
1486 | goto out_unlock; |
1487 | } | |
1488 | ||
1489 | /* If LAYOUTGET already failed once we don't try again, unless a retry has been requested. */ | |
aa8a45ee PT |
1490 | if (pnfs_layout_io_test_failed(lo, iomode) && |
1491 | !pnfs_should_retry_layoutget(lo)) | |
e5e94017 BH |
1492 | goto out_unlock; |
1493 | ||
9bf87482 PT |
1494 | first = list_empty(&lo->plh_segs); |
1495 | if (first) { | |
1496 | /* The first layoutget for the file. Need to serialize per | |
1497 | * RFC 5661 Errata 3208. | |
1498 | */ | |
1499 | if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET, | |
1500 | &lo->plh_flags)) { | |
1501 | spin_unlock(&ino->i_lock); | |
1502 | wait_on_bit(&lo->plh_flags, NFS_LAYOUT_FIRST_LAYOUTGET, | |
1503 | TASK_UNINTERRUPTIBLE); | |
1504 | pnfs_put_layout_hdr(lo); | |
1505 | goto lookup_again; | |
1506 | } | |
1507 | } else { | |
1508 | /* Check to see if the layout for the given range | |
1509 | * already exists | |
1510 | */ | |
1511 | lseg = pnfs_find_lseg(lo, &arg); | |
1512 | if (lseg) | |
1513 | goto out_unlock; | |
1514 | } | |
568e8c49 | 1515 | |
aa8a45ee PT |
1516 | /* |
1517 | * Because we free lsegs before sending LAYOUTRETURN, we need to wait | |
1518 | * for LAYOUTRETURN even if first is true. | |
1519 | */ | |
1520 | if (!lseg && pnfs_should_retry_layoutget(lo) && | |
1521 | test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) { | |
1522 | spin_unlock(&ino->i_lock); | |
1523 | dprintk("%s wait for layoutreturn\n", __func__); | |
1524 | if (pnfs_prepare_to_retry_layoutget(lo)) { | |
d67ae825 TH |
1525 | if (first) |
1526 | pnfs_clear_first_layoutget(lo); | |
aa8a45ee PT |
1527 | pnfs_put_layout_hdr(lo); |
1528 | dprintk("%s retrying\n", __func__); | |
1529 | goto lookup_again; | |
1530 | } | |
1531 | goto out_put_layout_hdr; | |
1532 | } | |
1533 | ||
ce6ab4f2 | 1534 | if (pnfs_layoutgets_blocked(lo, &arg, 0)) |
cf7d63f1 FI |
1535 | goto out_unlock; |
1536 | atomic_inc(&lo->plh_outstanding); | |
f49f9baa | 1537 | spin_unlock(&ino->i_lock); |
30005121 | 1538 | |
abb9a007 | 1539 | if (list_empty(&lo->plh_layouts)) { |
2130ff66 FI |
1540 | /* The lo must be on the clp list if there is any |
1541 | * chance of a CB_LAYOUTRECALL(FILE) coming in. | |
1542 | */ | |
1543 | spin_lock(&clp->cl_lock); | |
abb9a007 PT |
1544 | if (list_empty(&lo->plh_layouts)) |
1545 | list_add_tail(&lo->plh_layouts, &server->layouts); | |
2130ff66 FI |
1546 | spin_unlock(&clp->cl_lock); |
1547 | } | |
e5e94017 | 1548 | |
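/*
 * Round the requested range out to page boundaries before sending
 * LAYOUTGET so that the returned layout segment covers whole pages.
 */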
707ed5fd BH |
1549 | pg_offset = arg.offset & ~PAGE_CACHE_MASK; |
1550 | if (pg_offset) { | |
1551 | arg.offset -= pg_offset; | |
1552 | arg.length += pg_offset; | |
1553 | } | |
7c24d948 AA |
1554 | if (arg.length != NFS4_MAX_UINT64) |
1555 | arg.length = PAGE_CACHE_ALIGN(arg.length); | |
707ed5fd | 1556 | |
fb3296eb | 1557 | lseg = send_layoutget(lo, ctx, &arg, gfp_flags); |
c829013d | 1558 | pnfs_clear_retry_layoutget(lo); |
cf7d63f1 | 1559 | atomic_dec(&lo->plh_outstanding); |
830ffb56 | 1560 | out_put_layout_hdr: |
d67ae825 TH |
1561 | if (first) |
1562 | pnfs_clear_first_layoutget(lo); | |
70c3bd2b | 1563 | pnfs_put_layout_hdr(lo); |
e5e94017 | 1564 | out: |
f86bbcf8 TM |
1565 | dprintk("%s: inode %s/%llu pNFS layout segment %s for " |
1566 | "(%s, offset: %llu, length: %llu)\n", | |
1567 | __func__, ino->i_sb->s_id, | |
1568 | (unsigned long long)NFS_FILEID(ino), | |
1569 | lseg == NULL ? "not found" : "found", | |
1570 | iomode==IOMODE_RW ? "read/write" : "read-only", | |
1571 | (unsigned long long)pos, | |
1572 | (unsigned long long)count); | |
e5e94017 BH |
1573 | return lseg; |
1574 | out_unlock: | |
1575 | spin_unlock(&ino->i_lock); | |
830ffb56 | 1576 | goto out_put_layout_hdr; |
e5e94017 | 1577 | } |
7c24d948 | 1578 | EXPORT_SYMBOL_GPL(pnfs_update_layout); |
b1f69b75 | 1579 | |
a0b0a6e3 | 1580 | struct pnfs_layout_segment * |
b1f69b75 AA |
1581 | pnfs_layout_process(struct nfs4_layoutget *lgp) |
1582 | { | |
1583 | struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout; | |
1584 | struct nfs4_layoutget_res *res = &lgp->res; | |
1585 | struct pnfs_layout_segment *lseg; | |
b7edfaa1 | 1586 | struct inode *ino = lo->plh_inode; |
78096cca | 1587 | LIST_HEAD(free_me); |
b1f69b75 AA |
1588 | int status = 0; |
1589 | ||
1590 | /* Inject layout blob into I/O device driver */ | |
a75b9df9 | 1591 | lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags); |
b1f69b75 AA |
1592 | if (!lseg || IS_ERR(lseg)) { |
1593 | if (!lseg) | |
1594 | status = -ENOMEM; | |
1595 | else | |
1596 | status = PTR_ERR(lseg); | |
1597 | dprintk("%s: Could not allocate layout: error %d\n", | |
1598 | __func__, status); | |
1599 | goto out; | |
1600 | } | |
1601 | ||
1013df61 CH |
1602 | init_lseg(lo, lseg); |
1603 | lseg->pls_range = res->range; | |
1604 | ||
b1f69b75 | 1605 | spin_lock(&ino->i_lock); |
a59c30ac | 1606 | if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) { |
43f1b3da FI |
1607 | dprintk("%s forget reply due to recall\n", __func__); |
1608 | goto out_forget_reply; | |
1609 | } | |
1610 | ||
ce6ab4f2 | 1611 | if (pnfs_layoutgets_blocked(lo, &lgp->args.range, 1)) { |
43f1b3da FI |
1612 | dprintk("%s forget reply due to state\n", __func__); |
1613 | goto out_forget_reply; | |
1614 | } | |
038d6493 | 1615 | |
362f7474 CH |
1616 | if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) { |
1617 | /* existing state ID, make sure the sequence number matches. */ | |
1618 | if (pnfs_layout_stateid_blocked(lo, &res->stateid)) { | |
1619 | dprintk("%s forget reply due to sequence\n", __func__); | |
1620 | goto out_forget_reply; | |
1621 | } | |
1622 | pnfs_set_layout_stateid(lo, &res->stateid, false); | |
1623 | } else { | |
1624 | /* | |
1625 | * We got an entirely new state ID. Mark all segments for the | |
1626 | * inode invalid, and don't bother validating the stateid | |
1627 | * sequence number. | |
1628 | */ | |
1629 | pnfs_mark_matching_lsegs_invalid(lo, &free_me, NULL); | |
1630 | ||
1631 | nfs4_stateid_copy(&lo->plh_stateid, &res->stateid); | |
1632 | lo->plh_barrier = be32_to_cpu(res->stateid.seqid); | |
1633 | } | |
038d6493 | 1634 | |
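/*
 * A successful LAYOUTGET reply means the layout stateid is valid
 * again, so clear NFS_LAYOUT_INVALID_STID before inserting the new
 * segment.
 */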
47abadef CH |
1635 | clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags); |
1636 | ||
9369a431 | 1637 | pnfs_get_lseg(lseg); |
57036a37 | 1638 | pnfs_layout_insert_lseg(lo, lseg); |
b1f69b75 | 1639 | |
f7e8917a FI |
1640 | if (res->return_on_close) { |
1641 | set_bit(NFS_LSEG_ROC, &lseg->pls_flags); | |
1642 | set_bit(NFS_LAYOUT_ROC, &lo->plh_flags); | |
1643 | } | |
1644 | ||
b1f69b75 | 1645 | spin_unlock(&ino->i_lock); |
78096cca | 1646 | pnfs_free_lseg_list(&free_me); |
a0b0a6e3 | 1647 | return lseg; |
b1f69b75 | 1648 | out: |
a0b0a6e3 | 1649 | return ERR_PTR(status); |
43f1b3da FI |
1650 | |
1651 | out_forget_reply: | |
1652 | spin_unlock(&ino->i_lock); | |
1653 | lseg->pls_layout = lo; | |
1654 | NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg); | |
1655 | goto out; | |
b1f69b75 AA |
1656 | } |
1657 | ||
016256df PT |
1658 | static void |
1659 | pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo, | |
1660 | struct list_head *tmp_list, | |
1661 | struct pnfs_layout_range *return_range) | |
1662 | { | |
1663 | struct pnfs_layout_segment *lseg, *next; | |
1664 | ||
1665 | dprintk("%s:Begin lo %p\n", __func__, lo); | |
1666 | ||
1667 | if (list_empty(&lo->plh_segs)) | |
1668 | return; | |
1669 | ||
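/*
 * Mark each matching lseg for return and invalidate it; see
 * pnfs_put_lseg() for how NFS_LSEG_LAYOUTRETURN drives the eventual
 * layoutreturn once the last reference is dropped.
 */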
1670 | list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list) | |
1671 | if (should_free_lseg(&lseg->pls_range, return_range)) { | |
1672 | dprintk("%s: marking lseg %p iomode %d " | |
1673 | "offset %llu length %llu\n", __func__, | |
1674 | lseg, lseg->pls_range.iomode, | |
1675 | lseg->pls_range.offset, | |
1676 | lseg->pls_range.length); | |
1677 | set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags); | |
1678 | mark_lseg_invalid(lseg, tmp_list); | |
1679 | } | |
1680 | } | |
1681 | ||
1682 | void pnfs_error_mark_layout_for_return(struct inode *inode, | |
1683 | struct pnfs_layout_segment *lseg) | |
1684 | { | |
1685 | struct pnfs_layout_hdr *lo = NFS_I(inode)->layout; | |
1686 | int iomode = pnfs_iomode_to_fail_bit(lseg->pls_range.iomode); | |
1687 | struct pnfs_layout_range range = { | |
1688 | .iomode = lseg->pls_range.iomode, | |
1689 | .offset = 0, | |
1690 | .length = NFS4_MAX_UINT64, | |
1691 | }; | |
1692 | LIST_HEAD(free_me); | |
1693 | ||
1694 | spin_lock(&inode->i_lock); | |
1695 | /* set failure bit so that pnfs path will be retried later */ | |
1696 | pnfs_layout_set_fail_bit(lo, iomode); | |
1697 | set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags); | |
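	/*
	 * Track which iomode needs to be returned; if failures have now
	 * been seen for both read and read/write segments, widen the
	 * pending return to IOMODE_ANY.
	 */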
1698 | if (lo->plh_return_iomode == 0) | |
1699 | lo->plh_return_iomode = range.iomode; | |
1700 | else if (lo->plh_return_iomode != range.iomode) | |
1701 | lo->plh_return_iomode = IOMODE_ANY; | |
1702 | /* | |
1703 | * mark all matching lsegs so that we are sure to have no live | |
1704 | * segments at hand when sending layoutreturn. See pnfs_put_lseg() | |
1705 | * for how it works. | |
1706 | */ | |
1707 | pnfs_mark_matching_lsegs_return(lo, &free_me, &range); | |
1708 | spin_unlock(&inode->i_lock); | |
1709 | pnfs_free_lseg_list(&free_me); | |
1710 | } | |
1711 | EXPORT_SYMBOL_GPL(pnfs_error_mark_layout_for_return); | |
1712 | ||
d8007d4d TM |
1713 | void |
1714 | pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req) | |
1715 | { | |
1fd937bd PT |
1716 | u64 rd_size = req->wb_bytes; |
1717 | ||
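	/*
	 * No layout segment cached for this descriptor yet: for buffered
	 * reads ask for a layout covering from the request offset to the
	 * end of the file, for direct I/O only the bytes remaining in
	 * the request.
	 */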
cb5d04bc PT |
1718 | if (pgio->pg_lseg == NULL) { |
1719 | if (pgio->pg_dreq == NULL) | |
1720 | rd_size = i_size_read(pgio->pg_inode) - req_offset(req); | |
1721 | else | |
1722 | rd_size = nfs_dreq_bytes_left(pgio->pg_dreq); | |
1723 | ||
1724 | pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, | |
1725 | req->wb_context, | |
1726 | req_offset(req), | |
1727 | rd_size, | |
1728 | IOMODE_READ, | |
1729 | GFP_KERNEL); | |
1730 | } | |
e885de1a TM |
1731 | /* If no lseg, fall back to read through mds */ |
1732 | if (pgio->pg_lseg == NULL) | |
1f945357 | 1733 | nfs_pageio_reset_read_mds(pgio); |
e885de1a | 1734 | |
d8007d4d TM |
1735 | } |
1736 | EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read); | |
1737 | ||
1738 | void | |
6296556f PT |
1739 | pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio, |
1740 | struct nfs_page *req, u64 wb_size) | |
d8007d4d | 1741 | { |
cb5d04bc PT |
1742 | if (pgio->pg_lseg == NULL) |
1743 | pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, | |
1744 | req->wb_context, | |
1745 | req_offset(req), | |
1746 | wb_size, | |
1747 | IOMODE_RW, | |
1748 | GFP_NOFS); | |
e885de1a TM |
1749 | /* If no lseg, fall back to write through mds */ |
1750 | if (pgio->pg_lseg == NULL) | |
1f945357 | 1751 | nfs_pageio_reset_write_mds(pgio); |
d8007d4d TM |
1752 | } |
1753 | EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write); | |
1754 | ||
180bb5ec WAA |
1755 | void |
1756 | pnfs_generic_pg_cleanup(struct nfs_pageio_descriptor *desc) | |
1757 | { | |
1758 | if (desc->pg_lseg) { | |
1759 | pnfs_put_lseg(desc->pg_lseg); | |
1760 | desc->pg_lseg = NULL; | |
1761 | } | |
1762 | } | |
1763 | EXPORT_SYMBOL_GPL(pnfs_generic_pg_cleanup); | |
1764 | ||
b4fdac1a WAA |
1765 | /* |
1766 | * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number | |
1767 | * of bytes (maximum @req->wb_bytes) that can be coalesced. | |
1768 | */ | |
1769 | size_t | |
a7d42ddb WAA |
1770 | pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, |
1771 | struct nfs_page *prev, struct nfs_page *req) | |
94ad1c80 | 1772 | { |
0f9c429e | 1773 | unsigned int size; |
c5e20cb7 | 1774 | u64 seg_end, req_start, seg_left; |
0f9c429e WAA |
1775 | |
1776 | size = nfs_generic_pg_test(pgio, prev, req); | |
0f9c429e WAA |
1777 | if (!size) |
1778 | return 0; | |
94ad1c80 | 1779 | |
19982ba8 | 1780 | /* |
c5e20cb7 WAA |
1781 | * 'size' contains the number of bytes left in the current page (up |
1782 | * to the original size asked for in @req->wb_bytes). | |
1783 | * | |
1784 | * Calculate how many bytes are left in the layout segment | |
1785 | * and if there are less bytes than 'size', return that instead. | |
19982ba8 TM |
1786 | * |
1787 | * Please also note that 'end_offset' is actually the offset of the | |
1788 | * first byte that lies outside the pnfs_layout_range. FIXME? | |
1789 | * | |
1790 | */ | |
19b54848 | 1791 | if (pgio->pg_lseg) { |
c5e20cb7 WAA |
1792 | seg_end = end_offset(pgio->pg_lseg->pls_range.offset, |
1793 | pgio->pg_lseg->pls_range.length); | |
1794 | req_start = req_offset(req); | |
7c13789e | 1795 | WARN_ON_ONCE(req_start >= seg_end); |
c5e20cb7 | 1796 | /* start of request is past the last byte of this segment */ |
7c13789e WAA |
1797 | if (req_start >= seg_end) { |
1798 | /* reference the new lseg */ | |
1799 | if (pgio->pg_ops->pg_cleanup) | |
1800 | pgio->pg_ops->pg_cleanup(pgio); | |
1801 | if (pgio->pg_ops->pg_init) | |
1802 | pgio->pg_ops->pg_init(pgio, req); | |
19b54848 | 1803 | return 0; |
7c13789e | 1804 | } |
c5e20cb7 WAA |
1805 | |
1806 | /* adjust 'size' iff there are fewer bytes left in the | |
1807 | * segment than what nfs_generic_pg_test returned */ | |
1808 | seg_left = seg_end - req_start; | |
1809 | if (seg_left < size) | |
1810 | size = (unsigned int)seg_left; | |
19b54848 | 1811 | } |
0f9c429e | 1812 | |
19b54848 | 1813 | return size; |
94ad1c80 | 1814 | } |
89a58e32 | 1815 | EXPORT_SYMBOL_GPL(pnfs_generic_pg_test); |
94ad1c80 | 1816 | |
53113ad3 | 1817 | int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr) |
e2fecb21 TM |
1818 | { |
1819 | struct nfs_pageio_descriptor pgio; | |
e2fecb21 TM |
1820 | |
1821 | /* Resend all requests through the MDS */ | |
53113ad3 WAA |
1822 | nfs_pageio_init_write(&pgio, hdr->inode, FLUSH_STABLE, true, |
1823 | hdr->completion_ops); | |
1824 | return nfs_pageio_resend(&pgio, hdr); | |
e2fecb21 | 1825 | } |
e7dd79af | 1826 | EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds); |
e2fecb21 | 1827 | |
d45f60c6 | 1828 | static void pnfs_ld_handle_write_error(struct nfs_pgio_header *hdr) |
1acbbb4e | 1829 | { |
cd841605 FI |
1830 | |
1831 | dprintk("pnfs write error = %d\n", hdr->pnfs_error); | |
1832 | if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags & | |
1acbbb4e | 1833 | PNFS_LAYOUTRET_ON_ERROR) { |
cd841605 | 1834 | pnfs_return_layout(hdr->inode); |
1acbbb4e | 1835 | } |
6c75dc0d | 1836 | if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) |
53113ad3 | 1837 | hdr->task.tk_status = pnfs_write_done_resend_to_mds(hdr); |
1acbbb4e FI |
1838 | } |
1839 | ||
d20581aa BH |
1840 | /* |
1841 | * Called by non rpc-based layout drivers | |
1842 | */ | |
d45f60c6 | 1843 | void pnfs_ld_write_done(struct nfs_pgio_header *hdr) |
44b83799 | 1844 | { |
d45f60c6 | 1845 | trace_nfs4_pnfs_write(hdr, hdr->pnfs_error); |
cd841605 | 1846 | if (!hdr->pnfs_error) { |
67af7611 TM |
1847 | pnfs_set_layoutcommit(hdr->inode, hdr->lseg, |
1848 | hdr->mds_offset + hdr->res.count); | |
d45f60c6 | 1849 | hdr->mds_ops->rpc_call_done(&hdr->task, hdr); |
1acbbb4e | 1850 | } else |
d45f60c6 WAA |
1851 | pnfs_ld_handle_write_error(hdr); |
1852 | hdr->mds_ops->rpc_release(hdr); | |
44b83799 | 1853 | } |
d20581aa | 1854 | EXPORT_SYMBOL_GPL(pnfs_ld_write_done); |
44b83799 | 1855 | |
dce81290 TM |
1856 | static void |
1857 | pnfs_write_through_mds(struct nfs_pageio_descriptor *desc, | |
d45f60c6 | 1858 | struct nfs_pgio_header *hdr) |
dce81290 | 1859 | { |
48d635f1 | 1860 | struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); |
a7d42ddb | 1861 | |
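	/*
	 * Fall back to writing through the MDS: requeue the header's
	 * pages on the pageio mirror for recoalescing.  NFS_IOHDR_REDO
	 * ensures the requeue happens only once per header.
	 */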
6c75dc0d | 1862 | if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) { |
a7d42ddb | 1863 | list_splice_tail_init(&hdr->pages, &mirror->pg_list); |
6c75dc0d | 1864 | nfs_pageio_reset_write_mds(desc); |
a7d42ddb | 1865 | mirror->pg_recoalesce = 1; |
6c75dc0d | 1866 | } |
d45f60c6 | 1867 | nfs_pgio_data_destroy(hdr); |
dce81290 TM |
1868 | } |
1869 | ||
1870 | static enum pnfs_try_status | |
d45f60c6 | 1871 | pnfs_try_to_write_data(struct nfs_pgio_header *hdr, |
dce81290 TM |
1872 | const struct rpc_call_ops *call_ops, |
1873 | struct pnfs_layout_segment *lseg, | |
1874 | int how) | |
0382b744 | 1875 | { |
cd841605 | 1876 | struct inode *inode = hdr->inode; |
0382b744 AA |
1877 | enum pnfs_try_status trypnfs; |
1878 | struct nfs_server *nfss = NFS_SERVER(inode); | |
1879 | ||
cd841605 | 1880 | hdr->mds_ops = call_ops; |
0382b744 AA |
1881 | |
1882 | dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__, | |
d45f60c6 WAA |
1883 | inode->i_ino, hdr->args.count, hdr->args.offset, how); |
1884 | trypnfs = nfss->pnfs_curr_ld->write_pagelist(hdr, how); | |
6c75dc0d | 1885 | if (trypnfs != PNFS_NOT_ATTEMPTED) |
0382b744 | 1886 | nfs_inc_stats(inode, NFSIOS_PNFS_WRITE); |
0382b744 AA |
1887 | dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs); |
1888 | return trypnfs; | |
1889 | } | |
1890 | ||
dce81290 | 1891 | static void |
7f714720 WAA |
1892 | pnfs_do_write(struct nfs_pageio_descriptor *desc, |
1893 | struct nfs_pgio_header *hdr, int how) | |
dce81290 | 1894 | { |
dce81290 TM |
1895 | const struct rpc_call_ops *call_ops = desc->pg_rpc_callops; |
1896 | struct pnfs_layout_segment *lseg = desc->pg_lseg; | |
7f714720 | 1897 | enum pnfs_try_status trypnfs; |
dce81290 | 1898 | |
d45f60c6 | 1899 | trypnfs = pnfs_try_to_write_data(hdr, call_ops, lseg, how); |
7f714720 | 1900 | if (trypnfs == PNFS_NOT_ATTEMPTED) |
d45f60c6 | 1901 | pnfs_write_through_mds(desc, hdr); |
dce81290 TM |
1902 | } |
1903 | ||
6c75dc0d FI |
1904 | static void pnfs_writehdr_free(struct nfs_pgio_header *hdr) |
1905 | { | |
9369a431 | 1906 | pnfs_put_lseg(hdr->lseg); |
1e7f3a48 | 1907 | nfs_pgio_header_free(hdr); |
6c75dc0d FI |
1908 | } |
1909 | ||
dce81290 TM |
1910 | int |
1911 | pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc) | |
1912 | { | |
48d635f1 | 1913 | struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); |
a7d42ddb | 1914 | |
6c75dc0d | 1915 | struct nfs_pgio_header *hdr; |
dce81290 TM |
1916 | int ret; |
1917 | ||
1e7f3a48 WAA |
1918 | hdr = nfs_pgio_header_alloc(desc->pg_rw_ops); |
1919 | if (!hdr) { | |
a7d42ddb | 1920 | desc->pg_completion_ops->error_cleanup(&mirror->pg_list); |
6c75dc0d | 1921 | return -ENOMEM; |
dce81290 | 1922 | } |
6c75dc0d | 1923 | nfs_pgheader_init(desc, hdr, pnfs_writehdr_free); |
180bb5ec | 1924 | |
9369a431 | 1925 | hdr->lseg = pnfs_get_lseg(desc->pg_lseg); |
ef2c488c | 1926 | ret = nfs_generic_pgio(desc, hdr); |
180bb5ec | 1927 | if (!ret) |
7f714720 | 1928 | pnfs_do_write(desc, hdr, desc->pg_ioflags); |
a7d42ddb | 1929 | |
6c75dc0d | 1930 | return ret; |
dce81290 TM |
1931 | } |
1932 | EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages); | |
1933 | ||
53113ad3 | 1934 | int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *hdr) |
62e4a769 TM |
1935 | { |
1936 | struct nfs_pageio_descriptor pgio; | |
1937 | ||
1acbbb4e | 1938 | /* Resend all requests through the MDS */ |
53113ad3 WAA |
1939 | nfs_pageio_init_read(&pgio, hdr->inode, true, hdr->completion_ops); |
1940 | return nfs_pageio_resend(&pgio, hdr); | |
1acbbb4e | 1941 | } |
e7dd79af | 1942 | EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds); |
1acbbb4e | 1943 | |
d45f60c6 | 1944 | static void pnfs_ld_handle_read_error(struct nfs_pgio_header *hdr) |
1acbbb4e | 1945 | { |
cd841605 FI |
1946 | dprintk("pnfs read error = %d\n", hdr->pnfs_error); |
1947 | if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags & | |
1acbbb4e | 1948 | PNFS_LAYOUTRET_ON_ERROR) { |
cd841605 | 1949 | pnfs_return_layout(hdr->inode); |
1acbbb4e | 1950 | } |
4db6e0b7 | 1951 | if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) |
53113ad3 | 1952 | hdr->task.tk_status = pnfs_read_done_resend_to_mds(hdr); |
62e4a769 TM |
1953 | } |
1954 | ||
d20581aa BH |
1955 | /* |
1956 | * Called by non rpc-based layout drivers | |
1957 | */ | |
d45f60c6 | 1958 | void pnfs_ld_read_done(struct nfs_pgio_header *hdr) |
d20581aa | 1959 | { |
d45f60c6 | 1960 | trace_nfs4_pnfs_read(hdr, hdr->pnfs_error); |
cd841605 | 1961 | if (likely(!hdr->pnfs_error)) { |
d45f60c6 WAA |
1962 | __nfs4_read_done_cb(hdr); |
1963 | hdr->mds_ops->rpc_call_done(&hdr->task, hdr); | |
62e4a769 | 1964 | } else |
d45f60c6 WAA |
1965 | pnfs_ld_handle_read_error(hdr); |
1966 | hdr->mds_ops->rpc_release(hdr); | |
d20581aa BH |
1967 | } |
1968 | EXPORT_SYMBOL_GPL(pnfs_ld_read_done); | |
1969 | ||
493292dd TM |
1970 | static void |
1971 | pnfs_read_through_mds(struct nfs_pageio_descriptor *desc, | |
d45f60c6 | 1972 | struct nfs_pgio_header *hdr) |
493292dd | 1973 | { |
48d635f1 | 1974 | struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); |
a7d42ddb | 1975 | |
4db6e0b7 | 1976 | if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) { |
a7d42ddb | 1977 | list_splice_tail_init(&hdr->pages, &mirror->pg_list); |
4db6e0b7 | 1978 | nfs_pageio_reset_read_mds(desc); |
a7d42ddb | 1979 | mirror->pg_recoalesce = 1; |
4db6e0b7 | 1980 | } |
d45f60c6 | 1981 | nfs_pgio_data_destroy(hdr); |
493292dd TM |
1982 | } |
1983 | ||
64419a9b AA |
1984 | /* |
1985 | * Call the appropriate parallel I/O subsystem read function. | |
1986 | */ | |
493292dd | 1987 | static enum pnfs_try_status |
d45f60c6 | 1988 | pnfs_try_to_read_data(struct nfs_pgio_header *hdr, |
493292dd TM |
1989 | const struct rpc_call_ops *call_ops, |
1990 | struct pnfs_layout_segment *lseg) | |
64419a9b | 1991 | { |
cd841605 | 1992 | struct inode *inode = hdr->inode; |
64419a9b AA |
1993 | struct nfs_server *nfss = NFS_SERVER(inode); |
1994 | enum pnfs_try_status trypnfs; | |
1995 | ||
cd841605 | 1996 | hdr->mds_ops = call_ops; |
64419a9b AA |
1997 | |
1998 | dprintk("%s: Reading ino:%lu %u@%llu\n", | |
d45f60c6 | 1999 | __func__, inode->i_ino, hdr->args.count, hdr->args.offset); |
64419a9b | 2000 | |
d45f60c6 | 2001 | trypnfs = nfss->pnfs_curr_ld->read_pagelist(hdr); |
4db6e0b7 | 2002 | if (trypnfs != PNFS_NOT_ATTEMPTED) |
64419a9b | 2003 | nfs_inc_stats(inode, NFSIOS_PNFS_READ); |
64419a9b AA |
2004 | dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs); |
2005 | return trypnfs; | |
2006 | } | |
863a3c6c | 2007 | |
ceb11e13 PT |
2008 | /* Resend all requests through pnfs. */ |
2009 | int pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr) | |
2010 | { | |
2011 | struct nfs_pageio_descriptor pgio; | |
2012 | ||
2013 | nfs_pageio_init_read(&pgio, hdr->inode, false, hdr->completion_ops); | |
2014 | return nfs_pageio_resend(&pgio, hdr); | |
2015 | } | |
2016 | EXPORT_SYMBOL_GPL(pnfs_read_resend_pnfs); | |
2017 | ||
493292dd | 2018 | static void |
7f714720 | 2019 | pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr) |
493292dd | 2020 | { |
493292dd TM |
2021 | const struct rpc_call_ops *call_ops = desc->pg_rpc_callops; |
2022 | struct pnfs_layout_segment *lseg = desc->pg_lseg; | |
7f714720 | 2023 | enum pnfs_try_status trypnfs; |
ceb11e13 | 2024 | int err = 0; |
493292dd | 2025 | |
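	/*
	 * PNFS_TRY_AGAIN means the layout driver wants the I/O resent
	 * through pNFS once more; if that resend fails, or the driver
	 * did not attempt the I/O at all, fall back to reading through
	 * the MDS.
	 */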
d45f60c6 | 2026 | trypnfs = pnfs_try_to_read_data(hdr, call_ops, lseg); |
ceb11e13 PT |
2027 | if (trypnfs == PNFS_TRY_AGAIN) |
2028 | err = pnfs_read_resend_pnfs(hdr); | |
2029 | if (trypnfs == PNFS_NOT_ATTEMPTED || err) | |
d45f60c6 | 2030 | pnfs_read_through_mds(desc, hdr); |
493292dd TM |
2031 | } |
2032 | ||
4db6e0b7 FI |
2033 | static void pnfs_readhdr_free(struct nfs_pgio_header *hdr) |
2034 | { | |
9369a431 | 2035 | pnfs_put_lseg(hdr->lseg); |
1e7f3a48 | 2036 | nfs_pgio_header_free(hdr); |
4db6e0b7 FI |
2037 | } |
2038 | ||
493292dd TM |
2039 | int |
2040 | pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc) | |
2041 | { | |
48d635f1 | 2042 | struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); |
a7d42ddb | 2043 | |
4db6e0b7 | 2044 | struct nfs_pgio_header *hdr; |
493292dd TM |
2045 | int ret; |
2046 | ||
1e7f3a48 WAA |
2047 | hdr = nfs_pgio_header_alloc(desc->pg_rw_ops); |
2048 | if (!hdr) { | |
a7d42ddb | 2049 | desc->pg_completion_ops->error_cleanup(&mirror->pg_list); |
180bb5ec | 2050 | return -ENOMEM; |
493292dd | 2051 | } |
4db6e0b7 | 2052 | nfs_pgheader_init(desc, hdr, pnfs_readhdr_free); |
9369a431 | 2053 | hdr->lseg = pnfs_get_lseg(desc->pg_lseg); |
ef2c488c | 2054 | ret = nfs_generic_pgio(desc, hdr); |
180bb5ec | 2055 | if (!ret) |
7f714720 | 2056 | pnfs_do_read(desc, hdr); |
4db6e0b7 | 2057 | return ret; |
493292dd TM |
2058 | } |
2059 | EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages); | |
2060 | ||
71244d9b TM |
2061 | static void pnfs_clear_layoutcommitting(struct inode *inode) |
2062 | { | |
2063 | unsigned long *bitlock = &NFS_I(inode)->flags; | |
2064 | ||
2065 | clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock); | |
4e857c58 | 2066 | smp_mb__after_atomic(); |
71244d9b TM |
2067 | wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING); |
2068 | } | |
2069 | ||
863a3c6c | 2070 | /* |
a9bae566 | 2071 | * There can be multiple RW segments; collect those marked NFS_LSEG_LAYOUTCOMMIT onto @listp. | |
863a3c6c | 2072 | */ |
a9bae566 | 2073 | static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp) |
863a3c6c | 2074 | { |
a9bae566 | 2075 | struct pnfs_layout_segment *lseg; |
863a3c6c | 2076 | |
a9bae566 PT |
2077 | list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) { |
2078 | if (lseg->pls_range.iomode == IOMODE_RW && | |
a073dbff | 2079 | test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) |
a9bae566 PT |
2080 | list_add(&lseg->pls_lc_list, listp); |
2081 | } | |
863a3c6c AA |
2082 | } |
2083 | ||
a073dbff TM |
2084 | static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp) |
2085 | { | |
2086 | struct pnfs_layout_segment *lseg, *tmp; | |
a073dbff TM |
2087 | |
2088 | /* Matched by references in pnfs_set_layoutcommit */ | |
2089 | list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) { | |
2090 | list_del_init(&lseg->pls_lc_list); | |
2091 | pnfs_put_lseg(lseg); | |
2092 | } | |
2093 | ||
71244d9b | 2094 | pnfs_clear_layoutcommitting(inode); |
a073dbff TM |
2095 | } |
2096 | ||
1b0ae068 PT |
2097 | void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg) |
2098 | { | |
b9e028fd | 2099 | pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode); |
1b0ae068 PT |
2100 | } |
2101 | EXPORT_SYMBOL_GPL(pnfs_set_lo_fail); | |
2102 | ||
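/*
 * Record that @lseg has written data through @end_pos so that a later
 * LAYOUTCOMMIT can be sent.  plh_lwb tracks the end offset of the
 * furthest write seen so far; the lseg reference taken here is matched
 * in nfs4_layoutcommit_release.
 */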
863a3c6c | 2103 | void |
67af7611 TM |
2104 | pnfs_set_layoutcommit(struct inode *inode, struct pnfs_layout_segment *lseg, |
2105 | loff_t end_pos) | |
863a3c6c | 2106 | { |
cd841605 | 2107 | struct nfs_inode *nfsi = NFS_I(inode); |
79a48a1f | 2108 | bool mark_as_dirty = false; |
863a3c6c | 2109 | |
cd841605 | 2110 | spin_lock(&inode->i_lock); |
863a3c6c | 2111 | if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) { |
29559b11 | 2112 | nfsi->layout->plh_lwb = end_pos; |
79a48a1f | 2113 | mark_as_dirty = true; |
863a3c6c | 2114 | dprintk("%s: Set layoutcommit for inode %lu ", |
cd841605 | 2115 | __func__, inode->i_ino); |
29559b11 TM |
2116 | } else if (end_pos > nfsi->layout->plh_lwb) |
2117 | nfsi->layout->plh_lwb = end_pos; | |
67af7611 | 2118 | if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) { |
a9bae566 | 2119 | /* references matched in nfs4_layoutcommit_release */ |
67af7611 | 2120 | pnfs_get_lseg(lseg); |
a9bae566 | 2121 | } |
cd841605 | 2122 | spin_unlock(&inode->i_lock); |
acff5880 | 2123 | dprintk("%s: lseg %p end_pos %llu\n", |
67af7611 | 2124 | __func__, lseg, nfsi->layout->plh_lwb); |
79a48a1f WAA |
2125 | |
2126 | /* if pnfs_layoutcommit_inode() runs between inode locks, the next one | |
2127 | * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */ | |
2128 | if (mark_as_dirty) | |
cd841605 | 2129 | mark_inode_dirty_sync(inode); |
863a3c6c AA |
2130 | } |
2131 | EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit); | |
2132 | ||
db29c089 AA |
2133 | void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data) |
2134 | { | |
2135 | struct nfs_server *nfss = NFS_SERVER(data->args.inode); | |
2136 | ||
2137 | if (nfss->pnfs_curr_ld->cleanup_layoutcommit) | |
2138 | nfss->pnfs_curr_ld->cleanup_layoutcommit(data); | |
a073dbff | 2139 | pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list); |
db29c089 AA |
2140 | } |
2141 | ||
de4b15c7 AA |
2142 | /* |
2143 | * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and | |
2144 | * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough | |
2145 | * data to disk to allow the server to recover the data if it crashes. | |
2146 | * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag | |
2147 | * is off, and a COMMIT is sent to a data server, or | |
2148 | * if WRITEs to a data server return NFS_DATA_SYNC. | |
2149 | */ | |
863a3c6c | 2150 | int |
ef311537 | 2151 | pnfs_layoutcommit_inode(struct inode *inode, bool sync) |
863a3c6c | 2152 | { |
5f919c9f | 2153 | struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld; |
863a3c6c AA |
2154 | struct nfs4_layoutcommit_data *data; |
2155 | struct nfs_inode *nfsi = NFS_I(inode); | |
863a3c6c | 2156 | loff_t end_pos; |
71244d9b | 2157 | int status; |
863a3c6c | 2158 | |
71244d9b | 2159 | if (!pnfs_layoutcommit_outstanding(inode)) |
de4b15c7 AA |
2160 | return 0; |
2161 | ||
71244d9b | 2162 | dprintk("--> %s inode %lu\n", __func__, inode->i_ino); |
92407e75 | 2163 | |
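	/*
	 * Only one LAYOUTCOMMIT per inode may be in flight at a time.
	 * If one is already running, synchronous callers wait for it to
	 * finish while asynchronous callers bail out with -EAGAIN; the
	 * inode is re-marked dirty below so the commit is retried later.
	 */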
71244d9b | 2164 | status = -EAGAIN; |
92407e75 | 2165 | if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) { |
71244d9b TM |
2166 | if (!sync) |
2167 | goto out; | |
74316201 | 2168 | status = wait_on_bit_lock_action(&nfsi->flags, |
71244d9b TM |
2169 | NFS_INO_LAYOUTCOMMITTING, |
2170 | nfs_wait_bit_killable, | |
2171 | TASK_KILLABLE); | |
92407e75 | 2172 | if (status) |
71244d9b | 2173 | goto out; |
92407e75 PT |
2174 | } |
2175 | ||
71244d9b TM |
2176 | status = -ENOMEM; |
2177 | /* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */ | |
2178 | data = kzalloc(sizeof(*data), GFP_NOFS); | |
2179 | if (!data) | |
2180 | goto clear_layoutcommitting; | |
2181 | ||
2182 | status = 0; | |
de4b15c7 | 2183 | spin_lock(&inode->i_lock); |
71244d9b TM |
2184 | if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) |
2185 | goto out_unlock; | |
a9bae566 | 2186 | |
71244d9b | 2187 | INIT_LIST_HEAD(&data->lseg_list); |
a9bae566 | 2188 | pnfs_list_write_lseg(inode, &data->lseg_list); |
863a3c6c | 2189 | |
acff5880 | 2190 | end_pos = nfsi->layout->plh_lwb; |
863a3c6c | 2191 | |
f597c537 | 2192 | nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid); |
863a3c6c AA |
2193 | spin_unlock(&inode->i_lock); |
2194 | ||
2195 | data->args.inode = inode; | |
9fa40758 | 2196 | data->cred = get_rpccred(nfsi->layout->plh_lc_cred); |
863a3c6c AA |
2197 | nfs_fattr_init(&data->fattr); |
2198 | data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask; | |
2199 | data->res.fattr = &data->fattr; | |
2200 | data->args.lastbytewritten = end_pos - 1; | |
2201 | data->res.server = NFS_SERVER(inode); | |
2202 | ||
5f919c9f CH |
2203 | if (ld->prepare_layoutcommit) { |
2204 | status = ld->prepare_layoutcommit(&data->args); | |
2205 | if (status) { | |
2206 | spin_lock(&inode->i_lock); | |
29559b11 TM |
2207 | set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags); |
2208 | if (end_pos > nfsi->layout->plh_lwb) | |
5f919c9f CH |
2209 | nfsi->layout->plh_lwb = end_pos; |
2210 | spin_unlock(&inode->i_lock); | |
2211 | put_rpccred(data->cred); | |
5f919c9f CH |
2212 | goto clear_layoutcommitting; |
2213 | } | |
2214 | } | |
2215 | ||
2216 | ||
863a3c6c AA |
2217 | status = nfs4_proc_layoutcommit(data, sync); |
2218 | out: | |
92407e75 PT |
2219 | if (status) |
2220 | mark_inode_dirty_sync(inode); | |
863a3c6c AA |
2221 | dprintk("<-- %s status %d\n", __func__, status); |
2222 | return status; | |
71244d9b TM |
2223 | out_unlock: |
2224 | spin_unlock(&inode->i_lock); | |
92407e75 | 2225 | kfree(data); |
71244d9b TM |
2226 | clear_layoutcommitting: |
2227 | pnfs_clear_layoutcommitting(inode); | |
92407e75 | 2228 | goto out; |
863a3c6c | 2229 | } |
72cff449 | 2230 | EXPORT_SYMBOL_GPL(pnfs_layoutcommit_inode); |
82be417a | 2231 | |
5bb89b47 TM |
2232 | int |
2233 | pnfs_generic_sync(struct inode *inode, bool datasync) | |
2234 | { | |
2235 | return pnfs_layoutcommit_inode(inode, true); | |
2236 | } | |
2237 | EXPORT_SYMBOL_GPL(pnfs_generic_sync); | |
2238 | ||
82be417a AA |
2239 | struct nfs4_threshold *pnfs_mdsthreshold_alloc(void) |
2240 | { | |
2241 | struct nfs4_threshold *thp; | |
2242 | ||
2243 | thp = kzalloc(sizeof(*thp), GFP_NOFS); | |
2244 | if (!thp) { | |
2245 | dprintk("%s mdsthreshold allocation failed\n", __func__); | |
2246 | return NULL; | |
2247 | } | |
2248 | return thp; | |
2249 | } |