Git Repo - linux.git/blob - fs/afs/fs_operation.c
Linux 6.14-rc3
[linux.git] / fs / afs / fs_operation.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Fileserver-directed operation handling.
3  *
4  * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved.
5  * Written by David Howells ([email protected])
6  */
7
8 #include <linux/kernel.h>
9 #include <linux/slab.h>
10 #include <linux/fs.h>
11 #include "internal.h"
12
/* Source of unique debug IDs handed out to operations (see op->debug_id). */
static atomic_t afs_operation_debug_counter;
14
15 /*
16  * Create an operation against a volume.
17  */
18 struct afs_operation *afs_alloc_operation(struct key *key, struct afs_volume *volume)
19 {
20         struct afs_operation *op;
21
22         _enter("");
23
24         op = kzalloc(sizeof(*op), GFP_KERNEL);
25         if (!op)
26                 return ERR_PTR(-ENOMEM);
27
28         if (!key) {
29                 key = afs_request_key(volume->cell);
30                 if (IS_ERR(key)) {
31                         kfree(op);
32                         return ERR_CAST(key);
33                 }
34         } else {
35                 key_get(key);
36         }
37
38         op->key                 = key;
39         op->volume              = afs_get_volume(volume, afs_volume_trace_get_new_op);
40         op->net                 = volume->cell->net;
41         op->cb_v_break          = atomic_read(&volume->cb_v_break);
42         op->pre_volsync.creation = volume->creation_time;
43         op->pre_volsync.update  = volume->update_time;
44         op->debug_id            = atomic_inc_return(&afs_operation_debug_counter);
45         op->nr_iterations       = -1;
46         afs_op_set_error(op, -EDESTADDRREQ);
47
48         _leave(" = [op=%08x]", op->debug_id);
49         return op;
50 }
51
/* Waiter record queued on vnode->io_lock_waiters whilst contending for the
 * vnode I/O lock (see afs_lock_for_io*() and afs_unlock_for_io()).
 */
struct afs_io_locker {
	struct list_head	link;		/* Link in vnode->io_lock_waiters */
	struct task_struct	*task;		/* Task to wake when the lock is handed over */
	unsigned long		have_lock;	/* Set non-zero when the lock is passed to us */
};
57
/*
 * Unlock the I/O lock on a vnode.
 *
 * If anyone is queued waiting, the lock is handed directly to the first
 * waiter rather than being released: have_lock is set with release semantics
 * (paired with the acquire load in the lock functions) and the waiter woken.
 * Only if the queue is empty is AFS_VNODE_IO_LOCK actually cleared.
 */
static void afs_unlock_for_io(struct afs_vnode *vnode)
{
	struct afs_io_locker *locker;

	spin_lock(&vnode->lock);
	locker = list_first_entry_or_null(&vnode->io_lock_waiters,
					  struct afs_io_locker, link);
	if (locker) {
		/* Hand the lock straight to the first queued waiter. */
		list_del(&locker->link);
		smp_store_release(&locker->have_lock, 1); /* The unlock barrier. */
		smp_mb__after_atomic(); /* Store have_lock before task state */
		wake_up_process(locker->task);
	} else {
		clear_bit(AFS_VNODE_IO_LOCK, &vnode->flags);
	}
	spin_unlock(&vnode->lock);
}
78
/*
 * Lock the I/O lock on a vnode uninterruptibly.  We can't use an ordinary
 * mutex as lockdep will complain if we unlock it in the wrong thread.
 */
static void afs_lock_for_io(struct afs_vnode *vnode)
{
	struct afs_io_locker myself = { .task = current, };

	spin_lock(&vnode->lock);

	/* Fast path: the bit was clear, so we now own the lock. */
	if (!test_and_set_bit(AFS_VNODE_IO_LOCK, &vnode->flags)) {
		spin_unlock(&vnode->lock);
		return;
	}

	/* Contended: queue ourselves and sleep until the current holder
	 * hands the lock over (afs_unlock_for_io() sets have_lock and
	 * wakes us).
	 */
	list_add_tail(&myself.link, &vnode->io_lock_waiters);
	spin_unlock(&vnode->lock);

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (smp_load_acquire(&myself.have_lock)) /* The lock barrier */
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}
105
/*
 * Lock the I/O lock on a vnode interruptibly.  We can't use an ordinary mutex
 * as lockdep will complain if we unlock it in the wrong thread.
 *
 * Returns 0 if the lock was obtained or -ERESTARTSYS if a signal arrived.
 */
static int afs_lock_for_io_interruptible(struct afs_vnode *vnode)
{
	struct afs_io_locker myself = { .task = current, };
	int ret = 0;

	spin_lock(&vnode->lock);

	/* Fast path: the bit was clear, so we now own the lock. */
	if (!test_and_set_bit(AFS_VNODE_IO_LOCK, &vnode->flags)) {
		spin_unlock(&vnode->lock);
		return 0;
	}

	/* Contended: queue ourselves and sleep until either the lock is
	 * handed over to us or a signal arrives.
	 */
	list_add_tail(&myself.link, &vnode->io_lock_waiters);
	spin_unlock(&vnode->lock);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (smp_load_acquire(&myself.have_lock) || /* The lock barrier */
		    signal_pending(current))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	/* If we got a signal, try to transfer the lock onto the next
	 * waiter.
	 */
	if (unlikely(signal_pending(current))) {
		spin_lock(&vnode->lock);
		if (myself.have_lock) {
			/* The lock was handed to us anyway before we could
			 * bail out; pass it on to the next waiter.
			 */
			spin_unlock(&vnode->lock);
			afs_unlock_for_io(vnode);
		} else {
			/* Still queued: just dequeue ourselves. */
			list_del(&myself.link);
			spin_unlock(&vnode->lock);
		}
		ret = -ERESTARTSYS;
	}
	return ret;
}
150
/*
 * Lock the vnode(s) being operated upon.
 *
 * Takes the I/O lock on file[0] and, if distinct and required, file[1].
 * When both are needed, they're taken in a consistent (descending pointer)
 * order to avoid ABBA deadlock with another op on the same pair.  On signal
 * interruption, the op is marked stopped with -ERESTARTSYS and false is
 * returned with no locks held.
 */
static bool afs_get_io_locks(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;
	struct afs_vnode *vnode2 = op->file[1].vnode;

	_enter("");

	/* Uninterruptible ops lock only the first vnode, unconditionally. */
	if (op->flags & AFS_OPERATION_UNINTR) {
		afs_lock_for_io(vnode);
		op->flags |= AFS_OPERATION_LOCK_0;
		_leave(" = t [1]");
		return true;
	}

	/* Ignore the second file if it needs no lock or is the same vnode. */
	if (!vnode2 || !op->file[1].need_io_lock || vnode == vnode2)
		vnode2 = NULL;

	/* Impose a stable lock order: higher-addressed vnode first. */
	if (vnode2 > vnode)
		swap(vnode, vnode2);

	if (afs_lock_for_io_interruptible(vnode) < 0) {
		afs_op_set_error(op, -ERESTARTSYS);
		op->flags |= AFS_OPERATION_STOP;
		_leave(" = f [I 0]");
		return false;
	}
	op->flags |= AFS_OPERATION_LOCK_0;

	if (vnode2) {
		if (afs_lock_for_io_interruptible(vnode2) < 0) {
			afs_op_set_error(op, -ERESTARTSYS);
			op->flags |= AFS_OPERATION_STOP;
			/* Back out the first lock so we exit holding none. */
			afs_unlock_for_io(vnode);
			op->flags &= ~AFS_OPERATION_LOCK_0;
			_leave(" = f [I 1]");
			return false;
		}
		op->flags |= AFS_OPERATION_LOCK_1;
	}

	_leave(" = t [2]");
	return true;
}
197
/* Release the I/O locks noted by afs_get_io_locks(), second file first. */
static void afs_drop_io_locks(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;
	struct afs_vnode *vnode2 = op->file[1].vnode;

	_enter("");

	if (op->flags & AFS_OPERATION_LOCK_1)
		afs_unlock_for_io(vnode2);
	if (op->flags & AFS_OPERATION_LOCK_0)
		afs_unlock_for_io(vnode);
}
210
/* Record the pre-operation state of the vnode attached to parameter slot
 * @index so that changes can be detected when the reply comes back.
 */
static void afs_prepare_vnode(struct afs_operation *op, struct afs_vnode_param *vp,
			      unsigned int index)
{
	struct afs_vnode *vnode = vp->vnode;

	if (vnode) {
		/* Snapshot fid, data version and callback-break count. */
		vp->fid			= vnode->fid;
		vp->dv_before		= vnode->status.data_version;
		vp->cb_break_before	= afs_calc_vnode_cb_break(vnode);
		if (vnode->lock_state != AFS_VNODE_LOCK_NONE)
			op->flags	|= AFS_OPERATION_CUR_ONLY;
		if (vp->modification)
			set_bit(AFS_VNODE_MODIFYING, &vnode->flags);
	}

	/* The fid may also have been preset by the caller without a vnode. */
	if (vp->fid.vnode)
		_debug("PREP[%u] {%llx:%llu.%u}",
		       index, vp->fid.vid, vp->fid.vnode, vp->fid.unique);
}
230
231 /*
232  * Begin an operation on the fileserver.
233  *
234  * Fileserver operations are serialised on the server by vnode, so we serialise
235  * them here also using the io_lock.
236  */
237 bool afs_begin_vnode_operation(struct afs_operation *op)
238 {
239         struct afs_vnode *vnode = op->file[0].vnode;
240
241         ASSERT(vnode);
242
243         _enter("");
244
245         if (op->file[0].need_io_lock)
246                 if (!afs_get_io_locks(op))
247                         return false;
248
249         afs_prepare_vnode(op, &op->file[0], 0);
250         afs_prepare_vnode(op, &op->file[1], 1);
251         op->cb_v_break = atomic_read(&op->volume->cb_v_break);
252         _leave(" = true");
253         return true;
254 }
255
256 /*
257  * Tidy up a filesystem cursor and unlock the vnode.
258  */
259 void afs_end_vnode_operation(struct afs_operation *op)
260 {
261         _enter("");
262
263         switch (afs_op_error(op)) {
264         case -EDESTADDRREQ:
265         case -EADDRNOTAVAIL:
266         case -ENETUNREACH:
267         case -EHOSTUNREACH:
268                 afs_dump_edestaddrreq(op);
269                 break;
270         }
271
272         afs_drop_io_locks(op);
273 }
274
/*
 * Wait for an in-progress operation to complete.
 *
 * Rotates through candidate fileservers, issuing the YFS- or AFS-flavoured
 * RPC against each until afs_select_fileserver() ends the iteration, then
 * dispatches to the op's success/aborted/failed handler, drops the vnode
 * I/O locks and, on success, invokes any directory-edit hook.
 */
void afs_wait_for_operation(struct afs_operation *op)
{
	_enter("");

	while (afs_select_fileserver(op)) {
		/* Reset the per-call result state before each attempt. */
		op->call_responded = false;
		op->call_error = 0;
		op->call_abort_code = 0;
		if (test_bit(AFS_SERVER_FL_IS_YFS, &op->server->flags) &&
		    op->ops->issue_yfs_rpc)
			op->ops->issue_yfs_rpc(op);
		else if (op->ops->issue_afs_rpc)
			op->ops->issue_afs_rpc(op);
		else
			op->call_error = -ENOTSUPP;

		if (op->call) {
			afs_wait_for_call_to_complete(op->call);
			/* Copy the results out before dropping our call ref. */
			op->call_abort_code = op->call->abort_code;
			op->call_error = op->call->error;
			op->call_responded = op->call->responded;
			afs_put_call(op->call);
		}
	}

	/* Note that this server actually responded to us. */
	if (op->call_responded && op->server)
		set_bit(AFS_SERVER_FL_RESPONDING, &op->server->flags);

	if (!afs_op_error(op)) {
		_debug("success");
		op->ops->success(op);
	} else if (op->cumul_error.aborted) {
		if (op->ops->aborted)
			op->ops->aborted(op);
	} else {
		if (op->ops->failed)
			op->ops->failed(op);
	}

	afs_end_vnode_operation(op);

	/* On success, give the op a chance to apply directory edits. */
	if (!afs_op_error(op) && op->ops->edit_dir) {
		_debug("edit_dir");
		op->ops->edit_dir(op);
	}
	_leave("");
}
325
/*
 * Dispose of an operation, releasing all the refs and memory it holds and
 * returning its final error code.
 */
int afs_put_operation(struct afs_operation *op)
{
	struct afs_addr_list *alist;
	int i, ret = afs_op_error(op);

	_enter("op=%08x,%d", op->debug_id, ret);

	if (op->ops && op->ops->put)
		op->ops->put(op);
	/* Clear the modification marks set by afs_prepare_vnode(), taking
	 * care not to clear file[0]'s twice if both slots share a vnode.
	 */
	if (op->file[0].modification)
		clear_bit(AFS_VNODE_MODIFYING, &op->file[0].vnode->flags);
	if (op->file[1].modification && op->file[1].vnode != op->file[0].vnode)
		clear_bit(AFS_VNODE_MODIFYING, &op->file[1].vnode->flags);
	if (op->file[0].put_vnode)
		iput(&op->file[0].vnode->netfs.inode);
	if (op->file[1].put_vnode)
		iput(&op->file[1].vnode->netfs.inode);

	/* more_files carries any vnode params beyond the first two slots. */
	if (op->more_files) {
		for (i = 0; i < op->nr_files - 2; i++)
			if (op->more_files[i].put_vnode)
				iput(&op->more_files[i].vnode->netfs.inode);
		kfree(op->more_files);
	}

	if (op->estate) {
		alist = op->estate->addresses;
		if (alist) {
			/* If we got a response on a non-preferred address
			 * after the preferred one had been tried, switch the
			 * preference to the address that answered.
			 */
			if (op->call_responded &&
			    op->addr_index != alist->preferred &&
			    test_bit(alist->preferred, &op->addr_tried))
				WRITE_ONCE(alist->preferred, op->addr_index);
		}
	}

	afs_clear_server_states(op);
	afs_put_serverlist(op->net, op->server_list);
	afs_put_volume(op->volume, afs_volume_trace_put_put_op);
	key_put(op->key);
	kfree(op);
	return ret;
}
371
/*
 * Run an operation synchronously: begin it, wait for it to complete and then
 * dispose of it, returning the final error code.
 */
int afs_do_sync_operation(struct afs_operation *op)
{
	afs_begin_vnode_operation(op);
	afs_wait_for_operation(op);
	return afs_put_operation(op);
}
This page took 0.052562 seconds and 4 git commands to generate.