/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <[email protected]>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <[email protected]>
 */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/rcupdate.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	xprt_init(struct rpc_xprt *xprt, struct net *net);
static __be32	xprt_alloc_xid(struct rpc_xprt *xprt);
static void	xprt_connect_status(struct rpc_task *task);
static void	xprt_destroy(struct rpc_xprt *xprt);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (strcmp(t->name, transport_name) == 0) {
			spin_unlock(&xprt_list_lock);
			goto out;
		}
	}
	spin_unlock(&xprt_list_lock);
	result = request_module("xprt%s", transport_name);
out:
	return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);

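/*
 * Note: the request_module() call above resolves the transport by
 * module alias, requesting "xprt" + the transport name.  For example,
 * xprt_load_transport("rdma") would load a module that declares
 * MODULE_ALIAS("xprtrdma"); built-in transports such as TCP and UDP
 * are found on xprt_list without any module loading.
 */
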
static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_atomic();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_atomic();
	} else
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

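/*
 * A note on the barriers above (editorially inferred from the code,
 * not stated in the original): they order the store of
 * xprt->snd_task = NULL before XPRT_LOCKED becomes visible as clear,
 * so the next task to win test_and_set_bit(XPRT_LOCKED, ...) cannot
 * observe a stale snd_task pointer.
 */
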
/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	xprt->snd_task = task;

	return 1;

out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
	task->tk_status = -EAGAIN;
	rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

static bool
xprt_need_congestion_window_wait(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_set_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!list_empty(&xprt->xmit_queue)) {
		/* Peek at head of queue to see if it can make progress */
		if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst,
					rq_xmit)->rq_cong)
			return;
	}
	set_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!RPCXPRT_CONGESTED(xprt))
		clear_bit(XPRT_CWND_WAIT, &xprt->state);
}

/*
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 * Note that the lock is only granted if we know there are free slots.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		return 1;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (!xprt_need_congestion_window_wait(xprt)) {
		xprt->snd_task = task;
		return 1;
	}
out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
	task->tk_status = -EAGAIN;
	rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task)
		return 1;
	spin_lock_bh(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;

	xprt->snd_task = task;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (xprt_need_congestion_window_wait(xprt))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task != task)
		return;
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			req->rq_task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt)) {
		xprt_set_congestion_window_wait(xprt);
		return 0;
	}
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	xprt_test_and_clear_congestion_window_wait(xprt);
	__xprt_lock_write_next_cong(xprt);
}

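/*
 * Illustration of the credit accounting above (assuming the usual
 * RPC_CWNDSCALE of 1 << RPC_CWNDSHIFT == 256): each request in flight
 * holds RPC_CWNDSCALE units of xprt->cong, and a new credit is granted
 * only while xprt->cong < xprt->cwnd.  A window of cwnd == 512 thus
 * admits two concurrent requests; a third caller finds
 * RPCXPRT_CONGESTED() true and waits behind XPRT_CWND_WAIT.
 */
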
/**
 * xprt_request_get_cong - Request congestion control credits
 * @xprt: pointer to transport
 * @req: pointer to RPC request
 *
 * Useful for transports that require congestion control.
 */
bool
xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	bool ret = false;

	if (req->rq_cong)
		return true;
	spin_lock_bh(&xprt->transport_lock);
	ret = __xprt_get_cong(xprt, req) != 0;
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_request_get_cong);

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

/*
 * Clear the congestion window wait flag and wake up the next
 * entry on xprt->sending
 */
static void
xprt_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) {
		spin_lock_bh(&xprt->transport_lock);
		__xprt_lock_write_next_cong(xprt);
		spin_unlock_bh(&xprt->transport_lock);
	}
}

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	- a reply is received and
 *	- a full number of requests are outstanding and
 *	- the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);

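/*
 * Worked example of the additive increase above (again assuming
 * RPC_CWNDSCALE == 256): with cwnd == 512, one successful reply adds
 * (256 * 256 + 256) / 512 == 128 units, so on the order of
 * cwnd / RPC_CWNDSCALE replies are needed to grow the window by one
 * full request slot.  A timeout halves cwnd (512 -> 256), but never
 * below RPC_CWNDSCALE, and growth is capped at RPC_MAXCWND(xprt).
 */
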
/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @xprt: transport
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_xprt *xprt)
{
	set_bit(XPRT_WRITE_SPACE, &xprt->state);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

static bool
xprt_clear_write_space_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) {
		__xprt_lock_write_next(xprt);
		dprintk("RPC: write space: waking waiting task on "
				"xprt %p\n", xprt);
		return true;
	}
	return false;
}

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
bool xprt_write_space(struct rpc_xprt *xprt)
{
	bool ret;

	if (!test_bit(XPRT_WRITE_SPACE, &xprt->state))
		return false;
	spin_lock_bh(&xprt->transport_lock);
	ret = xprt_clear_write_space_locked(xprt);
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_write_space);

/**
 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation.
 */
void xprt_set_retrans_timeout_def(struct rpc_task *task)
{
	task->tk_timeout = task->tk_rqstp->rq_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);

/**
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout using the RTT estimator.
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;

	task->tk_timeout = rpc_calc_rto(rtt, timer);
	task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
		task->tk_timeout = max_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt);

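/*
 * The shift above doubles the estimated RTO once for every timeout
 * already recorded against this timer class (rpc_ntimeo) and once for
 * every retransmission of this particular request, so a request that
 * keeps timing out backs off exponentially until to_maxval caps it.
 */
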
static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;

	req->rq_majortimeo = req->rq_timeout;
	if (to->to_exponential)
		req->rq_majortimeo <<= to->to_retries;
	else
		req->rq_majortimeo += to->to_increment * to->to_retries;
	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
		req->rq_majortimeo = to->to_maxval;
	req->rq_majortimeo += jiffies;
}

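/*
 * Illustration with made-up values: under a linear policy with
 * rq_timeout == 10 * HZ, to_increment == 10 * HZ and to_retries == 3,
 * the major timeout lands 10 + 10 * 3 == 40 seconds from now; an
 * exponential policy would instead use 10 << 3 == 80 seconds.  Either
 * way the result is clamped to to_maxval before jiffies is added.
 */
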
/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock_bh(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock_bh(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}

static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	dprintk("RPC: disconnected transport %p\n", xprt);
	spin_lock_bh(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_clear_write_space_locked(xprt);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_force_disconnect);

static unsigned int
xprt_connect_cookie(struct rpc_xprt *xprt)
{
	return READ_ONCE(xprt->connect_cookie);
}

static bool
xprt_request_retransmit_after_disconnect(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	return req->rq_connect_cookie != xprt_connect_cookie(xprt) ||
		!xprt_connected(xprt);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock_bh(&xprt->transport_lock);
}

static bool
xprt_has_timer(const struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

static void
xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
	__must_hold(&xprt->transport_lock)
{
	if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
}

static void
xprt_init_autodisconnect(struct timer_list *t)
{
	struct rpc_xprt *xprt = from_timer(xprt, t, timer);

	spin_lock(&xprt->transport_lock);
	if (!RB_EMPTY_ROOT(&xprt->recv_queue))
		goto out_abort;
	/* Reset xprt->last_used to avoid connect/autodisconnect cycling */
	xprt->last_used = jiffies;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		goto out_abort;
	spin_unlock(&xprt->transport_lock);
	queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	return;
out_abort:
	spin_unlock(&xprt->transport_lock);
}

bool xprt_lock_connect(struct rpc_xprt *xprt,
		struct rpc_task *task,
		void *cookie)
{
	bool ret = false;

	spin_lock_bh(&xprt->transport_lock);
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	if (xprt->snd_task != task)
		goto out;
	xprt->snd_task = cookie;
	ret = true;
out:
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}

void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task != cookie)
		goto out;
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	xprt->snd_task = NULL;
	xprt->ops->release_xprt(xprt, NULL);
	xprt_schedule_autodisconnect(xprt);
out:
	spin_unlock_bh(&xprt->transport_lock);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}

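/*
 * Note on the pair above: xprt_lock_connect() hands the send lock from
 * the connecting task over to an opaque cookie (in practice a pointer
 * owned by the transport) so XPRT_LOCKED stays held while a connect
 * worker runs outside the task's context; xprt_unlock_connect() then
 * releases it once the connect attempt finishes.
 */
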
/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
		xprt->ops->close(xprt);

	if (!xprt_connected(xprt)) {
		task->tk_timeout = task->tk_rqstp->rq_timeout;
		task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
		rpc_sleep_on(&xprt->pending, task, xprt_connect_status);

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		xprt->stat.connect_start = jiffies;
		xprt->ops->connect(xprt, task);
	}
	xprt_release_write(xprt, task);
}

static void xprt_connect_status(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	if (task->tk_status == 0) {
		xprt->stat.connect_count++;
		xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
		dprintk("RPC: %5u xprt_connect_status: connection established\n",
				task->tk_pid);
		return;
	}

	switch (task->tk_status) {
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ECONNABORTED:
	case -ENETUNREACH:
	case -EHOSTUNREACH:
	case -EPIPE:
	case -EAGAIN:
		dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
		break;
	case -ETIMEDOUT:
		dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
				"out\n", task->tk_pid);
		break;
	default:
		dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
				"server %s\n", task->tk_pid, -task->tk_status,
				xprt->servername);
		task->tk_status = -EIO;
	}
}

enum xprt_xid_rb_cmp {
	XID_RB_EQUAL,
	XID_RB_LEFT,
	XID_RB_RIGHT,
};
static enum xprt_xid_rb_cmp
xprt_xid_cmp(__be32 xid1, __be32 xid2)
{
	if (xid1 == xid2)
		return XID_RB_EQUAL;
	if ((__force u32)xid1 < (__force u32)xid2)
		return XID_RB_LEFT;
	return XID_RB_RIGHT;
}

static struct rpc_rqst *
xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid)
{
	struct rb_node *n = xprt->recv_queue.rb_node;
	struct rpc_rqst *req;

	while (n != NULL) {
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(xid, req->rq_xid)) {
		case XID_RB_LEFT:
			n = n->rb_left;
			break;
		case XID_RB_RIGHT:
			n = n->rb_right;
			break;
		case XID_RB_EQUAL:
			return req;
		}
	}
	return NULL;
}

static void
xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new)
{
	struct rb_node **p = &xprt->recv_queue.rb_node;
	struct rb_node *n = NULL;
	struct rpc_rqst *req;

	while (*p != NULL) {
		n = *p;
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(new->rq_xid, req->rq_xid)) {
		case XID_RB_LEFT:
			p = &n->rb_left;
			break;
		case XID_RB_RIGHT:
			p = &n->rb_right;
			break;
		case XID_RB_EQUAL:
			WARN_ON_ONCE(new != req);
			return;
		}
	}
	rb_link_node(&new->rq_recv, n, p);
	rb_insert_color(&new->rq_recv, &xprt->recv_queue);
}

static void
xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	rb_erase(&req->rq_recv, &xprt->recv_queue);
}

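/*
 * The receive queue is keyed on rq_xid in an rbtree so the data_ready
 * path can match an incoming reply to its request in O(log n) time,
 * rather than walking every pending request under the queue lock.
 */
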
/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 * Caller holds xprt->queue_lock.
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	entry = xprt_request_rb_find(xprt, xid);
	if (entry != NULL) {
		trace_xprt_lookup_rqst(xprt, xid, 0);
		entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
		return entry;
	}

	dprintk("RPC: xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

static bool
xprt_is_pinned_rqst(struct rpc_rqst *req)
{
	return atomic_read(&req->rq_pin) != 0;
}

/**
 * xprt_pin_rqst - Pin a request on the transport receive list
 * @req: Request to pin
 *
 * Caller must ensure this is atomic with the call to xprt_lookup_rqst()
 * so should be holding the xprt receive lock.
 */
void xprt_pin_rqst(struct rpc_rqst *req)
{
	atomic_inc(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_pin_rqst);

/**
 * xprt_unpin_rqst - Unpin a request on the transport receive list
 * @req: Request to unpin
 *
 * Caller should be holding the xprt receive lock.
 */
void xprt_unpin_rqst(struct rpc_rqst *req)
{
	if (!test_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate)) {
		atomic_dec(&req->rq_pin);
		return;
	}
	if (atomic_dec_and_test(&req->rq_pin))
		wake_up_var(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_unpin_rqst);

static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
{
	wait_var_event(&req->rq_pin, !xprt_is_pinned_rqst(req));
}

static bool
xprt_request_data_received(struct rpc_task *task)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) != 0;
}

static bool
xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) == 0;
}

/**
 * xprt_request_enqueue_receive - Add a request to the receive queue
 * @task: RPC task
 *
 */
void
xprt_request_enqueue_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!xprt_request_need_enqueue_receive(task, req))
		return;
	spin_lock(&xprt->queue_lock);

	/* Update the softirq receive buffer */
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
			sizeof(req->rq_private_buf));

	/* Add request to the receive list */
	xprt_request_rb_insert(xprt, req);
	set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
	spin_unlock(&xprt->queue_lock);

	xprt_reset_majortimeo(req);
	/* Turn off autodisconnect */
	del_singleshot_timer_sync(&xprt->timer);
}

/**
 * xprt_request_dequeue_receive_locked - Remove a request from the receive queue
 * @task: RPC task
 *
 * Caller must hold xprt->queue_lock.
 */
static void
xprt_request_dequeue_receive_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		xprt_request_rb_remove(req->rq_xprt, req);
}

/**
 * xprt_update_rtt - Update RPC RTT statistics
 * @task: RPC request that recently completed
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}
EXPORT_SYMBOL_GPL(xprt_update_rtt);

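/*
 * The rq_ntrans == 1 test above means an RTT sample is recorded only
 * when the request was transmitted exactly once, in the spirit of
 * Karn's algorithm: a reply to a retransmitted request cannot safely
 * be attributed to a particular transmission, so it is not sampled.
 */
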
/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);
	trace_xprt_complete_rqst(xprt, req->rq_xid, copied);

	xprt->stat.recvs++;

	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	xprt_request_dequeue_receive_locked(task);
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);

static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;

	trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
}

/**
 * xprt_request_wait_receive - wait for the reply to an RPC request
 * @task: RPC task about to send a request
 *
 */
void xprt_request_wait_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		return;
	/*
	 * Sleep on the pending queue if we're expecting a reply.
	 * The spinlock ensures atomicity between the test of
	 * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
	 */
	spin_lock(&xprt->queue_lock);
	if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
		xprt->ops->set_retrans_timeout(task);
		rpc_sleep_on(&xprt->pending, task, xprt_timer);
		/*
		 * Send an extra queue wakeup call if the
		 * connection was dropped in case the call to
		 * rpc_sleep_on() raced.
		 */
		if (xprt_request_retransmit_after_disconnect(task))
			rpc_wake_up_queued_task_set_status(&xprt->pending,
					task, -ENOTCONN);
	}
	spin_unlock(&xprt->queue_lock);
}

static bool
xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
}

/**
 * xprt_request_enqueue_transmit - queue a task for transmission
 * @task: pointer to rpc_task
 *
 * Add a task to the transmission queue.
 */
void
xprt_request_enqueue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *pos, *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt_request_need_enqueue_transmit(task, req)) {
		spin_lock(&xprt->queue_lock);
		/*
		 * Requests that carry congestion control credits are added
		 * to the head of the list to avoid starvation issues.
		 */
		if (req->rq_cong) {
			xprt_clear_congestion_window_wait(xprt);
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong)
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				goto out;
			}
		} else if (RPC_IS_SWAPPER(task)) {
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong || pos->rq_bytes_sent)
					continue;
				if (RPC_IS_SWAPPER(pos->rq_task))
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				goto out;
			}
		} else {
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_task->tk_owner != task->tk_owner)
					continue;
				list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
				INIT_LIST_HEAD(&req->rq_xmit);
				goto out;
			}
		}
		list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
		INIT_LIST_HEAD(&req->rq_xmit2);
out:
		set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
		spin_unlock(&xprt->queue_lock);
	}
}

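/*
 * Ordering summary for the queue built above: requests holding
 * congestion credits sort ahead of those without; swap-out requests
 * jump ahead of other untransmitted, uncredited requests; and requests
 * sharing a tk_owner are chained on rq_xmit2 behind the first request
 * from that owner so they are transmitted back-to-back.
 */
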
/**
 * xprt_request_dequeue_transmit_locked - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 * Caller must hold xprt->queue_lock
 */
static void
xprt_request_dequeue_transmit_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
		return;
	if (!list_empty(&req->rq_xmit)) {
		list_del(&req->rq_xmit);
		if (!list_empty(&req->rq_xmit2)) {
			struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
					struct rpc_rqst, rq_xmit2);
			list_del(&req->rq_xmit2);
			list_add_tail(&next->rq_xmit, &next->rq_xprt->xmit_queue);
		}
	} else
		list_del(&req->rq_xmit2);
}

/**
 * xprt_request_dequeue_transmit - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 */
static void
xprt_request_dequeue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	spin_lock(&xprt->queue_lock);
	xprt_request_dequeue_transmit_locked(task);
	spin_unlock(&xprt->queue_lock);
}

/**
 * xprt_request_need_retransmit - Test if a task needs retransmission
 * @task: pointer to rpc_task
 *
 * Test for whether a connection breakage requires the task to retransmit
 */
bool
xprt_request_need_retransmit(struct rpc_task *task)
{
	return xprt_request_retransmit_after_disconnect(task);
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	if (!xprt_lock_write(xprt, task)) {
		/* Race breaker: someone may have transmitted us */
		if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			rpc_wake_up_queued_task_set_status(&xprt->sending,
					task, 0);
		return false;
	}
	return true;
}

void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
}

/**
 * xprt_request_transmit - send an RPC request on a transport
 * @req: pointer to request to transmit
 * @snd_task: RPC task that owns the transport lock
 *
 * This performs the transmission of a single request.
 * Note that if the request is not the same as snd_task, then it
 * does need to be pinned.
 * Returns '0' on success.
 */
static int
xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct rpc_task *task = req->rq_task;
	unsigned int connect_cookie;
	int is_retrans = RPC_WAS_SENT(task);
	int status;

	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

	if (!req->rq_bytes_sent) {
		if (xprt_request_data_received(task)) {
			status = 0;
			goto out_dequeue;
		}
		/* Verify that our message lies in the RPCSEC_GSS window */
		if (rpcauth_xmit_need_reencode(task)) {
			status = -EBADMSG;
			goto out_dequeue;
		}
	}

	/*
	 * Update req->rq_ntrans before transmitting to avoid races with
	 * xprt_update_rtt(), which needs to know that it is recording a
	 * reply to the first transmission.
	 */
	req->rq_ntrans++;

	connect_cookie = xprt->connect_cookie;
	status = xprt->ops->send_request(req);
	trace_xprt_transmit(xprt, req->rq_xid, status);
	if (status != 0) {
		req->rq_ntrans--;
		return status;
	}

	if (is_retrans)
		task->tk_client->cl_stats->rpcretrans++;

	xprt_inject_disconnect(xprt);

	dprintk("RPC: %5u xmit complete\n", task->tk_pid);
	task->tk_flags |= RPC_TASK_SENT;
	spin_lock_bh(&xprt->transport_lock);

	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;
	spin_unlock_bh(&xprt->transport_lock);

	req->rq_connect_cookie = connect_cookie;
out_dequeue:
	xprt_request_dequeue_transmit(task);
	rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
	return status;
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * Attempts to drain the transmit queue. On exit, either the transport
 * signalled an error that needs to be handled before transmission can
 * resume, or @task finished transmitting, and detected that it already
 * received a reply.
 */
void
xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst *next, *req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	int status;

	spin_lock(&xprt->queue_lock);
	while (!list_empty(&xprt->xmit_queue)) {
		next = list_first_entry(&xprt->xmit_queue,
				struct rpc_rqst, rq_xmit);
		xprt_pin_rqst(next);
		spin_unlock(&xprt->queue_lock);
		status = xprt_request_transmit(next, task);
		if (status == -EBADMSG && next != req)
			status = 0;
		cond_resched();
		spin_lock(&xprt->queue_lock);
		xprt_unpin_rqst(next);
		if (status == 0) {
			if (!xprt_request_data_received(task) ||
			    test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
				continue;
		} else if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			task->tk_status = status;
		break;
	}
	spin_unlock(&xprt->queue_lock);
}

static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
	if (rpc_wake_up_next(&xprt->backlog) == NULL)
		clear_bit(XPRT_CONGESTED, &xprt->state);
}

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		rpc_sleep_on(&xprt->backlog, task, NULL);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

92ea011f | 1444 | static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt) |
d9ba131d TM |
1445 | { |
1446 | struct rpc_rqst *req = ERR_PTR(-EAGAIN); | |
1447 | ||
ff699ea8 | 1448 | if (xprt->num_reqs >= xprt->max_reqs) |
d9ba131d | 1449 | goto out; |
ff699ea8 | 1450 | ++xprt->num_reqs; |
92ea011f TM |
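	/*
	 * num_reqs was bumped above so that concurrent allocators
	 * cannot oversubscribe max_reqs while the lock is dropped;
	 * kzalloc(GFP_NOFS) may sleep, so reserve_lock cannot be held
	 * across it. On failure the count is rolled back below.
	 */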
1451 | spin_unlock(&xprt->reserve_lock); |
1452 | req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS); | |
1453 | spin_lock(&xprt->reserve_lock); | |
d9ba131d TM |
1454 | if (req != NULL) |
1455 | goto out; | |
ff699ea8 | 1456 | --xprt->num_reqs; |
d9ba131d TM |
1457 | req = ERR_PTR(-ENOMEM); |
1458 | out: | |
1459 | return req; | |
1460 | } | |
1461 | ||
1462 | static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) | |
1463 | { | |
ff699ea8 CL |
1464 | if (xprt->num_reqs > xprt->min_reqs) { |
1465 | --xprt->num_reqs; | |
d9ba131d TM |
1466 | kfree(req); |
1467 | return true; | |
1468 | } | |
1469 | return false; | |
1470 | } | |
1471 | ||
f39c1bfb | 1472 | void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task) |
1da177e4 | 1473 | { |
d9ba131d | 1474 | struct rpc_rqst *req; |
1da177e4 | 1475 | |
f39c1bfb | 1476 | spin_lock(&xprt->reserve_lock); |
1da177e4 | 1477 | if (!list_empty(&xprt->free)) { |
d9ba131d TM |
1478 | req = list_entry(xprt->free.next, struct rpc_rqst, rq_list); |
1479 | list_del(&req->rq_list); | |
1480 | goto out_init_req; | |
1481 | } | |
92ea011f | 1482 | req = xprt_dynamic_alloc_slot(xprt); |
d9ba131d TM |
1483 | if (!IS_ERR(req)) |
1484 | goto out_init_req; | |
1485 | switch (PTR_ERR(req)) { | |
1486 | case -ENOMEM: | |
d9ba131d TM |
1487 | dprintk("RPC: dynamic allocation of request slot " |
1488 | "failed! Retrying\n"); | |
1afeaf5c | 1489 | task->tk_status = -ENOMEM; |
d9ba131d TM |
1490 | break; |
1491 | case -EAGAIN: | |
ba60eb25 | 1492 | xprt_add_backlog(xprt, task); |
d9ba131d | 1493 | dprintk("RPC: waiting for request slot\n"); |
e9d47639 | 1494 | /* fall through */ |
1afeaf5c TM |
1495 | default: |
1496 | task->tk_status = -EAGAIN; | |
1da177e4 | 1497 | } |
f39c1bfb | 1498 | spin_unlock(&xprt->reserve_lock); |
d9ba131d TM |
1499 | return; |
1500 | out_init_req: | |
ff699ea8 CL |
1501 | xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots, |
1502 | xprt->num_reqs); | |
37ac86c3 CL |
1503 | spin_unlock(&xprt->reserve_lock); |
1504 | ||
d9ba131d TM |
1505 | task->tk_status = 0; |
1506 | task->tk_rqstp = req; | |
f39c1bfb TM |
1507 | } |
1508 | EXPORT_SYMBOL_GPL(xprt_alloc_slot); | |
1509 | ||
a9cde23a | 1510 | void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) |
ee5ebe85 | 1511 | { |
ee5ebe85 | 1512 | spin_lock(&xprt->reserve_lock); |
c25573b5 TM |
1513 | if (!xprt_dynamic_free_slot(xprt, req)) { |
1514 | memset(req, 0, sizeof(*req)); /* mark unused */ | |
1515 | list_add(&req->rq_list, &xprt->free); | |
1516 | } | |
ba60eb25 | 1517 | xprt_wake_up_backlog(xprt); |
ee5ebe85 TM |
1518 | spin_unlock(&xprt->reserve_lock); |
1519 | } | |
a9cde23a | 1520 | EXPORT_SYMBOL_GPL(xprt_free_slot); |
ee5ebe85 | 1521 | |
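/*
 * Hypothetical excerpt of a transport's ops table: most transports can
 * plug these generic slot routines straight in (the socket transports
 * do), overriding them only when slots are tied to transport-specific
 * resources.
 */
static const struct rpc_xprt_ops example_slot_ops = {
	.alloc_slot	= xprt_alloc_slot,
	.free_slot	= xprt_free_slot,
	/* ... remaining callbacks omitted ... */
};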
21de0a95 TM |
1522 | static void xprt_free_all_slots(struct rpc_xprt *xprt) |
1523 | { | |
1524 | struct rpc_rqst *req; | |
1525 | while (!list_empty(&xprt->free)) { | |
1526 | req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list); | |
1527 | list_del(&req->rq_list); | |
1528 | kfree(req); | |
1529 | } | |
1530 | } | |
1531 | ||
d9ba131d TM |
1532 | struct rpc_xprt *xprt_alloc(struct net *net, size_t size, |
1533 | unsigned int num_prealloc, | |
1534 | unsigned int max_alloc) | |
bd1722d4 PE |
1535 | { |
1536 | struct rpc_xprt *xprt; | |
21de0a95 TM |
1537 | struct rpc_rqst *req; |
1538 | int i; | |
bd1722d4 PE |
1539 | |
1540 | xprt = kzalloc(size, GFP_KERNEL); | |
1541 | if (xprt == NULL) | |
1542 | goto out; | |
1543 | ||
21de0a95 TM |
1544 | xprt_init(xprt, net); |
1545 | ||
1546 | for (i = 0; i < num_prealloc; i++) { | |
1547 | req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL); | |
1548 | if (!req) | |
8313164c | 1549 | goto out_free; |
21de0a95 TM |
1550 | list_add(&req->rq_list, &xprt->free); |
1551 | } | |
d9ba131d TM |
1552 | if (max_alloc > num_prealloc) |
1553 | xprt->max_reqs = max_alloc; | |
1554 | else | |
1555 | xprt->max_reqs = num_prealloc; | |
1556 | xprt->min_reqs = num_prealloc; | |
ff699ea8 | 1557 | xprt->num_reqs = num_prealloc; |
bd1722d4 PE |
1558 | |
1559 | return xprt; | |
1560 | ||
1561 | out_free: | |
21de0a95 | 1562 | xprt_free(xprt); |
bd1722d4 PE |
1563 | out: |
1564 | return NULL; | |
1565 | } | |
1566 | EXPORT_SYMBOL_GPL(xprt_alloc); | |
1567 | ||
e204e621 PE |
1568 | void xprt_free(struct rpc_xprt *xprt) |
1569 | { | |
37aa2133 | 1570 | put_net(xprt->xprt_net); |
21de0a95 | 1571 | xprt_free_all_slots(xprt); |
fda1bfef | 1572 | kfree_rcu(xprt, rcu); |
e204e621 PE |
1573 | } |
1574 | EXPORT_SYMBOL_GPL(xprt_free); | |
1575 | ||
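/*
 * Hypothetical transport setup (loosely modelled on the socket
 * transports): embed struct rpc_xprt at the start of a private
 * structure and let xprt_alloc() size the whole thing, preallocating
 * 16 slots and allowing dynamic growth up to 128.
 */
struct example_xprt {
	struct rpc_xprt	xprt;		/* must be first */
	int		private_state;
};

static struct rpc_xprt *example_setup(struct xprt_create *args)
{
	struct rpc_xprt *xprt;

	xprt = xprt_alloc(args->net, sizeof(struct example_xprt), 16, 128);
	if (xprt == NULL)
		return ERR_PTR(-ENOMEM);
	/* ... fill in xprt->ops, timeouts and addresses here ... */
	return xprt;
}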
902c5887 TM |
1576 | static void |
1577 | xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt) | |
1578 | { | |
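	/*
	 * Start one generation behind the transport's current connect
	 * cookie so that the request reads as not yet transmitted on
	 * this connection.
	 */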
1579 | req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1; | |
1580 | } | |
1581 | ||
9dc6edcf TM |
1582 | static __be32 |
1583 | xprt_alloc_xid(struct rpc_xprt *xprt) | |
1584 | { | |
1585 | __be32 xid; | |
1586 | ||
1587 | spin_lock(&xprt->reserve_lock); | |
1588 | xid = (__force __be32)xprt->xid++; | |
1589 | spin_unlock(&xprt->reserve_lock); | |
1590 | return xid; | |
1591 | } | |
1592 | ||
1593 | static void | |
1594 | xprt_init_xid(struct rpc_xprt *xprt) | |
1595 | { | |
1596 | xprt->xid = prandom_u32(); | |
1597 | } | |
1598 | ||
1599 | static void | |
1600 | xprt_request_init(struct rpc_task *task) | |
1601 | { | |
1602 | struct rpc_xprt *xprt = task->tk_xprt; | |
1603 | struct rpc_rqst *req = task->tk_rqstp; | |
1604 | ||
9dc6edcf TM |
1605 | req->rq_timeout = task->tk_client->cl_timeout->to_initval; |
1606 | req->rq_task = task; | |
1607 | req->rq_xprt = xprt; | |
1608 | req->rq_buffer = NULL; | |
1609 | req->rq_xid = xprt_alloc_xid(xprt); | |
902c5887 | 1610 | xprt_init_connect_cookie(req, xprt); |
9dc6edcf TM |
1611 | req->rq_bytes_sent = 0; |
1612 | req->rq_snd_buf.len = 0; | |
1613 | req->rq_snd_buf.buflen = 0; | |
1614 | req->rq_rcv_buf.len = 0; | |
1615 | req->rq_rcv_buf.buflen = 0; | |
1616 | req->rq_release_snd_buf = NULL; | |
1617 | xprt_reset_majortimeo(req); | |
1618 | dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid, | |
1619 | req, ntohl(req->rq_xid)); | |
1620 | } | |
1621 | ||
1622 | static void | |
1623 | xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task) | |
1624 | { | |
1625 | xprt->ops->alloc_slot(xprt, task); | |
1626 | if (task->tk_rqstp != NULL) | |
1627 | xprt_request_init(task); | |
1628 | } | |
1629 | ||
9903cd1c CL |
1630 | /** |
1631 | * xprt_reserve - allocate an RPC request slot | |
1632 | * @task: RPC task requesting a slot allocation | |
1633 | * | |
ba60eb25 TM |
1634 | * If the transport is marked as congested, or if no more |
1635 | * slots are available, place the task on the transport's |
9903cd1c CL |
1636 | * backlog queue. |
1637 | */ | |
1638 | void xprt_reserve(struct rpc_task *task) | |
1da177e4 | 1639 | { |
fb43d172 | 1640 | struct rpc_xprt *xprt = task->tk_xprt; |
1da177e4 | 1641 | |
43cedbf0 TM |
1642 | task->tk_status = 0; |
1643 | if (task->tk_rqstp != NULL) | |
1644 | return; | |
1645 | ||
43cedbf0 TM |
1646 | task->tk_timeout = 0; |
1647 | task->tk_status = -EAGAIN; | |
ba60eb25 | 1648 | if (!xprt_throttle_congested(xprt, task)) |
9dc6edcf | 1649 | xprt_do_reserve(xprt, task); |
ba60eb25 TM |
1650 | } |
1651 | ||
1652 | /** | |
1653 | * xprt_retry_reserve - allocate an RPC request slot | |
1654 | * @task: RPC task requesting a slot allocation | |
1655 | * | |
1656 | * If no more slots are available, place the task on the transport's | |
1657 | * backlog queue. | |
1658 | * Note that the only difference from xprt_reserve is that we |
1659 | * ignore the value of the XPRT_CONGESTED flag. |
1660 | */ | |
1661 | void xprt_retry_reserve(struct rpc_task *task) | |
1662 | { | |
fb43d172 | 1663 | struct rpc_xprt *xprt = task->tk_xprt; |
ba60eb25 TM |
1664 | |
1665 | task->tk_status = 0; | |
1666 | if (task->tk_rqstp != NULL) | |
1667 | return; | |
1668 | ||
1669 | task->tk_timeout = 0; | |
1670 | task->tk_status = -EAGAIN; | |
9dc6edcf | 1671 | xprt_do_reserve(xprt, task); |
1da177e4 LT |
1672 | } |
1673 | ||
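/*
 * Hypothetical sketch of a caller-side state-machine action (the real
 * callers are call_reserve() and call_reserveresult() in clnt.c):
 * invoke xprt_reserve() and interpret the outcome through tk_rqstp
 * and tk_status.
 */
static void example_reserve_action(struct rpc_task *task)
{
	xprt_reserve(task);
	if (task->tk_rqstp != NULL)
		return;		/* slot acquired; proceed to encode */
	/*
	 * No slot: tk_status is -EAGAIN (the task was parked on the
	 * backlog queue, or should simply retry the reservation) or
	 * -ENOMEM (dynamic slot allocation failed).
	 */
}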
edc81dcd TM |
1674 | static void |
1675 | xprt_request_dequeue_all(struct rpc_task *task, struct rpc_rqst *req) | |
1676 | { | |
1677 | struct rpc_xprt *xprt = req->rq_xprt; | |
1678 | ||
944b0429 TM |
1679 | if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) || |
1680 | test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) || | |
edc81dcd TM |
1681 | xprt_is_pinned_rqst(req)) { |
1682 | spin_lock(&xprt->queue_lock); | |
944b0429 | 1683 | xprt_request_dequeue_transmit_locked(task); |
edc81dcd TM |
1684 | xprt_request_dequeue_receive_locked(task); |
1685 | while (xprt_is_pinned_rqst(req)) { | |
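			/*
			 * A receive or transmit path may still hold a pin
			 * on this request; wait until the last pin is
			 * dropped. xprt_unpin_rqst() checks
			 * RPC_TASK_MSG_PIN_WAIT and issues the wakeup.
			 */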
1686 | set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate); | |
1687 | spin_unlock(&xprt->queue_lock); | |
1688 | xprt_wait_on_pinned_rqst(req); | |
1689 | spin_lock(&xprt->queue_lock); | |
1690 | clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate); | |
1691 | } | |
1692 | spin_unlock(&xprt->queue_lock); | |
1693 | } | |
1694 | } | |
1695 | ||
9903cd1c CL |
1696 | /** |
1697 | * xprt_release - release an RPC request slot | |
1698 | * @task: task which is finished with the slot | |
1699 | * | |
1da177e4 | 1700 | */ |
9903cd1c | 1701 | void xprt_release(struct rpc_task *task) |
1da177e4 | 1702 | { |
55ae1aab | 1703 | struct rpc_xprt *xprt; |
87ed5003 | 1704 | struct rpc_rqst *req = task->tk_rqstp; |
1da177e4 | 1705 | |
87ed5003 TM |
1706 | if (req == NULL) { |
1707 | if (task->tk_client) { | |
fb43d172 | 1708 | xprt = task->tk_xprt; |
bd79bc57 | 1709 | xprt_release_write(xprt, task); |
87ed5003 | 1710 | } |
1da177e4 | 1711 | return; |
87ed5003 | 1712 | } |
55ae1aab | 1713 | |
55ae1aab | 1714 | xprt = req->rq_xprt; |
0a702195 WAA |
1715 | if (task->tk_ops->rpc_count_stats != NULL) |
1716 | task->tk_ops->rpc_count_stats(task, task->tk_calldata); | |
1717 | else if (task->tk_client) | |
1718 | rpc_count_iostats(task, task->tk_client->cl_metrics); | |
edc81dcd | 1719 | xprt_request_dequeue_all(task, req); |
4a0f8c04 | 1720 | spin_lock_bh(&xprt->transport_lock); |
49e9a890 | 1721 | xprt->ops->release_xprt(xprt, task); |
a58dd398 CL |
1722 | if (xprt->ops->release_request) |
1723 | xprt->ops->release_request(task); | |
1da177e4 | 1724 | xprt->last_used = jiffies; |
ad3331ac | 1725 | xprt_schedule_autodisconnect(xprt); |
4a0f8c04 | 1726 | spin_unlock_bh(&xprt->transport_lock); |
ee5ebe85 | 1727 | if (req->rq_buffer) |
3435c74a | 1728 | xprt->ops->buf_free(task); |
4a068258 | 1729 | xprt_inject_disconnect(xprt); |
a17c2153 TM |
1730 | if (req->rq_cred != NULL) |
1731 | put_rpccred(req->rq_cred); | |
1da177e4 | 1732 | task->tk_rqstp = NULL; |
ead5e1c2 BF |
1733 | if (req->rq_release_snd_buf) |
1734 | req->rq_release_snd_buf(req); | |
55ae1aab | 1735 | |
46121cf7 | 1736 | dprintk("RPC: %5u release request %p\n", task->tk_pid, req); |
ee5ebe85 | 1737 | if (likely(!bc_prealloc(req))) |
a9cde23a | 1738 | xprt->ops->free_slot(xprt, req); |
ee5ebe85 | 1739 | else |
c9acb42e | 1740 | xprt_free_bc_request(req); |
1da177e4 LT |
1741 | } |
1742 | ||
902c5887 TM |
1743 | #ifdef CONFIG_SUNRPC_BACKCHANNEL |
1744 | void | |
1745 | xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task) | |
1746 | { | |
1747 | struct xdr_buf *xbufp = &req->rq_snd_buf; | |
1748 | ||
1749 | task->tk_rqstp = req; | |
1750 | req->rq_task = task; | |
1751 | xprt_init_connect_cookie(req, req->rq_xprt); | |
1752 | /* | |
1753 | * Set up the xdr_buf length. | |
1754 | * This also indicates that the buffer is XDR encoded already. | |
1755 | */ | |
1756 | xbufp->len = xbufp->head[0].iov_len + xbufp->page_len + | |
1757 | xbufp->tail[0].iov_len; | |
1758 | req->rq_bytes_sent = 0; | |
1759 | } | |
1760 | #endif | |
1761 | ||
21de0a95 | 1762 | static void xprt_init(struct rpc_xprt *xprt, struct net *net) |
c2866763 | 1763 | { |
30c5116b | 1764 | kref_init(&xprt->kref); |
c2866763 CL |
1765 | |
1766 | spin_lock_init(&xprt->transport_lock); | |
1767 | spin_lock_init(&xprt->reserve_lock); | |
75c84151 | 1768 | spin_lock_init(&xprt->queue_lock); |
c2866763 CL |
1769 | |
1770 | INIT_LIST_HEAD(&xprt->free); | |
95f7691d | 1771 | xprt->recv_queue = RB_ROOT; |
944b0429 | 1772 | INIT_LIST_HEAD(&xprt->xmit_queue); |
9e00abc3 | 1773 | #if defined(CONFIG_SUNRPC_BACKCHANNEL) |
f9acac1a RL |
1774 | spin_lock_init(&xprt->bc_pa_lock); |
1775 | INIT_LIST_HEAD(&xprt->bc_pa_list); | |
9e00abc3 | 1776 | #endif /* CONFIG_SUNRPC_BACKCHANNEL */ |
80b14d5e | 1777 | INIT_LIST_HEAD(&xprt->xprt_switch); |
f9acac1a | 1778 | |
c2866763 CL |
1779 | xprt->last_used = jiffies; |
1780 | xprt->cwnd = RPC_INITCWND; | |
a509050b | 1781 | xprt->bind_index = 0; |
c2866763 CL |
1782 | |
1783 | rpc_init_wait_queue(&xprt->binding, "xprt_binding"); | |
1784 | rpc_init_wait_queue(&xprt->pending, "xprt_pending"); | |
79c99152 | 1785 | rpc_init_wait_queue(&xprt->sending, "xprt_sending"); |
c2866763 CL |
1786 | rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog"); |
1787 | ||
c2866763 CL |
1788 | xprt_init_xid(xprt); |
1789 | ||
21de0a95 | 1790 | xprt->xprt_net = get_net(net); |
8d9266ff TM |
1791 | } |
1792 | ||
1793 | /** | |
1794 | * xprt_create_transport - create an RPC transport | |
1795 | * @args: rpc transport creation arguments | |
1796 | * | |
1797 | */ | |
1798 | struct rpc_xprt *xprt_create_transport(struct xprt_create *args) | |
1799 | { | |
1800 | struct rpc_xprt *xprt; | |
1801 | struct xprt_class *t; | |
1802 | ||
1803 | spin_lock(&xprt_list_lock); | |
1804 | list_for_each_entry(t, &xprt_list, list) { | |
1805 | if (t->ident == args->ident) { | |
1806 | spin_unlock(&xprt_list_lock); | |
1807 | goto found; | |
1808 | } | |
1809 | } | |
1810 | spin_unlock(&xprt_list_lock); | |
3c45ddf8 | 1811 | dprintk("RPC: transport (%d) not supported\n", args->ident); |
8d9266ff TM |
1812 | return ERR_PTR(-EIO); |
1813 | ||
1814 | found: | |
1815 | xprt = t->setup(args); | |
1816 | if (IS_ERR(xprt)) { | |
1817 | dprintk("RPC: xprt_create_transport: failed, %ld\n", | |
1818 | -PTR_ERR(xprt)); | |
21de0a95 | 1819 | goto out; |
8d9266ff | 1820 | } |
33d90ac0 BF |
1821 | if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT) |
1822 | xprt->idle_timeout = 0; | |
21de0a95 TM |
1823 | INIT_WORK(&xprt->task_cleanup, xprt_autoclose); |
1824 | if (xprt_has_timer(xprt)) | |
ff861c4d | 1825 | timer_setup(&xprt->timer, xprt_init_autodisconnect, 0); |
21de0a95 | 1826 | else |
ff861c4d | 1827 | timer_setup(&xprt->timer, NULL, 0); |
4e0038b6 TM |
1828 | |
1829 | if (strlen(args->servername) > RPC_MAXNETNAMELEN) { | |
1830 | xprt_destroy(xprt); | |
1831 | return ERR_PTR(-EINVAL); | |
1832 | } | |
1833 | xprt->servername = kstrdup(args->servername, GFP_KERNEL); | |
1834 | if (xprt->servername == NULL) { | |
1835 | xprt_destroy(xprt); | |
1836 | return ERR_PTR(-ENOMEM); | |
1837 | } | |
1838 | ||
3f940098 | 1839 | rpc_xprt_debugfs_register(xprt); |
388f0c77 | 1840 | |
46121cf7 | 1841 | dprintk("RPC: created transport %p with %u slots\n", xprt, |
c2866763 | 1842 | xprt->max_reqs); |
21de0a95 | 1843 | out: |
c2866763 CL |
1844 | return xprt; |
1845 | } | |
1846 | ||
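/*
 * Hypothetical caller (the real one is rpc_create() in clnt.c): fill
 * in struct xprt_create and let xprt_create_transport() look the
 * implementation up by its ident.
 */
static struct rpc_xprt *example_create(struct net *net,
				       struct sockaddr *sap, size_t salen,
				       const char *servername)
{
	struct xprt_create args = {
		.ident		= XPRT_TRANSPORT_TCP,
		.net		= net,
		.dstaddr	= sap,
		.addrlen	= salen,
		.servername	= servername,
	};

	return xprt_create_transport(&args);
}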
528fd354 TM |
1847 | static void xprt_destroy_cb(struct work_struct *work) |
1848 | { | |
1849 | struct rpc_xprt *xprt = | |
1850 | container_of(work, struct rpc_xprt, task_cleanup); | |
1851 | ||
1852 | rpc_xprt_debugfs_unregister(xprt); | |
1853 | rpc_destroy_wait_queue(&xprt->binding); | |
1854 | rpc_destroy_wait_queue(&xprt->pending); | |
1855 | rpc_destroy_wait_queue(&xprt->sending); | |
1856 | rpc_destroy_wait_queue(&xprt->backlog); | |
1857 | kfree(xprt->servername); | |
1858 | /* | |
1859 | * Tear down transport state and free the rpc_xprt | |
1860 | */ | |
1861 | xprt->ops->destroy(xprt); | |
1862 | } | |
1863 | ||
9903cd1c CL |
1864 | /** |
1865 | * xprt_destroy - destroy an RPC transport, killing off all requests. | |
a8de240a | 1866 | * @xprt: transport to destroy |
9903cd1c | 1867 | * |
1da177e4 | 1868 | */ |
a8de240a | 1869 | static void xprt_destroy(struct rpc_xprt *xprt) |
1da177e4 | 1870 | { |
46121cf7 | 1871 | dprintk("RPC: destroying transport %p\n", xprt); |
79234c3d | 1872 | |
528fd354 TM |
1873 | /* |
1874 | * Exclude transport connect/disconnect handlers and autoclose | |
1875 | */ | |
79234c3d TM |
1876 | wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE); |
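	/*
	 * Note that XPRT_LOCKED is never released here: the transport
	 * is going away, so the connect, disconnect and autoclose
	 * handlers must stay excluded for good.
	 */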
1877 | ||
0065db32 | 1878 | del_timer_sync(&xprt->timer); |
c8541ecd CL |
1879 | |
1880 | /* | |
528fd354 TM |
1881 | * Destroy sockets, etc., from the system workqueue so they can |
1882 | * safely flush receive work running on rpciod. | |
c8541ecd | 1883 | */ |
528fd354 TM |
1884 | INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb); |
1885 | schedule_work(&xprt->task_cleanup); | |
6b6ca86b | 1886 | } |
1da177e4 | 1887 | |
30c5116b TM |
1888 | static void xprt_destroy_kref(struct kref *kref) |
1889 | { | |
1890 | xprt_destroy(container_of(kref, struct rpc_xprt, kref)); | |
1891 | } | |
1892 | ||
1893 | /** | |
1894 | * xprt_get - return a reference to an RPC transport. | |
1895 | * @xprt: pointer to the transport | |
1896 | * | |
1897 | */ | |
1898 | struct rpc_xprt *xprt_get(struct rpc_xprt *xprt) | |
1899 | { | |
1900 | if (xprt != NULL && kref_get_unless_zero(&xprt->kref)) | |
1901 | return xprt; | |
1902 | return NULL; | |
1903 | } | |
1904 | EXPORT_SYMBOL_GPL(xprt_get); | |
1905 | ||
6b6ca86b TM |
1906 | /** |
1907 | * xprt_put - release a reference to an RPC transport. | |
1908 | * @xprt: pointer to the transport | |
1909 | * | |
1910 | */ | |
1911 | void xprt_put(struct rpc_xprt *xprt) | |
1912 | { | |
30c5116b TM |
1913 | if (xprt != NULL) |
1914 | kref_put(&xprt->kref, xprt_destroy_kref); | |
6b6ca86b | 1915 | } |
5d252f90 | 1916 | EXPORT_SYMBOL_GPL(xprt_put); |
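/*
 * Hypothetical user of the reference API: take a reference before
 * handing the transport to another context, and drop it when done.
 * xprt_get() returns NULL once the last reference is gone, so the
 * result must be checked.
 */
static void example_use_xprt(struct rpc_xprt *xprt)
{
	struct rpc_xprt *held = xprt_get(xprt);

	if (held == NULL)
		return;		/* transport already being torn down */
	/* ... held may be dereferenced safely here ... */
	xprt_put(held);
}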