/*
 * linux/fs/9p/mux.c
 *
 * Protocol Multiplexer
 *
 *  Copyright (C) 2004 by Eric Van Hensbergen <[email protected]>
 *  Copyright (C) 2004-2005 by Latchesar Ionkov <[email protected]>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/kthread.h>
#include <linux/idr.h>
#include <linux/mutex.h>

#include "debug.h"
#include "v9fs.h"
#include "9p.h"
#include "conv.h"
#include "transport.h"
#include "mux.h"

#define ERREQFLUSH	1
#define SCHED_TIMEOUT	10
#define MAXPOLLWADDR	2

enum {
	Rworksched = 1,		/* read work scheduled or running */
	Rpending = 2,		/* can read */
	Wworksched = 4,		/* write work scheduled or running */
	Wpending = 8,		/* can write */
};

struct v9fs_mux_poll_task;

struct v9fs_req {
	int tag;
	struct v9fs_fcall *tcall;
	struct v9fs_fcall *rcall;
	int err;
	v9fs_mux_req_callback cb;
	void *cba;
	struct list_head req_list;
};

struct v9fs_mux_data {
	spinlock_t lock;		/* protects the request lists below */
	struct list_head mux_list;	/* entry in the poll task's mux list */
	struct v9fs_mux_poll_task *poll_task;
	int msize;			/* maximum message size */
	unsigned char *extended;	/* 9P2000.u extension flag, shared with the session */
	struct v9fs_transport *trans;
	struct v9fs_idpool tagpool;	/* pool of request tags */
	int err;			/* first error seen on this connection */
	wait_queue_head_t equeue;	/* woken when a request completes */
	struct list_head req_list;	/* sent requests awaiting a response */
	struct list_head unsent_req_list;	/* requests queued for sending */
	struct v9fs_fcall *rcall;	/* response currently being assembled */
	int rpos;			/* bytes of the response received so far */
	char *rbuf;			/* receive buffer (inside rcall) */
	int wpos;			/* bytes of the current request already written */
	int wsize;			/* size of the request being written */
	char *wbuf;			/* transmit buffer (the request's tcall data) */
	wait_queue_t poll_wait[MAXPOLLWADDR];
	wait_queue_head_t *poll_waddr[MAXPOLLWADDR];
	poll_table pt;
	struct work_struct rq;		/* read work */
	struct work_struct wq;		/* write work */
	unsigned long wsched;		/* R/W work and pending flags, see enum above */
};

struct v9fs_mux_poll_task {
	struct task_struct *task;
	struct list_head mux_list;
	int muxnum;
};

struct v9fs_mux_rpc {
	struct v9fs_mux_data *m;
	struct v9fs_req *req;
	int err;
	struct v9fs_fcall *rcall;
	wait_queue_head_t wqueue;
};

static int v9fs_poll_proc(void *);
static void v9fs_read_work(void *);
static void v9fs_write_work(void *);
static void v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address,
			  poll_table * p);
static u16 v9fs_mux_get_tag(struct v9fs_mux_data *);
static void v9fs_mux_put_tag(struct v9fs_mux_data *, u16);

static DEFINE_MUTEX(v9fs_mux_task_lock);
static struct workqueue_struct *v9fs_mux_wq;

static int v9fs_mux_num;
static int v9fs_mux_poll_task_num;
static struct v9fs_mux_poll_task v9fs_mux_poll_tasks[100];

int v9fs_mux_global_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(v9fs_mux_poll_tasks); i++)
		v9fs_mux_poll_tasks[i].task = NULL;

	v9fs_mux_wq = create_workqueue("v9fs");
	if (!v9fs_mux_wq)
		return -ENOMEM;

	return 0;
}

void v9fs_mux_global_exit(void)
{
	destroy_workqueue(v9fs_mux_wq);
}

/**
 * v9fs_mux_calc_poll_procs - calculates the number of polling procs
 * based on the number of mounted v9fs filesystems.
 *
 * The current implementation spreads the mounts evenly over the poll
 * tasks: it returns the ceiling of muxnum divided by the number of
 * existing poll tasks (or 1 if there are none yet), capped at the size
 * of the poll task array.
 */
static int v9fs_mux_calc_poll_procs(int muxnum)
{
	int n;

	if (v9fs_mux_poll_task_num)
		n = muxnum / v9fs_mux_poll_task_num +
		    (muxnum % v9fs_mux_poll_task_num ? 1 : 0);
	else
		n = 1;

	if (n > ARRAY_SIZE(v9fs_mux_poll_tasks))
		n = ARRAY_SIZE(v9fs_mux_poll_tasks);

	return n;
}

static int v9fs_mux_poll_start(struct v9fs_mux_data *m)
{
	int i, n;
	struct v9fs_mux_poll_task *vpt, *vptlast;
	struct task_struct *pproc;

	dprintk(DEBUG_MUX, "mux %p muxnum %d procnum %d\n", m, v9fs_mux_num,
		v9fs_mux_poll_task_num);
	mutex_lock(&v9fs_mux_task_lock);

	n = v9fs_mux_calc_poll_procs(v9fs_mux_num + 1);
	if (n > v9fs_mux_poll_task_num) {
		for (i = 0; i < ARRAY_SIZE(v9fs_mux_poll_tasks); i++) {
			if (v9fs_mux_poll_tasks[i].task == NULL) {
				vpt = &v9fs_mux_poll_tasks[i];
				dprintk(DEBUG_MUX, "create proc %p\n", vpt);
				pproc = kthread_create(v9fs_poll_proc, vpt,
						       "v9fs-poll");

				if (!IS_ERR(pproc)) {
					vpt->task = pproc;
					INIT_LIST_HEAD(&vpt->mux_list);
					vpt->muxnum = 0;
					v9fs_mux_poll_task_num++;
					wake_up_process(vpt->task);
				}
				break;
			}
		}

		if (i >= ARRAY_SIZE(v9fs_mux_poll_tasks))
			dprintk(DEBUG_ERROR, "warning: no free poll slots\n");
	}

	n = (v9fs_mux_num + 1) / v9fs_mux_poll_task_num +
	    ((v9fs_mux_num + 1) % v9fs_mux_poll_task_num ? 1 : 0);

	vptlast = NULL;
	for (i = 0; i < ARRAY_SIZE(v9fs_mux_poll_tasks); i++) {
		vpt = &v9fs_mux_poll_tasks[i];
		if (vpt->task != NULL) {
			vptlast = vpt;
			if (vpt->muxnum < n) {
				dprintk(DEBUG_MUX, "put in proc %d\n", i);
				list_add(&m->mux_list, &vpt->mux_list);
				vpt->muxnum++;
				m->poll_task = vpt;
				memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
				init_poll_funcptr(&m->pt, v9fs_pollwait);
				break;
			}
		}
	}

	if (i >= ARRAY_SIZE(v9fs_mux_poll_tasks)) {
		if (vptlast == NULL) {
			/* no poll task exists at all; drop the lock before bailing out */
			mutex_unlock(&v9fs_mux_task_lock);
			return -ENOMEM;
		}

		dprintk(DEBUG_MUX, "put in proc %d\n", i);
		list_add(&m->mux_list, &vptlast->mux_list);
		vptlast->muxnum++;
		m->poll_task = vptlast;
		memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
		init_poll_funcptr(&m->pt, v9fs_pollwait);
	}

	v9fs_mux_num++;
	mutex_unlock(&v9fs_mux_task_lock);

	return 0;
}

static void v9fs_mux_poll_stop(struct v9fs_mux_data *m)
{
	int i;
	struct v9fs_mux_poll_task *vpt;

	mutex_lock(&v9fs_mux_task_lock);
	vpt = m->poll_task;
	list_del(&m->mux_list);
	for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
		if (m->poll_waddr[i] != NULL) {
			remove_wait_queue(m->poll_waddr[i], &m->poll_wait[i]);
			m->poll_waddr[i] = NULL;
		}
	}
	vpt->muxnum--;
	if (!vpt->muxnum) {
		dprintk(DEBUG_MUX, "destroy proc %p\n", vpt);
		send_sig(SIGKILL, vpt->task, 1);
		vpt->task = NULL;
		v9fs_mux_poll_task_num--;
	}
	v9fs_mux_num--;
	mutex_unlock(&v9fs_mux_task_lock);
}

/**
 * v9fs_mux_init - allocate and initialize the per-session mux data
 * Creates the polling task if this is the first session.
 *
 * @trans - transport structure
 * @msize - maximum message size
 * @extended - pointer to the extended flag
 */
struct v9fs_mux_data *v9fs_mux_init(struct v9fs_transport *trans, int msize,
				    unsigned char *extended)
{
	int i, n;
	struct v9fs_mux_data *m, *mtmp;

	dprintk(DEBUG_MUX, "transport %p msize %d\n", trans, msize);
	m = kmalloc(sizeof(struct v9fs_mux_data), GFP_KERNEL);
	if (!m)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&m->lock);
	INIT_LIST_HEAD(&m->mux_list);
	m->msize = msize;
	m->extended = extended;
	m->trans = trans;
	idr_init(&m->tagpool.pool);
	init_MUTEX(&m->tagpool.lock);
	m->err = 0;
	init_waitqueue_head(&m->equeue);
	INIT_LIST_HEAD(&m->req_list);
	INIT_LIST_HEAD(&m->unsent_req_list);
	m->rcall = NULL;
	m->rpos = 0;
	m->rbuf = NULL;
	m->wpos = m->wsize = 0;
	m->wbuf = NULL;
	INIT_WORK(&m->rq, v9fs_read_work, m);
	INIT_WORK(&m->wq, v9fs_write_work, m);
	m->wsched = 0;
	memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
	m->poll_task = NULL;
	n = v9fs_mux_poll_start(m);
	if (n) {
		/* the mux was never attached to a poll task, so just free it */
		kfree(m);
		return ERR_PTR(n);
	}

	n = trans->poll(trans, &m->pt);
	if (n & POLLIN) {
		dprintk(DEBUG_MUX, "mux %p can read\n", m);
		set_bit(Rpending, &m->wsched);
	}

	if (n & POLLOUT) {
		dprintk(DEBUG_MUX, "mux %p can write\n", m);
		set_bit(Wpending, &m->wsched);
	}

	for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
		if (IS_ERR(m->poll_waddr[i])) {
			v9fs_mux_poll_stop(m);
			mtmp = (void *)m->poll_waddr;	/* the error code */
			kfree(m);
			m = mtmp;
			break;
		}
	}

	return m;
}

/**
 * v9fs_mux_destroy - cancels all pending requests and frees mux resources
 */
void v9fs_mux_destroy(struct v9fs_mux_data *m)
{
	dprintk(DEBUG_MUX, "mux %p prev %p next %p\n", m,
		m->mux_list.prev, m->mux_list.next);
	v9fs_mux_cancel(m, -ECONNRESET);

	if (!list_empty(&m->req_list)) {
		/* wait until all processes waiting on this session exit */
		dprintk(DEBUG_MUX, "mux %p waiting for empty request queue\n",
			m);
		wait_event_timeout(m->equeue, (list_empty(&m->req_list)), 5000);
		dprintk(DEBUG_MUX, "mux %p request queue empty: %d\n", m,
			list_empty(&m->req_list));
	}

	v9fs_mux_poll_stop(m);
	m->trans = NULL;

	kfree(m);
}

/**
 * v9fs_pollwait - called by the file's poll operation to add the
 * v9fs-poll task to the file's wait queue
 */
static void
v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address,
	      poll_table * p)
{
	int i;
	struct v9fs_mux_data *m;

	m = container_of(p, struct v9fs_mux_data, pt);
	for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++)
		if (m->poll_waddr[i] == NULL)
			break;

	if (i >= ARRAY_SIZE(m->poll_waddr)) {
		dprintk(DEBUG_ERROR, "not enough wait_address slots\n");
		return;
	}

	m->poll_waddr[i] = wait_address;

	if (!wait_address) {
		dprintk(DEBUG_ERROR, "no wait_address\n");
		m->poll_waddr[i] = ERR_PTR(-EIO);
		return;
	}

	init_waitqueue_entry(&m->poll_wait[i], m->poll_task->task);
	add_wait_queue(wait_address, &m->poll_wait[i]);
}

/**
 * v9fs_poll_mux - polls a mux and schedules read or write work if necessary
 */
static void v9fs_poll_mux(struct v9fs_mux_data *m)
{
	int n;

	if (m->err < 0)
		return;

	n = m->trans->poll(m->trans, NULL);
	if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
		dprintk(DEBUG_MUX, "error mux %p err %d\n", m, n);
		if (n >= 0)
			n = -ECONNRESET;
		v9fs_mux_cancel(m, n);
	}

	if (n & POLLIN) {
		set_bit(Rpending, &m->wsched);
		dprintk(DEBUG_MUX, "mux %p can read\n", m);
		if (!test_and_set_bit(Rworksched, &m->wsched)) {
			dprintk(DEBUG_MUX, "schedule read work mux %p\n", m);
			queue_work(v9fs_mux_wq, &m->rq);
		}
	}

	if (n & POLLOUT) {
		set_bit(Wpending, &m->wsched);
		dprintk(DEBUG_MUX, "mux %p can write\n", m);
		if ((m->wsize || !list_empty(&m->unsent_req_list))
		    && !test_and_set_bit(Wworksched, &m->wsched)) {
			dprintk(DEBUG_MUX, "schedule write work mux %p\n", m);
			queue_work(v9fs_mux_wq, &m->wq);
		}
	}
}

/**
 * v9fs_poll_proc - polls all v9fs transports for new events and queues
 * the appropriate work to the work queue
 */
static int v9fs_poll_proc(void *a)
{
	struct v9fs_mux_data *m, *mtmp;
	struct v9fs_mux_poll_task *vpt;

	vpt = a;
	dprintk(DEBUG_MUX, "start %p %p\n", current, vpt);
	allow_signal(SIGKILL);
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		list_for_each_entry_safe(m, mtmp, &vpt->mux_list, mux_list) {
			v9fs_poll_mux(m);
		}

		dprintk(DEBUG_MUX, "sleeping...\n");
		schedule_timeout(SCHED_TIMEOUT * HZ);
	}

	__set_current_state(TASK_RUNNING);
	dprintk(DEBUG_MUX, "finish\n");
	return 0;
}

/**
 * v9fs_write_work - called when a transport can send some data
 */
static void v9fs_write_work(void *a)
{
	int n, err;
	struct v9fs_mux_data *m;
	struct v9fs_req *req;

	m = a;

	if (m->err < 0) {
		clear_bit(Wworksched, &m->wsched);
		return;
	}

	if (!m->wsize) {
		if (list_empty(&m->unsent_req_list)) {
			clear_bit(Wworksched, &m->wsched);
			return;
		}

		spin_lock(&m->lock);
again:
		req = list_entry(m->unsent_req_list.next, struct v9fs_req,
				 req_list);
		list_move_tail(&req->req_list, &m->req_list);
		if (req->err == ERREQFLUSH)
			goto again;

		m->wbuf = req->tcall->sdata;
		m->wsize = req->tcall->size;
		m->wpos = 0;
		dump_data(m->wbuf, m->wsize);
		spin_unlock(&m->lock);
	}

	dprintk(DEBUG_MUX, "mux %p pos %d size %d\n", m, m->wpos, m->wsize);
	clear_bit(Wpending, &m->wsched);
	err = m->trans->write(m->trans, m->wbuf + m->wpos, m->wsize - m->wpos);
	dprintk(DEBUG_MUX, "mux %p sent %d bytes\n", m, err);
	if (err == -EAGAIN) {
		clear_bit(Wworksched, &m->wsched);
		return;
	}

	if (err <= 0)
		goto error;

	m->wpos += err;
	if (m->wpos == m->wsize)
		m->wpos = m->wsize = 0;

	if (m->wsize == 0 && !list_empty(&m->unsent_req_list)) {
		if (test_and_clear_bit(Wpending, &m->wsched))
			n = POLLOUT;
		else
			n = m->trans->poll(m->trans, NULL);

		if (n & POLLOUT) {
			dprintk(DEBUG_MUX, "schedule write work mux %p\n", m);
			queue_work(v9fs_mux_wq, &m->wq);
		} else
			clear_bit(Wworksched, &m->wsched);
	} else
		clear_bit(Wworksched, &m->wsched);

	return;

error:
	v9fs_mux_cancel(m, err);
	clear_bit(Wworksched, &m->wsched);
}

static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req)
{
	int ecode, tag;
	struct v9fs_str *ename;

	tag = req->tag;
	if (!req->err && req->rcall->id == RERROR) {
		ecode = req->rcall->params.rerror.errno;
		ename = &req->rcall->params.rerror.error;

		dprintk(DEBUG_MUX, "Rerror %.*s\n", ename->len, ename->str);

		/*
		 * 9P2000.u servers return a numeric errno alongside the error
		 * string; legacy 9P2000 servers only send the string, so map
		 * it to an errno (falling back to -ESERVERFAULT).
		 */
		if (*m->extended)
			req->err = -ecode;

		if (!req->err) {
			req->err = v9fs_errstr2errno(ename->str, ename->len);

			if (!req->err) {	/* string match failed */
				PRINT_FCALL_ERROR("unknown error", req->rcall);
			}

			if (!req->err)
				req->err = -ESERVERFAULT;
		}
	} else if (req->tcall && req->rcall->id != req->tcall->id + 1) {
		dprintk(DEBUG_ERROR, "fcall mismatch: expected %d, got %d\n",
			req->tcall->id + 1, req->rcall->id);
		if (!req->err)
			req->err = -EIO;
	}

	if (req->err == ERREQFLUSH)
		return;

	if (req->cb) {
		dprintk(DEBUG_MUX, "calling callback tcall %p rcall %p\n",
			req->tcall, req->rcall);

		(*req->cb) (req->cba, req->tcall, req->rcall, req->err);
		req->cb = NULL;
	} else
		kfree(req->rcall);

	v9fs_mux_put_tag(m, tag);

	wake_up(&m->equeue);
	kfree(req);
}

/**
 * v9fs_read_work - called when there is some data to be read from a transport
 */
static void v9fs_read_work(void *a)
{
	int n, err;
	struct v9fs_mux_data *m;
	struct v9fs_req *req, *rptr, *rreq;
	struct v9fs_fcall *rcall;
	char *rbuf;

	m = a;

	if (m->err < 0)
		return;

	rcall = NULL;
	dprintk(DEBUG_MUX, "start mux %p pos %d\n", m, m->rpos);

	if (!m->rcall) {
		m->rcall =
		    kmalloc(sizeof(struct v9fs_fcall) + m->msize, GFP_KERNEL);
		if (!m->rcall) {
			err = -ENOMEM;
			goto error;
		}

		m->rbuf = (char *)m->rcall + sizeof(struct v9fs_fcall);
		m->rpos = 0;
	}

	clear_bit(Rpending, &m->wsched);
	err = m->trans->read(m->trans, m->rbuf + m->rpos, m->msize - m->rpos);
	dprintk(DEBUG_MUX, "mux %p got %d bytes\n", m, err);
	if (err == -EAGAIN) {
		clear_bit(Rworksched, &m->wsched);
		return;
	}

	if (err <= 0)
		goto error;

	m->rpos += err;
	/* as long as more than the 4-byte size prefix has arrived, decode complete packets */
	while (m->rpos > 4) {
		n = le32_to_cpu(*(__le32 *) m->rbuf);
		if (n >= m->msize) {
			dprintk(DEBUG_ERROR,
				"requested packet size too big: %d\n", n);
			err = -EIO;
			goto error;
		}

		if (m->rpos < n)
			break;

		dump_data(m->rbuf, n);
		err =
		    v9fs_deserialize_fcall(m->rbuf, n, m->rcall, *m->extended);
		if (err < 0) {
			goto error;
		}

		if ((v9fs_debug_level & DEBUG_FCALL) == DEBUG_FCALL) {
			char buf[150];

			v9fs_printfcall(buf, sizeof(buf), m->rcall,
					*m->extended);
			printk(KERN_NOTICE ">>> %p %s\n", m, buf);
		}

		rcall = m->rcall;
		rbuf = m->rbuf;
		if (m->rpos > n) {
			/* data for the next packet arrived too; move it into a fresh buffer */
			m->rcall = kmalloc(sizeof(struct v9fs_fcall) + m->msize,
					   GFP_KERNEL);
			if (!m->rcall) {
				err = -ENOMEM;
				goto error;
			}

			m->rbuf = (char *)m->rcall + sizeof(struct v9fs_fcall);
			memmove(m->rbuf, rbuf + n, m->rpos - n);
			m->rpos -= n;
		} else {
			m->rcall = NULL;
			m->rbuf = NULL;
			m->rpos = 0;
		}

		dprintk(DEBUG_MUX, "mux %p fcall id %d tag %d\n", m, rcall->id,
			rcall->tag);

		req = NULL;
		spin_lock(&m->lock);
		list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
			if (rreq->tag == rcall->tag) {
				req = rreq;
				req->rcall = rcall;
				list_del(&req->req_list);
				spin_unlock(&m->lock);
				process_request(m, req);
				break;
			}
		}

		if (!req) {
			spin_unlock(&m->lock);
			if (err >= 0 && rcall->id != RFLUSH)
				dprintk(DEBUG_ERROR,
					"unexpected response mux %p id %d tag %d\n",
					m, rcall->id, rcall->tag);
			kfree(rcall);
		}
	}

	if (!list_empty(&m->req_list)) {
		if (test_and_clear_bit(Rpending, &m->wsched))
			n = POLLIN;
		else
			n = m->trans->poll(m->trans, NULL);

		if (n & POLLIN) {
			dprintk(DEBUG_MUX, "schedule read work mux %p\n", m);
			queue_work(v9fs_mux_wq, &m->rq);
		} else
			clear_bit(Rworksched, &m->wsched);
	} else
		clear_bit(Rworksched, &m->wsched);

	return;

error:
	v9fs_mux_cancel(m, err);
	clear_bit(Rworksched, &m->wsched);
}

/**
 * v9fs_send_request - send 9P request
 * The function can sleep until the request is scheduled for sending.
 * The function can be interrupted. Returning from the function is not
 * a guarantee that the request was sent successfully. Can return errors
 * that can be retrieved by PTR_ERR macros.
 *
 * @m: mux data
 * @tc: request to be sent
 * @cb: callback function to call when response is received
 * @cba: parameter to pass to the callback function
 */
static struct v9fs_req *v9fs_send_request(struct v9fs_mux_data *m,
					  struct v9fs_fcall *tc,
					  v9fs_mux_req_callback cb, void *cba)
{
	int n;
	struct v9fs_req *req;

	dprintk(DEBUG_MUX, "mux %p task %p tcall %p id %d\n", m, current,
		tc, tc->id);
	if (m->err < 0)
		return ERR_PTR(m->err);

	req = kmalloc(sizeof(struct v9fs_req), GFP_KERNEL);
	if (!req)
		return ERR_PTR(-ENOMEM);

	if (tc->id == TVERSION)
		n = V9FS_NOTAG;
	else
		n = v9fs_mux_get_tag(m);

	if (n < 0) {
		kfree(req);	/* don't leak the request if no tag is available */
		return ERR_PTR(-ENOMEM);
	}

	v9fs_set_tag(tc, n);

	if ((v9fs_debug_level & DEBUG_FCALL) == DEBUG_FCALL) {
		char buf[150];

		v9fs_printfcall(buf, sizeof(buf), tc, *m->extended);
		printk(KERN_NOTICE "<<< %p %s\n", m, buf);
	}

	req->tag = n;
	req->tcall = tc;
	req->rcall = NULL;
	req->err = 0;
	req->cb = cb;
	req->cba = cba;

	spin_lock(&m->lock);
	list_add_tail(&req->req_list, &m->unsent_req_list);
	spin_unlock(&m->lock);

	if (test_and_clear_bit(Wpending, &m->wsched))
		n = POLLOUT;
	else
		n = m->trans->poll(m->trans, NULL);

	if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
		queue_work(v9fs_mux_wq, &m->wq);

	return req;
}

static void v9fs_mux_flush_cb(void *a, struct v9fs_fcall *tc,
			      struct v9fs_fcall *rc, int err)
{
	v9fs_mux_req_callback cb;
	int tag;
	struct v9fs_mux_data *m;
	struct v9fs_req *req, *rptr;

	m = a;
	dprintk(DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m, tc,
		rc, err, tc->params.tflush.oldtag);

	spin_lock(&m->lock);
	cb = NULL;
	tag = tc->params.tflush.oldtag;
	list_for_each_entry_safe(req, rptr, &m->req_list, req_list) {
		if (req->tag == tag) {
			list_del(&req->req_list);
			if (req->cb) {
				cb = req->cb;
				req->cb = NULL;
				spin_unlock(&m->lock);
				(*cb) (req->cba, req->tcall, req->rcall,
				       req->err);
			}
			kfree(req);
			wake_up(&m->equeue);
			break;
		}
	}

	if (!cb)
		spin_unlock(&m->lock);

	v9fs_mux_put_tag(m, tag);
	kfree(tc);
	kfree(rc);
}

static void
v9fs_mux_flush_request(struct v9fs_mux_data *m, struct v9fs_req *req)
{
	struct v9fs_fcall *fc;

	dprintk(DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag);

	fc = v9fs_create_tflush(req->tag);
	v9fs_send_request(m, fc, v9fs_mux_flush_cb, m);
}

static void
v9fs_mux_rpc_cb(void *a, struct v9fs_fcall *tc, struct v9fs_fcall *rc, int err)
{
	struct v9fs_mux_rpc *r;

	if (err == ERREQFLUSH) {
		kfree(rc);
		dprintk(DEBUG_MUX, "err req flush\n");
		return;
	}

	r = a;
	dprintk(DEBUG_MUX, "mux %p req %p tc %p rc %p err %d\n", r->m, r->req,
		tc, rc, err);
	r->rcall = rc;
	r->err = err;
	wake_up(&r->wqueue);
}

/**
 * v9fs_mux_rpc - sends 9P request and waits until a response is available.
 * The function can be interrupted.
 * @m: mux data
 * @tc: request to be sent
 * @rc: pointer where a pointer to the response is stored
 */
int
v9fs_mux_rpc(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
	     struct v9fs_fcall **rc)
{
	int err;
	unsigned long flags;
	struct v9fs_req *req;
	struct v9fs_mux_rpc r;

	r.err = 0;
	r.rcall = NULL;
	r.m = m;
	init_waitqueue_head(&r.wqueue);

	if (rc)
		*rc = NULL;

	req = v9fs_send_request(m, tc, v9fs_mux_rpc_cb, &r);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		dprintk(DEBUG_MUX, "error %d\n", err);
		return PTR_ERR(req);
	}

	r.req = req;
	dprintk(DEBUG_MUX, "mux %p tc %p tag %d rpc %p req %p\n", m, tc,
		req->tag, &r, req);
	err = wait_event_interruptible(r.wqueue, r.rcall != NULL || r.err < 0);
	if (r.err < 0)
		err = r.err;

	if (err == -ERESTARTSYS && m->trans->status == Connected && m->err == 0) {
		/*
		 * The wait was interrupted by a signal: mark the request as
		 * flushed and send a Tflush for it.  The pending-signal flag
		 * is cleared temporarily so the flush can be transmitted and
		 * is recalculated afterwards.
		 */
		spin_lock(&m->lock);
		req->tcall = NULL;
		req->err = ERREQFLUSH;
		spin_unlock(&m->lock);

		clear_thread_flag(TIF_SIGPENDING);
		v9fs_mux_flush_request(m, req);
		spin_lock_irqsave(&current->sighand->siglock, flags);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}

	if (!err) {
		if (r.rcall)
			dprintk(DEBUG_MUX, "got response id %d tag %d\n",
				r.rcall->id, r.rcall->tag);

		if (rc)
			*rc = r.rcall;
		else
			kfree(r.rcall);
	} else {
		kfree(r.rcall);
		dprintk(DEBUG_MUX, "got error %d\n", err);
		if (err > 0)
			err = -EIO;
	}

	return err;
}

#if 0
/**
 * v9fs_mux_rpcnb - sends 9P request without waiting for response.
 * @m: mux data
 * @tc: request to be sent
 * @cb: callback function to be called when response arrives
 * @cba: value to pass to the callback function
 */
int v9fs_mux_rpcnb(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
		   v9fs_mux_req_callback cb, void *a)
{
	int err;
	struct v9fs_req *req;

	req = v9fs_send_request(m, tc, cb, a);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		dprintk(DEBUG_MUX, "error %d\n", err);
		return PTR_ERR(req);
	}

	dprintk(DEBUG_MUX, "mux %p tc %p tag %d\n", m, tc, req->tag);
	return 0;
}
#endif /* 0 */

/**
 * v9fs_mux_cancel - cancel all pending requests with error
 * @m: mux data
 * @err: error code
 */
void v9fs_mux_cancel(struct v9fs_mux_data *m, int err)
{
	struct v9fs_req *req, *rtmp;
	LIST_HEAD(cancel_list);

	dprintk(DEBUG_MUX, "mux %p err %d\n", m, err);
	m->err = err;
	spin_lock(&m->lock);
	list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
		list_move(&req->req_list, &cancel_list);
	}
	spin_unlock(&m->lock);

	list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
		list_del(&req->req_list);
		if (!req->err)
			req->err = err;

		if (req->cb)
			(*req->cb) (req->cba, req->tcall, req->rcall, req->err);
		else
			kfree(req->rcall);

		kfree(req);
	}

	wake_up(&m->equeue);
}

static u16 v9fs_mux_get_tag(struct v9fs_mux_data *m)
{
	int tag;

	tag = v9fs_get_idpool(&m->tagpool);
	if (tag < 0)
		return V9FS_NOTAG;
	else
		return (u16) tag;
}

static void v9fs_mux_put_tag(struct v9fs_mux_data *m, u16 tag)
{
	if (tag != V9FS_NOTAG && v9fs_check_idpool(tag, &m->tagpool))
		v9fs_put_idpool(tag, &m->tagpool);
}
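
/*
 * Illustrative sketch (not part of the original mux.c): how a session is
 * expected to drive the mux API above once a transport is connected.  The
 * fcall constructor used here (v9fs_create_tversion) lives in conv.c of the
 * same era; its exact signature is assumed, so treat this as a hedged
 * example under those assumptions, not working kernel code.
 */
#if 0
static int example_start_session(struct v9fs_transport *trans, int msize,
				 unsigned char *extended)
{
	struct v9fs_mux_data *mux;
	struct v9fs_fcall *tc, *rc = NULL;
	int err;

	/* one mux per session; this may also spawn the shared poll task */
	mux = v9fs_mux_init(trans, msize, extended);
	if (IS_ERR(mux))
		return PTR_ERR(mux);

	/* negotiate the protocol version synchronously over the mux */
	tc = v9fs_create_tversion(msize, "9P2000.u");
	err = v9fs_mux_rpc(mux, tc, &rc);
	kfree(tc);
	kfree(rc);

	if (err < 0)
		v9fs_mux_destroy(mux);

	return err;
}
#endif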