/*
 * Greybus operations
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/workqueue.h>

#include "greybus.h"

/* The default amount of time a request is given to complete */
#define OPERATION_TIMEOUT_DEFAULT	1000	/* milliseconds */

static struct kmem_cache *gb_operation_cache;
static struct kmem_cache *gb_message_cache;

/* Workqueue to handle Greybus operation completions. */
static struct workqueue_struct *gb_operation_workqueue;

/* Protects the cookie representing whether a message is in flight */
static DEFINE_MUTEX(gb_message_mutex);

/*
 * Protects access to connection operations lists, as well as
 * updates to operation->errno.
 */
static DEFINE_SPINLOCK(gb_operations_lock);

/*
 * Set an operation's result.
 *
 * Initially an outgoing operation's errno value is -EBADR.
 * If no error occurs before sending the request message, the only
 * valid value operation->errno can be set to is -EINPROGRESS,
 * indicating the request has been (or rather is about to be) sent.
 * At that point nobody should be looking at the result until the
 * response arrives.
 *
 * The first time the result gets set after the request has been
 * sent, that result "sticks."  That is, if two concurrent threads
 * race to set the result, the first one wins.  The return value
 * tells the caller whether its result was recorded; if not, the
 * caller has nothing more to do.
 *
 * The result value -EILSEQ is reserved to signal an implementation
 * error; if it's ever observed, the code performing the request has
 * done something fundamentally wrong.  It is an error to try to set
 * the result to -EBADR, and attempts to do so result in a warning,
 * and -EILSEQ is used instead.  Similarly, the only valid result
 * value to set for an operation in initial state is -EINPROGRESS.
 * Attempts to do otherwise will also record a (successful) -EILSEQ
 * operation result.
 */
static bool gb_operation_result_set(struct gb_operation *operation, int result)
{
	unsigned long flags;
	int prev;

	if (result == -EINPROGRESS) {
		/*
		 * -EINPROGRESS is used to indicate the request is
		 * in flight. It should be the first result value
		 * set after the initial -EBADR. Issue a warning
		 * and record an implementation error if it's
		 * set at any other time.
		 */
		spin_lock_irqsave(&gb_operations_lock, flags);
		prev = operation->errno;
		if (prev == -EBADR)
			operation->errno = result;
		else
			operation->errno = -EILSEQ;
		spin_unlock_irqrestore(&gb_operations_lock, flags);
		WARN_ON(prev != -EBADR);

		return true;
	}

	/*
	 * The first result value set after a request has been sent
	 * will be the final result of the operation. Subsequent
	 * attempts to set the result are ignored.
	 *
	 * Note that -EBADR is a reserved "initial state" result
	 * value. Attempts to set this value result in a warning,
	 * and the result code is set to -EILSEQ instead.
	 */
	if (WARN_ON(result == -EBADR))
		result = -EILSEQ;	/* Nobody should be setting -EBADR */

	spin_lock_irqsave(&gb_operations_lock, flags);
	prev = operation->errno;
	if (prev == -EINPROGRESS)
		operation->errno = result;	/* First and final result */
	spin_unlock_irqrestore(&gb_operations_lock, flags);

	return prev == -EINPROGRESS;
}

int gb_operation_result(struct gb_operation *operation)
{
	int result = operation->errno;

	WARN_ON(result == -EBADR);
	WARN_ON(result == -EINPROGRESS);

	return result;
}
EXPORT_SYMBOL_GPL(gb_operation_result);
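
/*
 * To illustrate the "sticky result" rules above (a sketch, not code
 * used by the driver): an outgoing operation's errno passes through
 * at most three states, and only the first post-send setter wins.
 *
 *	op->errno == -EBADR;			// created, never sent
 *	gb_operation_result_set(op, -EINPROGRESS);	// request sent
 *	gb_operation_result_set(op, 0);		// first result; it sticks
 *	gb_operation_result_set(op, -EIO);	// returns false, result stays 0
 */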

static struct gb_operation *
gb_operation_find(struct gb_connection *connection, u16 operation_id)
{
	struct gb_operation *operation;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&gb_operations_lock, flags);
	list_for_each_entry(operation, &connection->operations, links)
		if (operation->id == operation_id) {
			found = true;
			break;
		}
	spin_unlock_irqrestore(&gb_operations_lock, flags);

	return found ? operation : NULL;
}

static int gb_message_send(struct gb_message *message)
{
	struct gb_connection *connection = message->operation->connection;
	int ret = 0;
	void *cookie;

	mutex_lock(&gb_message_mutex);
	cookie = connection->hd->driver->message_send(connection->hd,
					connection->hd_cport_id,
					message,
					GFP_KERNEL);
	if (IS_ERR(cookie))
		ret = PTR_ERR(cookie);
	else
		message->cookie = cookie;
	mutex_unlock(&gb_message_mutex);

	return ret;
}

/*
 * Cancel a message we have passed to the host device layer to be sent.
 */
static void gb_message_cancel(struct gb_message *message)
{
	mutex_lock(&gb_message_mutex);
	if (message->cookie) {
		struct greybus_host_device *hd;

		hd = message->operation->connection->hd;
		hd->driver->message_cancel(message->cookie);
	}
	mutex_unlock(&gb_message_mutex);
}

static void gb_operation_request_handle(struct gb_operation *operation)
{
	struct gb_protocol *protocol = operation->connection->protocol;
	int status;
	int ret;

	if (!protocol)
		return;

	if (protocol->request_recv) {
		status = protocol->request_recv(operation->type, operation);
	} else {
		dev_err(&operation->connection->dev,
			"unexpected incoming request type 0x%02hhx\n",
			operation->type);

		status = -EPROTONOSUPPORT;
	}

	ret = gb_operation_response_send(operation, status);
	if (ret) {
		dev_err(&operation->connection->dev,
			"failed to send response %d: %d\n",
			status, ret);
		return;
	}
}

/*
 * Complete an operation in non-atomic context. For incoming
 * requests, the callback function is the request handler, and
 * the operation result should be -EINPROGRESS at this point.
 *
 * For outgoing requests, the operation result value should have
 * been set before queueing this. The operation callback function
 * allows the original requester to know the request has completed
 * and its result is available.
 */
static void gb_operation_work(struct work_struct *work)
{
	struct gb_operation *operation;

	operation = container_of(work, struct gb_operation, work);

	operation->callback(operation);

	gb_operation_put(operation);
}

static void gb_operation_message_init(struct greybus_host_device *hd,
				struct gb_message *message, u16 operation_id,
				size_t payload_size, u8 type)
{
	struct gb_operation_msg_hdr *header;

	header = message->buffer;

	message->header = header;
	message->payload = payload_size ? header + 1 : NULL;
	message->payload_size = payload_size;

	/*
	 * The type supplied for incoming message buffers will be
	 * 0x00. Such buffers will be overwritten by arriving data
	 * so there's no need to initialize the message header.
	 */
	if (type != GB_OPERATION_TYPE_INVALID) {
		u16 message_size = (u16)(sizeof(*header) + payload_size);

		/*
		 * For a request, the operation id gets filled in
		 * when the message is sent. For a response, it
		 * will be copied from the request by the caller.
		 *
		 * The result field in a request message must be
		 * zero. It will be set just prior to sending for
		 * a response.
		 */
		header->size = cpu_to_le16(message_size);
		header->operation_id = 0;
		header->type = type;
		header->result = 0;
	}
}

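/*
 * For reference, the message header initialized above is defined in
 * the greybus headers, not in this file; assuming the layout in use
 * at this point, it looks roughly like this (little-endian multi-byte
 * fields):
 *
 *	struct gb_operation_msg_hdr {
 *		__le16	size;		// header plus payload, in bytes
 *		__le16	operation_id;	// 0 is reserved (no response wanted)
 *		__u8	type;		// GB_MESSAGE_TYPE_RESPONSE flag in replies
 *		__u8	result;		// status byte, used in responses only
 *		__u8	pad[2];		// assumed padding to an 8-byte header
 *	};
 */
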
/*
 * Allocate a message to be used for an operation request or response.
 * Both types of message contain a common header. The request message
 * for an outgoing operation is outbound, as is the response message
 * for an incoming operation. The message header for an outbound
 * message is partially initialized here.
 *
 * The headers for inbound messages don't need to be initialized;
 * they'll be filled in by arriving data.
 *
 * Our message buffers have the following layout:
 *	message header  \_ these combined are
 *	message payload /  the message size
 */
static struct gb_message *
gb_operation_message_alloc(struct greybus_host_device *hd, u8 type,
				size_t payload_size, gfp_t gfp_flags)
{
	struct gb_message *message;
	struct gb_operation_msg_hdr *header;
	size_t message_size = payload_size + sizeof(*header);

	if (message_size > hd->buffer_size_max) {
		pr_warn("requested message size too big (%zu > %zu)\n",
				message_size, hd->buffer_size_max);
		return NULL;
	}

	/* Allocate the message structure and buffer. */
	message = kmem_cache_zalloc(gb_message_cache, gfp_flags);
	if (!message)
		return NULL;

	message->buffer = kzalloc(message_size, gfp_flags);
	if (!message->buffer)
		goto err_free_message;

	/* Initialize the message. Operation id is filled in later. */
	gb_operation_message_init(hd, message, 0, payload_size, type);

	return message;

err_free_message:
	kmem_cache_free(gb_message_cache, message);

	return NULL;
}

static void gb_operation_message_free(struct gb_message *message)
{
	kfree(message->buffer);
	kmem_cache_free(gb_message_cache, message);
}

/*
 * Map an enum gb_operation_status value (which is represented in a
 * message as a single byte) to an appropriate Linux negative errno.
 */
static int gb_operation_status_map(u8 status)
{
	switch (status) {
	case GB_OP_SUCCESS:
		return 0;
	case GB_OP_INTERRUPTED:
		return -EINTR;
	case GB_OP_TIMEOUT:
		return -ETIMEDOUT;
	case GB_OP_NO_MEMORY:
		return -ENOMEM;
	case GB_OP_PROTOCOL_BAD:
		return -EPROTONOSUPPORT;
	case GB_OP_OVERFLOW:
		return -EMSGSIZE;
	case GB_OP_INVALID:
		return -EINVAL;
	case GB_OP_RETRY:
		return -EAGAIN;
	case GB_OP_NONEXISTENT:
		return -ENODEV;
	case GB_OP_MALFUNCTION:
		return -EILSEQ;
	case GB_OP_UNKNOWN_ERROR:
	default:
		return -EIO;
	}
}

/*
 * Map a Linux errno value (from operation->errno) into the value
 * that should represent it in a response message status sent
 * over the wire. Returns an enum gb_operation_status value (which
 * is represented in a message as a single byte).
 */
static u8 gb_operation_errno_map(int errno)
{
	switch (errno) {
	case 0:
		return GB_OP_SUCCESS;
	case -EINTR:
		return GB_OP_INTERRUPTED;
	case -ETIMEDOUT:
		return GB_OP_TIMEOUT;
	case -ENOMEM:
		return GB_OP_NO_MEMORY;
	case -EPROTONOSUPPORT:
		return GB_OP_PROTOCOL_BAD;
	case -EMSGSIZE:
		return GB_OP_OVERFLOW;	/* Could be underflow too */
	case -EINVAL:
		return GB_OP_INVALID;
	case -EAGAIN:
		return GB_OP_RETRY;
	case -EILSEQ:
		return GB_OP_MALFUNCTION;
	case -ENODEV:
		return GB_OP_NONEXISTENT;
	case -EIO:
	default:
		return GB_OP_UNKNOWN_ERROR;
	}
}

bool gb_operation_response_alloc(struct gb_operation *operation,
					size_t response_size)
{
	struct greybus_host_device *hd = operation->connection->hd;
	struct gb_operation_msg_hdr *request_header;
	struct gb_message *response;
	u8 type;

	type = operation->type | GB_MESSAGE_TYPE_RESPONSE;
	response = gb_operation_message_alloc(hd, type, response_size,
						GFP_KERNEL);
	if (!response)
		return false;
	response->operation = operation;

	/*
	 * Size and type get initialized when the message is
	 * allocated. The errno will be set before sending. All
	 * that's left is the operation id, which we copy from the
	 * request message header (as-is, in little-endian order).
	 */
	request_header = operation->request->header;
	response->header->operation_id = request_header->operation_id;
	operation->response = response;

	return true;
}
EXPORT_SYMBOL_GPL(gb_operation_response_alloc);

/*
 * Create a Greybus operation to be sent over the given connection.
 * The request buffer will be big enough for a payload of the given
 * size.
 *
 * For outgoing requests, the request message's header will be
 * initialized with the type of the request and the message size.
 * Outgoing operations must also specify the response buffer size,
 * which must be sufficient to hold all expected response data. The
 * response message header will eventually be overwritten, so there's
 * no need to initialize it here.
 *
 * Request messages for incoming operations can arrive in interrupt
 * context, so they must be allocated with GFP_ATOMIC. In this case
 * the request buffer will be immediately overwritten, so there is
 * no need to initialize the message header. Responsibility for
 * allocating a response buffer lies with the incoming request
 * handler for a protocol. So we don't allocate that here.
 *
 * Returns a pointer to the new operation or a null pointer if an
 * error occurs.
 */
static struct gb_operation *
gb_operation_create_common(struct gb_connection *connection, u8 type,
				size_t request_size, size_t response_size)
{
	struct greybus_host_device *hd = connection->hd;
	struct gb_operation *operation;
	unsigned long flags;
	gfp_t gfp_flags;

	/*
	 * An incoming request will pass an invalid operation type,
	 * because the header will get overwritten anyway. These
	 * occur in interrupt context, so we must use GFP_ATOMIC.
	 */
	if (type == GB_OPERATION_TYPE_INVALID)
		gfp_flags = GFP_ATOMIC;
	else
		gfp_flags = GFP_KERNEL;
	operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags);
	if (!operation)
		return NULL;
	operation->connection = connection;

	operation->request = gb_operation_message_alloc(hd, type, request_size,
							gfp_flags);
	if (!operation->request)
		goto err_cache;
	operation->request->operation = operation;

	/* Allocate the response buffer for outgoing operations */
	if (type != GB_OPERATION_TYPE_INVALID) {
		if (!gb_operation_response_alloc(operation, response_size))
			goto err_request;
		operation->type = type;
	}
	operation->errno = -EBADR;	/* Initial value--means "never set" */

	INIT_WORK(&operation->work, gb_operation_work);
	init_completion(&operation->completion);
	kref_init(&operation->kref);

	spin_lock_irqsave(&gb_operations_lock, flags);
	list_add_tail(&operation->links, &connection->operations);
	spin_unlock_irqrestore(&gb_operations_lock, flags);

	return operation;

err_request:
	gb_operation_message_free(operation->request);
err_cache:
	kmem_cache_free(gb_operation_cache, operation);

	return NULL;
}

/*
 * Create a new operation associated with the given connection. The
 * request and response sizes provided are the number of bytes
 * required to hold the request/response payload only. Both of
 * these are allowed to be 0. Note that 0x00 is reserved as an
 * invalid operation type for all protocols, and this is enforced
 * here.
 */
struct gb_operation *gb_operation_create(struct gb_connection *connection,
					u8 type, size_t request_size,
					size_t response_size)
{
	if (WARN_ON_ONCE(type == GB_OPERATION_TYPE_INVALID))
		return NULL;
	if (WARN_ON_ONCE(type & GB_MESSAGE_TYPE_RESPONSE))
		type &= ~GB_MESSAGE_TYPE_RESPONSE;

	return gb_operation_create_common(connection, type,
						request_size, response_size);
}
EXPORT_SYMBOL_GPL(gb_operation_create);
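
/*
 * A minimal usage sketch (illustrative only; the operation type and
 * payload structs here are hypothetical, not defined by greybus): a
 * protocol driver creates an operation, fills in the request payload,
 * then hands it to one of the send functions below.
 *
 *	struct gb_operation *op;
 *	struct example_req *req;	// hypothetical payload layout
 *
 *	op = gb_operation_create(connection, EXAMPLE_TYPE_PING,
 *				 sizeof(*req), sizeof(struct example_resp));
 *	if (!op)
 *		return -ENOMEM;
 *	req = op->request->payload;
 *	req->value = cpu_to_le32(42);	// payload goes out on the wire as-is
 */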

size_t gb_operation_get_payload_size_max(struct gb_connection *connection)
{
	struct greybus_host_device *hd = connection->hd;

	return hd->buffer_size_max - sizeof(struct gb_operation_msg_hdr);
}
EXPORT_SYMBOL_GPL(gb_operation_get_payload_size_max);

static struct gb_operation *
gb_operation_create_incoming(struct gb_connection *connection, u16 id,
				u8 type, void *data, size_t size)
{
	struct gb_operation *operation;
	size_t request_size;

	/* Caller has made sure we at least have a message header. */
	request_size = size - sizeof(struct gb_operation_msg_hdr);

	operation = gb_operation_create_common(connection,
						GB_OPERATION_TYPE_INVALID,
						request_size, 0);
	if (operation) {
		operation->id = id;
		operation->type = type;
		memcpy(operation->request->header, data, size);
	}

	return operation;
}

/*
 * Get an additional reference on an operation.
 */
void gb_operation_get(struct gb_operation *operation)
{
	kref_get(&operation->kref);
}
EXPORT_SYMBOL_GPL(gb_operation_get);

/*
 * Destroy a previously created operation.
 */
static void _gb_operation_destroy(struct kref *kref)
{
	struct gb_operation *operation;
	unsigned long flags;

	operation = container_of(kref, struct gb_operation, kref);

	/* XXX Make sure it's not in flight */
	spin_lock_irqsave(&gb_operations_lock, flags);
	list_del(&operation->links);
	spin_unlock_irqrestore(&gb_operations_lock, flags);

	if (operation->response)
		gb_operation_message_free(operation->response);
	gb_operation_message_free(operation->request);

	kmem_cache_free(gb_operation_cache, operation);
}

/*
 * Drop a reference on an operation, and destroy it when the last
 * one is gone.
 */
void gb_operation_put(struct gb_operation *operation)
{
	if (!WARN_ON(!operation))
		kref_put(&operation->kref, _gb_operation_destroy);
}
EXPORT_SYMBOL_GPL(gb_operation_put);

/* Tell the requester we're done */
static void gb_operation_sync_callback(struct gb_operation *operation)
{
	complete(&operation->completion);
}

/*
 * Send an operation request message. The caller has filled in any payload so
 * the request message is ready to go. The callback function supplied will be
 * called when the response message has arrived indicating the operation is
 * complete. In that case, the callback function is responsible for fetching
 * the result of the operation using gb_operation_result() if desired, and
 * dropping the initial reference to the operation.
 */
int gb_operation_request_send(struct gb_operation *operation,
				gb_operation_callback callback)
{
	struct gb_connection *connection = operation->connection;
	struct gb_operation_msg_hdr *header;
	unsigned int cycle;
	int ret;

	if (!callback)
		return -EINVAL;

	if (connection->state != GB_CONNECTION_STATE_ENABLED)
		return -ENOTCONN;

	/*
	 * First, get an extra reference on the operation.
	 * It'll be dropped when the operation completes.
	 */
	gb_operation_get(operation);

	/*
	 * Record the callback function, which is executed in
	 * non-atomic (workqueue) context when the final result
	 * of an operation has been set.
	 */
	operation->callback = callback;

	/*
	 * Assign the operation's id, and store it in the request header.
	 * Zero is a reserved operation id.
	 */
	cycle = (unsigned int)atomic_inc_return(&connection->op_cycle);
	operation->id = (u16)(cycle % U16_MAX + 1);
	header = operation->request->header;
	header->operation_id = cpu_to_le16(operation->id);

	/* All set, send the request */
	gb_operation_result_set(operation, -EINPROGRESS);

	ret = gb_message_send(operation->request);
	if (ret)
		gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_request_send);
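
/*
 * Sketch of asynchronous use (illustrative; example_complete() is a
 * hypothetical caller-supplied function): the callback runs in
 * workqueue context once the final result has been set, and it is
 * responsible for dropping the initial operation reference.
 *
 *	static void example_complete(struct gb_operation *op)
 *	{
 *		int status = gb_operation_result(op);
 *
 *		if (status)
 *			dev_err(&op->connection->dev,
 *				"ping failed: %d\n", status);
 *		gb_operation_put(op);	// drop the initial reference
 *	}
 *
 *	ret = gb_operation_request_send(op, example_complete);
 */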

/*
 * Send a synchronous operation. This function is expected to
 * block, returning only when the response has arrived (or when an
 * error is detected). The return value is the result of the
 * operation.
 */
int gb_operation_request_send_sync(struct gb_operation *operation)
{
	int ret;
	unsigned long timeout;

	ret = gb_operation_request_send(operation, gb_operation_sync_callback);
	if (ret)
		return ret;

	timeout = msecs_to_jiffies(OPERATION_TIMEOUT_DEFAULT);
	ret = wait_for_completion_interruptible_timeout(&operation->completion, timeout);
	if (ret < 0) {
		/* Cancel the operation if interrupted */
		gb_operation_cancel(operation, -ECANCELED);
	} else if (ret == 0) {
		/* Cancel the operation if it timed out */
		gb_operation_cancel(operation, -ETIMEDOUT);
	}

	return gb_operation_result(operation);
}
EXPORT_SYMBOL_GPL(gb_operation_request_send_sync);

/*
 * Send a response for an incoming operation request. A non-zero
 * errno indicates a failed operation.
 *
 * If there is any response payload, the incoming request handler is
 * responsible for allocating the response message. Otherwise it
 * can simply supply the result errno; this function will allocate
 * the response message if necessary.
 */
int gb_operation_response_send(struct gb_operation *operation, int errno)
{
	struct gb_connection *connection = operation->connection;
	int ret;

	/* Record the result */
	if (!gb_operation_result_set(operation, errno)) {
		dev_err(&connection->dev, "request result already set\n");
		return -EIO;	/* Shouldn't happen */
	}

	/* Sender of request does not care about response. */
	if (!operation->id)
		return 0;

	if (!operation->response) {
		if (!gb_operation_response_alloc(operation, 0)) {
			dev_err(&connection->dev,
				"error allocating response\n");
			/* XXX Respond with pre-allocated -ENOMEM? */
			return -ENOMEM;
		}
	}

	/* Reference will be dropped when message has been sent. */
	gb_operation_get(operation);

	/* Fill in the response header and send it */
	operation->response->header->result = gb_operation_errno_map(errno);

	ret = gb_message_send(operation->response);
	if (ret)
		gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_response_send);
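
/*
 * Sketch of how a protocol's request_recv handler might build its
 * response (illustrative; the payload struct is hypothetical). With a
 * response payload, the handler allocates the response itself; with
 * none, it can simply return a status and gb_operation_response_send()
 * will allocate an empty response on its behalf.
 *
 *	static int example_request_recv(u8 type, struct gb_operation *op)
 *	{
 *		struct example_resp *resp;	// hypothetical layout
 *
 *		if (!gb_operation_response_alloc(op, sizeof(*resp)))
 *			return -ENOMEM;
 *		resp = op->response->payload;
 *		resp->status = cpu_to_le32(0);
 *
 *		return 0;	// gb_operation_request_handle() sends it
 *	}
 */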

/*
 * This function is called when a message send request has completed.
 */
void greybus_message_sent(struct greybus_host_device *hd,
				struct gb_message *message, int status)
{
	struct gb_operation *operation;

	/* Get the message and record that it is no longer in flight */
	message->cookie = NULL;

	/*
	 * If the message was a response, we just need to drop our
	 * reference to the operation. If an error occurred, report
	 * it.
	 *
	 * For requests, if there's no error, there's nothing more
	 * to do until the response arrives. If an error occurred
	 * attempting to send it, record that as the result of
	 * the operation and schedule its completion.
	 */
	operation = message->operation;
	if (message == operation->response) {
		if (status) {
			dev_err(&operation->connection->dev,
				"error sending response: %d\n", status);
		}
		gb_operation_put(operation);
	} else if (status) {
		if (gb_operation_result_set(operation, status))
			queue_work(gb_operation_workqueue, &operation->work);
	}
}
EXPORT_SYMBOL_GPL(greybus_message_sent);

/*
 * We've received data on a connection, and it doesn't look like a
 * response, so we assume it's a request.
 *
 * This is called in interrupt context, so just copy the incoming
 * data into the request buffer and handle the rest via workqueue.
 */
static void gb_connection_recv_request(struct gb_connection *connection,
				       u16 operation_id, u8 type,
				       void *data, size_t size)
{
	struct gb_operation *operation;

	operation = gb_operation_create_incoming(connection, operation_id,
						type, data, size);
	if (!operation) {
		dev_err(&connection->dev, "can't create operation\n");
		return;		/* XXX Respond with pre-allocated ENOMEM */
	}

	/*
	 * Incoming requests are handled by arranging for the
	 * request handler to be the operation's callback function.
	 *
	 * The last thing the handler does is send a response
	 * message. The initial reference to the operation will be
	 * dropped when the handler returns.
	 */
	operation->callback = gb_operation_request_handle;
	if (gb_operation_result_set(operation, -EINPROGRESS))
		queue_work(gb_operation_workqueue, &operation->work);
}

/*
 * We've received data that appears to be an operation response
 * message. Look up the operation, and record that we've received
 * its response.
 *
 * This is called in interrupt context, so just copy the incoming
 * data into the response buffer and handle the rest via workqueue.
 */
static void gb_connection_recv_response(struct gb_connection *connection,
			u16 operation_id, u8 result, void *data, size_t size)
{
	struct gb_operation *operation;
	struct gb_message *message;
	int errno = gb_operation_status_map(result);
	size_t message_size;

	operation = gb_operation_find(connection, operation_id);
	if (!operation) {
		dev_err(&connection->dev, "operation not found\n");
		return;
	}

	message = operation->response;
	message_size = sizeof(*message->header) + message->payload_size;
	if (!errno && size != message_size) {
		dev_err(&connection->dev, "bad message size (%zu != %zu)\n",
			size, message_size);
		errno = -EMSGSIZE;
	}

	/* We must ignore the payload if a bad status is returned */
	if (errno)
		size = sizeof(*message->header);
	memcpy(message->header, data, size);

	/* The rest will be handled in work queue context */
	if (gb_operation_result_set(operation, errno))
		queue_work(gb_operation_workqueue, &operation->work);
}

/*
 * Handle data arriving on a connection. As soon as we return, the
 * supplied data buffer will be reused (so unless we do something
 * with it, it's effectively dropped).
 */
void gb_connection_recv(struct gb_connection *connection,
				void *data, size_t size)
{
	struct gb_operation_msg_hdr header;
	size_t msg_size;
	u16 operation_id;

	if (connection->state != GB_CONNECTION_STATE_ENABLED) {
		dev_err(&connection->dev, "dropping %zu received bytes\n",
			size);
		return;
	}

	if (size < sizeof(header)) {
		dev_err(&connection->dev, "message too small\n");
		return;
	}

	/* Use memcpy as data may be unaligned */
	memcpy(&header, data, sizeof(header));
	msg_size = le16_to_cpu(header.size);
	if (size < msg_size) {
		dev_err(&connection->dev,
			"incomplete message received: 0x%04x (%zu < %zu)\n",
			le16_to_cpu(header.operation_id), size, msg_size);
		return;		/* XXX Should still complete operation */
	}

	operation_id = le16_to_cpu(header.operation_id);
	if (header.type & GB_MESSAGE_TYPE_RESPONSE)
		gb_connection_recv_response(connection, operation_id,
						header.result, data, msg_size);
	else
		gb_connection_recv_request(connection, operation_id,
						header.type, data, msg_size);
}

/*
 * Cancel an operation, and record the given error to indicate why.
 */
void gb_operation_cancel(struct gb_operation *operation, int errno)
{
	if (gb_operation_result_set(operation, errno)) {
		gb_message_cancel(operation->request);
		if (operation->response)
			gb_message_cancel(operation->response);
	}
	gb_operation_put(operation);
}
EXPORT_SYMBOL_GPL(gb_operation_cancel);

/**
 * gb_operation_sync: implement a "simple" synchronous gb operation.
 * @connection: the Greybus connection to send this to
 * @type: the type of operation to send
 * @request: pointer to a memory buffer to copy the request from
 * @request_size: size of @request
 * @response: pointer to a memory buffer to copy the response to
 * @response_size: the size of @response
 *
 * This function implements a simple synchronous Greybus operation. It sends
 * the provided operation request and waits (sleeps) until the corresponding
 * operation response message has been successfully received, or an error
 * occurs. @request and @response are buffers to hold the request and response
 * data respectively, and if they are not NULL, their size must be specified in
 * @request_size and @response_size.
 *
 * If a response payload is to come back, and @response is not NULL,
 * @response_size bytes will be copied into @response if the operation
 * is successful.
 *
 * If there is an error, the response buffer is left alone.
 */
int gb_operation_sync(struct gb_connection *connection, int type,
		      void *request, int request_size,
		      void *response, int response_size)
{
	struct gb_operation *operation;
	int ret;

	if ((response_size && !response) ||
	    (request_size && !request))
		return -EINVAL;

	operation = gb_operation_create(connection, type,
					request_size, response_size);
	if (!operation)
		return -ENOMEM;

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	ret = gb_operation_request_send_sync(operation);
	if (ret) {
		dev_err(&connection->dev, "synchronous operation failed: %d\n",
			ret);
	} else {
		if (response_size) {
			memcpy(response, operation->response->payload,
			       response_size);
		}
	}
	gb_operation_destroy(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_sync);
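
/*
 * Typical use, as a sketch (the request/response structs and the
 * EXAMPLE_TYPE_PING value are hypothetical): pack a request struct,
 * issue the operation, and read the response struct on success.
 *
 *	struct example_req req = { .value = cpu_to_le32(42) };
 *	struct example_resp resp;
 *	int ret;
 *
 *	ret = gb_operation_sync(connection, EXAMPLE_TYPE_PING,
 *				&req, sizeof(req), &resp, sizeof(resp));
 *	if (ret)
 *		return ret;	// resp is untouched on error
 */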

int __init gb_operation_init(void)
{
	gb_message_cache = kmem_cache_create("gb_message_cache",
				sizeof(struct gb_message), 0, 0, NULL);
	if (!gb_message_cache)
		return -ENOMEM;

	gb_operation_cache = kmem_cache_create("gb_operation_cache",
				sizeof(struct gb_operation), 0, 0, NULL);
	if (!gb_operation_cache)
		goto err_destroy_message_cache;

	gb_operation_workqueue = alloc_workqueue("greybus_operation", 0, 1);
	if (!gb_operation_workqueue)
		goto err_operation;

	return 0;
err_operation:
	kmem_cache_destroy(gb_operation_cache);
	gb_operation_cache = NULL;
err_destroy_message_cache:
	kmem_cache_destroy(gb_message_cache);
	gb_message_cache = NULL;

	return -ENOMEM;
}

void __exit gb_operation_exit(void)
{
	destroy_workqueue(gb_operation_workqueue);
	gb_operation_workqueue = NULL;
	kmem_cache_destroy(gb_operation_cache);
	gb_operation_cache = NULL;
	kmem_cache_destroy(gb_message_cache);
	gb_message_cache = NULL;
}