// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

#include <linux/sched/signal.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"
/**
 * mei_me_cl_init - initialize me client
 *
 * @me_cl: me client
 */
void mei_me_cl_init(struct mei_me_client *me_cl)
{
	INIT_LIST_HEAD(&me_cl->list);
	kref_init(&me_cl->refcnt);
}

/**
 * mei_me_cl_get - increases me client refcount
 *
 * @me_cl: me client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: me client or NULL
 */
struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
{
	if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
		return me_cl;

	return NULL;
}

/**
 * mei_me_cl_release - free me client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @ref: me_client refcount
 */
static void mei_me_cl_release(struct kref *ref)
{
	struct mei_me_client *me_cl =
		container_of(ref, struct mei_me_client, refcnt);

	kfree(me_cl);
}

/**
 * mei_me_cl_put - decrease me client refcount and free client if necessary
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @me_cl: me client
 */
void mei_me_cl_put(struct mei_me_client *me_cl)
{
	if (me_cl)
		kref_put(&me_cl->refcnt, mei_me_cl_release);
}
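/*
 * Usage sketch (illustrative, not part of the driver): the lookup helpers
 * below return a referenced me client, so every successful lookup must be
 * balanced with mei_me_cl_put():
 *
 *	struct mei_me_client *me_cl;
 *
 *	me_cl = mei_me_cl_by_id(dev, client_id);
 *	if (me_cl) {
 *		// ... inspect me_cl->props ...
 *		mei_me_cl_put(me_cl);	// may free me_cl via mei_me_cl_release()
 *	}
 */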
/**
 * __mei_me_cl_del - delete me client from the list and decrease
 *     reference counter
 *
 * @dev: mei device
 * @me_cl: me client
 *
 * Locking: dev->me_clients_rwsem
 */
static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
	if (!me_cl)
		return;

	list_del_init(&me_cl->list);
	mei_me_cl_put(me_cl);
}

/**
 * mei_me_cl_del - delete me client from the list and decrease
 *     reference counter
 *
 * @dev: mei device
 * @me_cl: me client
 */
void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
	down_write(&dev->me_clients_rwsem);
	__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_add - add me client to the list
 *
 * @dev: mei device
 * @me_cl: me client
 */
void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl)
{
	down_write(&dev->me_clients_rwsem);
	list_add(&me_cl->list, &dev->me_clients);
	up_write(&dev->me_clients_rwsem);
}
/**
 * __mei_me_cl_by_uuid - locate me client by uuid
 *	increases ref count
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev,
						 const uuid_le *uuid)
{
	struct mei_me_client *me_cl;
	const uuid_le *pn;

	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));

	list_for_each_entry(me_cl, &dev->me_clients, list) {
		pn = &me_cl->props.protocol_name;
		if (uuid_le_cmp(*uuid, *pn) == 0)
			return mei_me_cl_get(me_cl);
	}

	return NULL;
}

/**
 * mei_me_cl_by_uuid - locate me client by uuid
 *	increases ref count
 *
 * @dev: the device structure
 * @uuid: me client uuid
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
					const uuid_le *uuid)
{
	struct mei_me_client *me_cl;

	down_read(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid(dev, uuid);
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}
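/*
 * Locking note (a sketch of the intended discipline): the double-underscore
 * variants assert that dev->me_clients_rwsem is already held, while the
 * public wrappers take it themselves:
 *
 *	down_read(&dev->me_clients_rwsem);
 *	me_cl = __mei_me_cl_by_uuid(dev, uuid);	// caller holds the rwsem
 *	// ...
 *	up_read(&dev->me_clients_rwsem);
 *
 * versus mei_me_cl_by_uuid(dev, uuid), which must be called without it.
 */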
/**
 * mei_me_cl_by_id - locate me client by client id
 *	increases ref count
 *
 * @dev: the device structure
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
{
	struct mei_me_client *__me_cl, *me_cl = NULL;

	down_read(&dev->me_clients_rwsem);
	list_for_each_entry(__me_cl, &dev->me_clients, list) {
		if (__me_cl->client_id == client_id) {
			me_cl = mei_me_cl_get(__me_cl);
			break;
		}
	}
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

/**
 * __mei_me_cl_by_uuid_id - locate me client by client id and uuid
 *	increases ref count
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev,
					const uuid_le *uuid, u8 client_id)
{
	struct mei_me_client *me_cl;
	const uuid_le *pn;

	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));

	list_for_each_entry(me_cl, &dev->me_clients, list) {
		pn = &me_cl->props.protocol_name;
		if (uuid_le_cmp(*uuid, *pn) == 0 &&
		    me_cl->client_id == client_id)
			return mei_me_cl_get(me_cl);
	}

	return NULL;
}

/**
 * mei_me_cl_by_uuid_id - locate me client by client id and uuid
 *	increases ref count
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 */
struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
					   const uuid_le *uuid, u8 client_id)
{
	struct mei_me_client *me_cl;

	down_read(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id);
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}
/**
 * mei_me_cl_rm_by_uuid - remove all me clients matching uuid
 *
 * @dev: the device structure
 * @uuid: me client uuid
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
{
	struct mei_me_client *me_cl;

	dev_dbg(dev->dev, "remove %pUl\n", uuid);

	down_write(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid(dev, uuid);
	__mei_me_cl_del(dev, me_cl);
	mei_me_cl_put(me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_rm_by_uuid_id - remove all me clients matching uuid and client id
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @id: me client id
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
{
	struct mei_me_client *me_cl;

	dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id);

	down_write(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
	__mei_me_cl_del(dev, me_cl);
	mei_me_cl_put(me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_rm_all - remove all me clients
 *
 * @dev: the device structure
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_all(struct mei_device *dev)
{
	struct mei_me_client *me_cl, *next;

	down_write(&dev->me_clients_rwsem);
	list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
		__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}
/**
 * mei_io_cb_free - free mei_cb_private related memory
 *
 * @cb: mei callback struct
 */
void mei_io_cb_free(struct mei_cl_cb *cb)
{
	if (cb == NULL)
		return;

	list_del(&cb->list);
	kfree(cb->buf.data);
	kfree(cb->ext_hdr);
	kfree(cb);
}

/**
 * mei_tx_cb_enqueue - queue tx callback
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @cb: mei callback struct
 * @head: an instance of list to queue on
 */
static inline void mei_tx_cb_enqueue(struct mei_cl_cb *cb,
				     struct list_head *head)
{
	list_add_tail(&cb->list, head);
	cb->cl->tx_cb_queued++;
}

/**
 * mei_tx_cb_dequeue - dequeue tx callback
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @cb: mei callback struct to dequeue and free
 */
static inline void mei_tx_cb_dequeue(struct mei_cl_cb *cb)
{
	if (!WARN_ON(cb->cl->tx_cb_queued == 0))
		cb->cl->tx_cb_queued--;

	mei_io_cb_free(cb);
}

/**
 * mei_cl_set_read_by_fp - set pending_read flag to vtag struct for given fp
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @cl: mei client
 * @fp: pointer to file structure
 */
static void mei_cl_set_read_by_fp(const struct mei_cl *cl,
				  const struct file *fp)
{
	struct mei_cl_vtag *cl_vtag;

	list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
		if (cl_vtag->fp == fp) {
			cl_vtag->pending_read = true;
			return;
		}
	}
}
/**
 * mei_io_cb_init - allocate and initialize io callback
 *
 * @cl: mei client
 * @type: operation type
 * @fp: pointer to file structure
 *
 * Return: mei_cl_cb pointer or NULL;
 */
static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
					enum mei_cb_file_ops type,
					const struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	INIT_LIST_HEAD(&cb->list);
	cb->fp = fp;
	cb->cl = cl;
	cb->buf_idx = 0;
	cb->fop_type = type;
	cb->vtag = 0;
	cb->ext_hdr = NULL;

	return cb;
}

/**
 * mei_io_list_flush_cl - removes cbs belonging to the cl.
 *
 * @head: an instance of our list structure
 * @cl: host client
 */
static void mei_io_list_flush_cl(struct list_head *head,
				 const struct mei_cl *cl)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, head, list) {
		if (cl == cb->cl) {
			list_del_init(&cb->list);
			if (cb->fop_type == MEI_FOP_READ)
				mei_io_cb_free(cb);
		}
	}
}

/**
 * mei_io_tx_list_free_cl - removes cb belonging to the cl and free them
 *
 * @head: An instance of our list structure
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 */
static void mei_io_tx_list_free_cl(struct list_head *head,
				   const struct mei_cl *cl,
				   const struct file *fp)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, head, list) {
		if (cl == cb->cl && (!fp || fp == cb->fp))
			mei_tx_cb_dequeue(cb);
	}
}

/**
 * mei_io_list_free_fp - free cb from a list that matches file pointer
 *
 * @head: io list
 * @fp: file pointer (matching cb file object), may be NULL
 */
static void mei_io_list_free_fp(struct list_head *head, const struct file *fp)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, head, list)
		if (!fp || fp == cb->fp)
			mei_io_cb_free(cb);
}

/**
 * mei_cl_free_pending - free pending cb
 *
 * @cl: host client
 */
static void mei_cl_free_pending(struct mei_cl *cl)
{
	struct mei_cl_cb *cb;

	cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
	mei_io_cb_free(cb);
}
/**
 * mei_cl_alloc_cb - a convenient wrapper for allocating read cb
 *
 * @cl: host client
 * @length: size of the buffer
 * @fop_type: operation type
 * @fp: associated file pointer (might be NULL)
 *
 * Return: cb on success and NULL on failure
 */
struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
				  enum mei_cb_file_ops fop_type,
				  const struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = mei_io_cb_init(cl, fop_type, fp);
	if (!cb)
		return NULL;

	if (length == 0)
		return cb;

	cb->buf.data = kmalloc(roundup(length, MEI_SLOT_SIZE), GFP_KERNEL);
	if (!cb->buf.data) {
		mei_io_cb_free(cb);
		return NULL;
	}
	cb->buf.size = length;

	return cb;
}
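/*
 * Example (illustrative sketch): allocating a write callback for a
 * hypothetical payload "buf" of "len" bytes; the data buffer is rounded
 * up to MEI_SLOT_SIZE internally and freed together with the cb:
 *
 *	struct mei_cl_cb *cb;
 *
 *	cb = mei_cl_alloc_cb(cl, len, MEI_FOP_WRITE, fp);
 *	if (!cb)
 *		return -ENOMEM;
 *	memcpy(cb->buf.data, buf, len);
 *	// on error paths: mei_io_cb_free(cb);
 */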
/**
 * mei_cl_enqueue_ctrl_wr_cb - a convenient wrapper for allocating
 *     and enqueuing of the control commands cb
 *
 * @cl: host client
 * @length: size of the buffer
 * @fop_type: operation type
 * @fp: associated file pointer (might be NULL)
 *
 * Return: cb on success and NULL on failure
 * Locking: called under "dev->device_lock" lock
 */
struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
					    enum mei_cb_file_ops fop_type,
					    const struct file *fp)
{
	struct mei_cl_cb *cb;

	/* for RX always allocate at least client's mtu */
	if (length)
		length = max_t(size_t, length, mei_cl_mtu(cl));

	cb = mei_cl_alloc_cb(cl, length, fop_type, fp);
	if (!cb)
		return NULL;

	list_add_tail(&cb->list, &cl->dev->ctrl_wr_list);
	return cb;
}

/**
 * mei_cl_read_cb - find this cl's callback in the read list
 *     for a specific file
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 *
 * Return: cb on success, NULL if cb is not found
 */
struct mei_cl_cb *mei_cl_read_cb(struct mei_cl *cl, const struct file *fp)
{
	struct mei_cl_cb *cb;
	struct mei_cl_cb *ret_cb = NULL;

	spin_lock(&cl->rd_completed_lock);
	list_for_each_entry(cb, &cl->rd_completed, list)
		if (!fp || fp == cb->fp) {
			ret_cb = cb;
			break;
		}
	spin_unlock(&cl->rd_completed_lock);
	return ret_cb;
}
/**
 * mei_cl_flush_queues - flushes queue lists belonging to cl.
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 *
 * Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
 */
int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
{
	struct mei_device *dev;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	cl_dbg(dev, cl, "remove list entry belonging to cl\n");
	mei_io_tx_list_free_cl(&cl->dev->write_list, cl, fp);
	mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl, fp);
	/* free pending and control cb only in final flush */
	if (!fp) {
		mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
		mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
		mei_cl_free_pending(cl);
	}
	spin_lock(&cl->rd_completed_lock);
	mei_io_list_free_fp(&cl->rd_completed, fp);
	spin_unlock(&cl->rd_completed_lock);

	return 0;
}
/**
 * mei_cl_init - initializes cl.
 *
 * @cl: host client to be initialized
 * @dev: mei device
 */
static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
{
	memset(cl, 0, sizeof(*cl));
	init_waitqueue_head(&cl->wait);
	init_waitqueue_head(&cl->rx_wait);
	init_waitqueue_head(&cl->tx_wait);
	init_waitqueue_head(&cl->ev_wait);
	INIT_LIST_HEAD(&cl->vtag_map);
	spin_lock_init(&cl->rd_completed_lock);
	INIT_LIST_HEAD(&cl->rd_completed);
	INIT_LIST_HEAD(&cl->rd_pending);
	INIT_LIST_HEAD(&cl->link);
	cl->writing_state = MEI_IDLE;
	cl->state = MEI_FILE_UNINITIALIZED;
	cl->dev = dev;
}

/**
 * mei_cl_allocate - allocates cl structure and sets it up.
 *
 * @dev: mei device
 * Return: The allocated file or NULL on failure
 */
struct mei_cl *mei_cl_allocate(struct mei_device *dev)
{
	struct mei_cl *cl;

	cl = kmalloc(sizeof(*cl), GFP_KERNEL);
	if (!cl)
		return NULL;

	mei_cl_init(cl, dev);

	return cl;
}

/**
 * mei_cl_link - allocate host id in the host map
 *
 * @cl: host client
 *
 * Return: 0 on success
 *	-EINVAL on incorrect values
 *	-EMFILE if open count exceeded.
 */
int mei_cl_link(struct mei_cl *cl)
{
	struct mei_device *dev;
	int id;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
	if (id >= MEI_CLIENTS_MAX) {
		dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
		return -EMFILE;
	}

	if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(dev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		return -EMFILE;
	}

	dev->open_handle_count++;

	cl->host_client_id = id;
	list_add_tail(&cl->link, &dev->file_list);

	set_bit(id, dev->host_clients_map);

	cl->state = MEI_FILE_INITIALIZING;

	cl_dbg(dev, cl, "link cl\n");
	return 0;
}
/**
 * mei_cl_unlink - remove host client from the list
 *
 * @cl: host client
 *
 * Return: always 0
 */
int mei_cl_unlink(struct mei_cl *cl)
{
	struct mei_device *dev;

	/* don't shout on error exit path */
	if (!cl)
		return 0;

	if (WARN_ON(!cl->dev))
		return 0;

	dev = cl->dev;

	cl_dbg(dev, cl, "unlink client");

	if (cl->state == MEI_FILE_UNINITIALIZED)
		return 0;

	if (dev->open_handle_count > 0)
		dev->open_handle_count--;

	/* never clear the 0 bit */
	if (cl->host_client_id)
		clear_bit(cl->host_client_id, dev->host_clients_map);

	list_del_init(&cl->link);

	cl->state = MEI_FILE_UNINITIALIZED;
	cl->writing_state = MEI_IDLE;

	WARN_ON(!list_empty(&cl->rd_completed) ||
		!list_empty(&cl->rd_pending) ||
		!list_empty(&cl->link));

	return 0;
}

void mei_host_client_init(struct mei_device *dev)
{
	mei_set_devstate(dev, MEI_DEV_ENABLED);
	dev->reset_count = 0;

	schedule_work(&dev->bus_rescan_work);

	pm_runtime_mark_last_busy(dev->dev);
	dev_dbg(dev->dev, "rpm: autosuspend\n");
	pm_request_autosuspend(dev->dev);
}
/**
 * mei_hbuf_acquire - try to acquire host buffer
 *
 * @dev: the device structure
 * Return: true if host buffer was acquired
 */
bool mei_hbuf_acquire(struct mei_device *dev)
{
	if (mei_pg_state(dev) == MEI_PG_ON ||
	    mei_pg_in_transition(dev)) {
		dev_dbg(dev->dev, "device is in pg\n");
		return false;
	}

	if (!dev->hbuf_is_ready) {
		dev_dbg(dev->dev, "hbuf is not ready\n");
		return false;
	}

	dev->hbuf_is_ready = false;

	return true;
}
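/*
 * Pattern sketch (assumed from the callers below): the host buffer is
 * acquired once per request; if it cannot be acquired, the control cb
 * stays on dev->ctrl_wr_list and the interrupt thread retries later:
 *
 *	if (mei_hbuf_acquire(dev)) {
 *		rets = mei_cl_send_connect(cl, cb);	// or another request
 *		if (rets)
 *			goto out;
 *	}
 *	// else: cb remains queued for the irq path
 */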
/**
 * mei_cl_wake_all - wake up readers, writers and event waiters so
 *		they can be interrupted
 *
 * @cl: host client
 */
static void mei_cl_wake_all(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	/* synchronized under device mutex */
	if (waitqueue_active(&cl->rx_wait)) {
		cl_dbg(dev, cl, "Waking up reading client!\n");
		wake_up_interruptible(&cl->rx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->tx_wait)) {
		cl_dbg(dev, cl, "Waking up writing client!\n");
		wake_up_interruptible(&cl->tx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->ev_wait)) {
		cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
		wake_up_interruptible(&cl->ev_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->wait)) {
		cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
		wake_up_interruptible(&cl->wait);
	}
}
/**
 * mei_cl_set_disconnected - set disconnected state and clear
 *   associated states and resources
 *
 * @cl: host client
 */
static void mei_cl_set_disconnected(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	if (cl->state == MEI_FILE_DISCONNECTED ||
	    cl->state <= MEI_FILE_INITIALIZING)
		return;

	cl->state = MEI_FILE_DISCONNECTED;
	mei_io_tx_list_free_cl(&dev->write_list, cl, NULL);
	mei_io_tx_list_free_cl(&dev->write_waiting_list, cl, NULL);
	mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
	mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
	mei_cl_wake_all(cl);
	cl->rx_flow_ctrl_creds = 0;
	cl->tx_flow_ctrl_creds = 0;
	cl->timer_count = 0;

	if (!cl->me_cl)
		return;

	if (!WARN_ON(cl->me_cl->connect_count == 0))
		cl->me_cl->connect_count--;

	if (cl->me_cl->connect_count == 0)
		cl->me_cl->tx_flow_ctrl_creds = 0;

	mei_me_cl_put(cl->me_cl);
	cl->me_cl = NULL;
}

static int mei_cl_set_connecting(struct mei_cl *cl, struct mei_me_client *me_cl)
{
	if (!mei_me_cl_get(me_cl))
		return -ENOENT;

	/* only one connection is allowed for fixed address clients */
	if (me_cl->props.fixed_address) {
		if (me_cl->connect_count) {
			mei_me_cl_put(me_cl);
			return -EBUSY;
		}
	}

	cl->me_cl = me_cl;
	cl->state = MEI_FILE_CONNECTING;
	cl->me_cl->connect_count++;

	return 0;
}
/**
 * mei_cl_send_disconnect - send disconnect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	int ret;

	dev = cl->dev;

	ret = mei_hbm_cl_disconnect_req(dev, cl);
	cl->status = ret;
	if (ret) {
		cl->state = MEI_FILE_DISCONNECT_REPLY;
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	cl->timer_count = dev->timeouts.connect;
	mei_schedule_stall_timer(dev);

	return 0;
}

/**
 * mei_cl_irq_disconnect - processes close related operation from
 *	interrupt thread context - send disconnect request
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
			  struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_cl_send_disconnect(cl, cb);
	if (ret)
		list_move_tail(&cb->list, cmpl_list);

	return ret;
}
/**
 * __mei_cl_disconnect - disconnect host client from the me one
 *     internal function runtime pm has to be already acquired
 *
 * @cl: host client
 *
 * Return: 0 on success, <0 on failure.
 */
static int __mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	dev = cl->dev;

	cl->state = MEI_FILE_DISCONNECTING;

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	if (mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_disconnect(cl, cb);
		if (rets) {
			cl_err(dev, cl, "failed to disconnect.\n");
			goto out;
		}
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->state == MEI_FILE_DISCONNECT_REPLY ||
			   cl->state == MEI_FILE_DISCONNECTED,
			   dev->timeouts.cl_connect);
	mutex_lock(&dev->device_lock);

	rets = cl->status;
	if (cl->state != MEI_FILE_DISCONNECT_REPLY &&
	    cl->state != MEI_FILE_DISCONNECTED) {
		cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
		rets = -ETIME;
	}

out:
	/* we disconnect also on error */
	mei_cl_set_disconnected(cl);
	if (!rets)
		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");

	mei_io_cb_free(cb);
	return rets;
}
/**
 * mei_cl_disconnect - disconnect host client from the me one
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cl_dbg(dev, cl, "disconnecting");

	if (!mei_cl_is_connected(cl))
		return 0;

	if (mei_cl_is_fixed_address(cl)) {
		mei_cl_set_disconnected(cl);
		return 0;
	}

	if (dev->dev_state == MEI_DEV_POWERING_DOWN ||
	    dev->dev_state == MEI_DEV_POWER_DOWN) {
		cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n");
		mei_cl_set_disconnected(cl);
		return 0;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	rets = __mei_cl_disconnect(cl);

	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return rets;
}
/**
 * mei_cl_is_other_connecting - checks if other
 *    client with the same me client id is connecting
 *
 * @cl: private data of the file object
 *
 * Return: true if other client is connected, false - otherwise.
 */
static bool mei_cl_is_other_connecting(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;

	dev = cl->dev;

	list_for_each_entry(cb, &dev->ctrl_rd_list, list) {
		if (cb->fop_type == MEI_FOP_CONNECT &&
		    mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
			return true;
	}

	return false;
}

/**
 * mei_cl_send_connect - send connect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	int ret;

	dev = cl->dev;

	ret = mei_hbm_cl_connect_req(dev, cl);
	cl->status = ret;
	if (ret) {
		cl->state = MEI_FILE_DISCONNECT_REPLY;
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	cl->timer_count = dev->timeouts.connect;
	mei_schedule_stall_timer(dev);
	return 0;
}

/**
 * mei_cl_irq_connect - send connect request in irq_thread context
 *
 * @cl: host client
 * @cb: callback block
 * @cmpl_list: complete list
 *
 * Return: 0, OK; otherwise, error.
 */
int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
		       struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int rets;

	if (mei_cl_is_other_connecting(cl))
		return 0;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	rets = mei_cl_send_connect(cl, cb);
	if (rets)
		list_move_tail(&cb->list, cmpl_list);

	return rets;
}
/**
 * mei_cl_connect - connect host client to the me one
 *
 * @cl: host client
 * @me_cl: me client
 * @fp: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
		   const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev || !me_cl))
		return -ENODEV;

	dev = cl->dev;

	rets = mei_cl_set_connecting(cl, me_cl);
	if (rets)
		goto nortpm;

	if (mei_cl_is_fixed_address(cl)) {
		cl->state = MEI_FILE_CONNECTED;
		rets = 0;
		goto nortpm;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto nortpm;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_CONNECT, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* run hbuf acquire last so we don't have to undo */
	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_connect(cl, cb);
		if (rets)
			goto out;
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   (cl->state == MEI_FILE_CONNECTED ||
			    cl->state == MEI_FILE_DISCONNECTED ||
			    cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
			    cl->state == MEI_FILE_DISCONNECT_REPLY),
			   dev->timeouts.cl_connect);
	mutex_lock(&dev->device_lock);

	if (!mei_cl_is_connected(cl)) {
		if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
			mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
			mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
			/* ignore disconnect return value;
			 * in case of failure reset will be invoked
			 */
			__mei_cl_disconnect(cl);
			rets = -EFAULT;
			goto out;
		}

		/* timeout or something went really wrong */
		if (!cl->status)
			cl->status = -EFAULT;
	}

	rets = cl->status;
out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);

nortpm:
	if (!mei_cl_is_connected(cl))
		mei_cl_set_disconnected(cl);

	return rets;
}
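/*
 * Lifecycle sketch (illustrative, error handling elided): a typical
 * in-kernel user pairs the helpers in this file as follows:
 *
 *	cl = mei_cl_alloc_linked(dev);		// allocate + mei_cl_link()
 *	rets = mei_cl_connect(cl, me_cl, NULL);	// connect to the FW client
 *	// ... mei_cl_write() / mei_cl_read_start() ...
 *	rets = mei_cl_disconnect(cl);
 *	mei_cl_unlink(cl);
 *	kfree(cl);
 */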
/**
 * mei_cl_alloc_linked - allocate and link host client
 *
 * @dev: the device structure
 *
 * Return: cl on success ERR_PTR on failure
 */
struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev)
{
	struct mei_cl *cl;
	int ret;

	cl = mei_cl_allocate(dev);
	if (!cl) {
		ret = -ENOMEM;
		goto err;
	}

	ret = mei_cl_link(cl);
	if (ret)
		goto err;

	return cl;
err:
	kfree(cl);
	return ERR_PTR(ret);
}

/**
 * mei_cl_tx_flow_ctrl_creds - checks flow_control credits for cl.
 *
 * @cl: host client
 *
 * Return: 1 if tx_flow_ctrl_creds >0, 0 - otherwise.
 */
static int mei_cl_tx_flow_ctrl_creds(struct mei_cl *cl)
{
	if (WARN_ON(!cl || !cl->me_cl))
		return -EINVAL;

	if (cl->tx_flow_ctrl_creds > 0)
		return 1;

	if (mei_cl_is_fixed_address(cl))
		return 1;

	if (mei_cl_is_single_recv_buf(cl)) {
		if (cl->me_cl->tx_flow_ctrl_creds > 0)
			return 1;
	}
	return 0;
}
/**
 * mei_cl_tx_flow_ctrl_creds_reduce - reduces transmit flow control credits
 *   for a client
 *
 * @cl: host client
 *
 * Return: 0 on success
 *	-EINVAL when ctrl credits are <= 0
 */
static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl)
{
	if (WARN_ON(!cl || !cl->me_cl))
		return -EINVAL;

	if (mei_cl_is_fixed_address(cl))
		return 0;

	if (mei_cl_is_single_recv_buf(cl)) {
		if (WARN_ON(cl->me_cl->tx_flow_ctrl_creds <= 0))
			return -EINVAL;
		cl->me_cl->tx_flow_ctrl_creds--;
	} else {
		if (WARN_ON(cl->tx_flow_ctrl_creds <= 0))
			return -EINVAL;
		cl->tx_flow_ctrl_creds--;
	}
	return 0;
}
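/*
 * Flow control sketch (assumed from the TX paths below): a credit is
 * checked before the first fragment of a message and consumed once that
 * fragment has been handed to the hardware:
 *
 *	rets = mei_cl_tx_flow_ctrl_creds(cl);
 *	if (rets < 0)
 *		goto err;
 *	if (rets == 0)
 *		return 0;	// no credits yet, retry later
 *	// ... write the first fragment ...
 *	if (mei_cl_tx_flow_ctrl_creds_reduce(cl))
 *		return -EIO;
 */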
/**
 * mei_cl_vtag_alloc - allocate and fill the vtag structure
 *
 * @fp: pointer to file structure
 * @vtag: vm tag
 *
 * Return:
 * * Pointer to allocated struct - on success
 * * ERR_PTR(-ENOMEM) on memory allocation failure
 */
struct mei_cl_vtag *mei_cl_vtag_alloc(struct file *fp, u8 vtag)
{
	struct mei_cl_vtag *cl_vtag;

	cl_vtag = kzalloc(sizeof(*cl_vtag), GFP_KERNEL);
	if (!cl_vtag)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&cl_vtag->list);
	cl_vtag->vtag = vtag;
	cl_vtag->fp = fp;

	return cl_vtag;
}

/**
 * mei_cl_fp_by_vtag - obtain the file pointer by vtag
 *
 * @cl: host client
 * @vtag: virtual tag
 *
 * Return:
 * * A file pointer - on success
 * * ERR_PTR(-ENOENT) if vtag is not found in the client vtag list
 */
const struct file *mei_cl_fp_by_vtag(const struct mei_cl *cl, u8 vtag)
{
	struct mei_cl_vtag *vtag_l;

	list_for_each_entry(vtag_l, &cl->vtag_map, list)
		/* The client on bus has one fixed fp */
		if ((cl->cldev && mei_cldev_enabled(cl->cldev)) ||
		    vtag_l->vtag == vtag)
			return vtag_l->fp;

	return ERR_PTR(-ENOENT);
}

/**
 * mei_cl_reset_read_by_vtag - reset pending_read flag by given vtag
 *
 * @cl: host client
 * @vtag: vm tag
 */
static void mei_cl_reset_read_by_vtag(const struct mei_cl *cl, u8 vtag)
{
	struct mei_cl_vtag *vtag_l;

	list_for_each_entry(vtag_l, &cl->vtag_map, list) {
		if (vtag_l->vtag == vtag) {
			vtag_l->pending_read = false;
			break;
		}
	}
}

/**
 * mei_cl_read_vtag_add_fc - add flow control for next pending reader
 *                           in the vtag list
 *
 * @cl: host client
 */
static void mei_cl_read_vtag_add_fc(struct mei_cl *cl)
{
	struct mei_cl_vtag *cl_vtag;

	list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
		if (cl_vtag->pending_read) {
			if (mei_cl_enqueue_ctrl_wr_cb(cl,
						      mei_cl_mtu(cl),
						      MEI_FOP_READ,
						      cl_vtag->fp))
				cl->rx_flow_ctrl_creds++;
			break;
		}
	}
}
/**
 * mei_cl_vt_support_check - check if client support vtags
 *
 * @cl: host client
 *
 * Return:
 * * 0 - supported, or not connected at all
 * * -EOPNOTSUPP - vtags are not supported by client
 */
int mei_cl_vt_support_check(const struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	if (!dev->hbm_f_vt_supported)
		return -EOPNOTSUPP;

	if (!cl->me_cl)
		return 0;

	return cl->me_cl->props.vt_supported ? 0 : -EOPNOTSUPP;
}

/**
 * mei_cl_add_rd_completed - add read completed callback to list with lock
 *                           and vtag check
 *
 * @cl: host client
 * @cb: callback block
 */
void mei_cl_add_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	const struct file *fp;

	if (!mei_cl_vt_support_check(cl)) {
		fp = mei_cl_fp_by_vtag(cl, cb->vtag);
		if (IS_ERR(fp)) {
			/* client already disconnected, discarding */
			mei_io_cb_free(cb);
			return;
		}
		cb->fp = fp;
		mei_cl_reset_read_by_vtag(cl, cb->vtag);
		mei_cl_read_vtag_add_fc(cl);
	}

	spin_lock(&cl->rd_completed_lock);
	list_add_tail(&cb->list, &cl->rd_completed);
	spin_unlock(&cl->rd_completed_lock);
}

/**
 * mei_cl_del_rd_completed - free read completed callback with lock
 *
 * @cl: host client
 * @cb: callback block
 */
void mei_cl_del_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	spin_lock(&cl->rd_completed_lock);
	mei_io_cb_free(cb);
	spin_unlock(&cl->rd_completed_lock);
}
/**
 * mei_cl_notify_fop2req - convert fop to proper request
 *
 * @fop: client notification start response command
 *
 * Return: MEI_HBM_NOTIFICATION_START/STOP
 */
u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop)
{
	if (fop == MEI_FOP_NOTIFY_START)
		return MEI_HBM_NOTIFICATION_START;
	else
		return MEI_HBM_NOTIFICATION_STOP;
}

/**
 * mei_cl_notify_req2fop - convert notification request to file operation type
 *
 * @req: hbm notification request type
 *
 * Return: MEI_FOP_NOTIFY_START/STOP
 */
enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
{
	if (req == MEI_HBM_NOTIFICATION_START)
		return MEI_FOP_NOTIFY_START;
	else
		return MEI_FOP_NOTIFY_STOP;
}
/**
 * mei_cl_irq_notify - send notification request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
		      struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;
	bool request;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	request = mei_cl_notify_fop2req(cb->fop_type);
	ret = mei_hbm_cl_notify_req(dev, cl, request);
	if (ret) {
		cl->status = ret;
		list_move_tail(&cb->list, cmpl_list);
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	return 0;
}
/**
 * mei_cl_notify_request - send notification stop/start request
 *
 * @cl: host client
 * @fp: associate request with file
 * @request: 1 for start or 0 for stop
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_notify_request(struct mei_cl *cl,
			  const struct file *fp, u8 request)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	enum mei_cb_file_ops fop_type;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_ev_supported) {
		cl_dbg(dev, cl, "notifications not supported\n");
		return -EOPNOTSUPP;
	}

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	fop_type = mei_cl_notify_req2fop(request);
	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, fop_type, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_notify_req(dev, cl, request)) {
			rets = -ENODEV;
			goto out;
		}
		list_move_tail(&cb->list, &dev->ctrl_rd_list);
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->notify_en == request ||
			   cl->status ||
			   !mei_cl_is_connected(cl),
			   dev->timeouts.cl_connect);
	mutex_lock(&dev->device_lock);

	if (cl->notify_en != request && !cl->status)
		cl->status = -EFAULT;

	rets = cl->status;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);
	return rets;
}
/**
 * mei_cl_notify - raise notification
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_cl_notify(struct mei_cl *cl)
{
	struct mei_device *dev;

	if (!cl || !cl->dev)
		return;

	dev = cl->dev;

	if (!cl->notify_en)
		return;

	cl_dbg(dev, cl, "notify event");
	cl->notify_ev = true;
	if (!mei_cl_bus_notify_event(cl))
		wake_up_interruptible(&cl->ev_wait);

	if (cl->ev_async)
		kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);
}

/**
 * mei_cl_notify_get - get or wait for notification event
 *
 * @cl: host client
 * @block: this request is blocking
 * @notify_ev: true if notification event was received
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
{
	struct mei_device *dev;
	int rets;

	*notify_ev = false;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_ev_supported) {
		cl_dbg(dev, cl, "notifications not supported\n");
		return -EOPNOTSUPP;
	}

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	if (cl->notify_ev)
		goto out;

	if (!block)
		return -EAGAIN;

	mutex_unlock(&dev->device_lock);
	rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev);
	mutex_lock(&dev->device_lock);

	if (rets < 0)
		return rets;

out:
	*notify_ev = cl->notify_ev;
	cl->notify_ev = false;
	return 0;
}
/**
 * mei_cl_read_start - the start read client message function.
 *
 * @cl: host client
 * @length: number of bytes to read
 * @fp: pointer to file structure
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	if (!mei_me_cl_is_active(cl->me_cl)) {
		cl_err(dev, cl, "no such me client\n");
		return -ENOTTY;
	}

	if (mei_cl_is_fixed_address(cl))
		return 0;

	/* HW currently supports only one pending read */
	if (cl->rx_flow_ctrl_creds) {
		mei_cl_set_read_by_fp(cl, fp);
		return -EBUSY;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
	if (!cb)
		return -ENOMEM;

	mei_cl_set_read_by_fp(cl, fp);

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto nortpm;
	}

	rets = 0;
	if (mei_hbuf_acquire(dev)) {
		rets = mei_hbm_cl_flow_control_req(dev, cl);
		if (rets < 0)
			goto out;

		list_move_tail(&cb->list, &cl->rd_pending);
	}
	cl->rx_flow_ctrl_creds++;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
nortpm:
	if (rets)
		mei_io_cb_free(cb);

	return rets;
}
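/*
 * Read flow sketch (illustrative, error handling simplified): a reader
 * arms exactly one RX credit and then waits for the completion to land
 * on cl->rd_completed:
 *
 *	rets = mei_cl_read_start(cl, mei_cl_mtu(cl), fp);
 *	if (rets && rets != -EBUSY)	// -EBUSY: a read is already pending
 *		return rets;
 *	// later, e.g. after waiting on cl->rx_wait:
 *	cb = mei_cl_read_cb(cl, fp);
 */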
static inline u8 mei_ext_hdr_set_vtag(void *ext, u8 vtag)
{
	struct mei_ext_hdr_vtag *vtag_hdr = ext;

	vtag_hdr->hdr.type = MEI_EXT_HDR_VTAG;
	vtag_hdr->hdr.length = mei_data2slots(sizeof(*vtag_hdr));
	vtag_hdr->vtag = vtag;
	vtag_hdr->reserved = 0;
	return vtag_hdr->hdr.length;
}

static inline bool mei_ext_hdr_is_gsc(struct mei_ext_hdr *ext)
{
	return ext && ext->type == MEI_EXT_HDR_GSC;
}

static inline u8 mei_ext_hdr_set_gsc(struct mei_ext_hdr *ext, struct mei_ext_hdr *gsc_hdr)
{
	memcpy(ext, gsc_hdr, mei_ext_hdr_len(gsc_hdr));
	return ext->length;
}
/**
 * mei_msg_hdr_init - allocate and initialize mei message header
 *
 * @cb: message callback structure
 *
 * Return: a pointer to initialized header or ERR_PTR on failure
 */
static struct mei_msg_hdr *mei_msg_hdr_init(const struct mei_cl_cb *cb)
{
	size_t hdr_len;
	struct mei_ext_meta_hdr *meta;
	struct mei_msg_hdr *mei_hdr;
	bool is_ext, is_hbm, is_gsc, is_vtag;
	struct mei_ext_hdr *next_ext;

	if (!cb)
		return ERR_PTR(-EINVAL);

	/* Extended header for vtag is attached only on the first fragment */
	is_vtag = (cb->vtag && cb->buf_idx == 0);
	is_hbm = cb->cl->me_cl->client_id == 0;
	is_gsc = ((!is_hbm) && cb->cl->dev->hbm_f_gsc_supported && mei_ext_hdr_is_gsc(cb->ext_hdr));
	is_ext = is_vtag || is_gsc;

	/* Compute extended header size */
	hdr_len = sizeof(*mei_hdr);

	if (!is_ext)
		goto setup_hdr;

	hdr_len += sizeof(*meta);
	if (is_vtag)
		hdr_len += sizeof(struct mei_ext_hdr_vtag);

	if (is_gsc)
		hdr_len += mei_ext_hdr_len(cb->ext_hdr);

setup_hdr:
	mei_hdr = kzalloc(hdr_len, GFP_KERNEL);
	if (!mei_hdr)
		return ERR_PTR(-ENOMEM);

	mei_hdr->host_addr = mei_cl_host_addr(cb->cl);
	mei_hdr->me_addr = mei_cl_me_id(cb->cl);
	mei_hdr->internal = cb->internal;
	mei_hdr->extended = is_ext;

	if (!is_ext)
		goto out;

	meta = (struct mei_ext_meta_hdr *)mei_hdr->extension;
	meta->size = 0;
	next_ext = (struct mei_ext_hdr *)meta->hdrs;
	if (is_vtag) {
		meta->count++;
		meta->size += mei_ext_hdr_set_vtag(next_ext, cb->vtag);
		next_ext = mei_ext_next(next_ext);
	}

	if (is_gsc) {
		meta->count++;
		meta->size += mei_ext_hdr_set_gsc(next_ext, cb->ext_hdr);
		next_ext = mei_ext_next(next_ext);
	}

out:
	mei_hdr->length = hdr_len - sizeof(*mei_hdr);
	return mei_hdr;
}
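/*
 * Worked size example (following the computation above): for a first
 * fragment carrying only a vtag extension,
 *
 *	hdr_len = sizeof(struct mei_msg_hdr)
 *		+ sizeof(struct mei_ext_meta_hdr)
 *		+ sizeof(struct mei_ext_hdr_vtag);
 *
 * and mei_hdr->length initially counts just the extension bytes
 * (hdr_len - sizeof(*mei_hdr)); the payload length is added later by
 * the TX paths below.
 */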
/**
 * mei_cl_irq_write - write a message to device
 *	from the interrupt thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise error.
 */
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
		     struct list_head *cmpl_list)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr *mei_hdr = NULL;
	size_t hdr_len;
	size_t hbuf_len, dr_len;
	size_t buf_len;
	size_t data_len;
	int hbuf_slots;
	u32 dr_slots;
	u32 dma_len;
	int rets;
	bool first_chunk;
	const void *data = NULL;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	buf = &cb->buf;

	first_chunk = cb->buf_idx == 0;

	rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1;
	if (rets < 0)
		goto err;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		return 0;
	}

	buf_len = buf->size - cb->buf_idx;
	data = buf->data + cb->buf_idx;
	hbuf_slots = mei_hbuf_empty_slots(dev);
	if (hbuf_slots < 0) {
		rets = -EOVERFLOW;
		goto err;
	}

	hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
	dr_slots = mei_dma_ring_empty_slots(dev);
	dr_len = mei_slots2data(dr_slots);

	mei_hdr = mei_msg_hdr_init(cb);
	if (IS_ERR(mei_hdr)) {
		rets = PTR_ERR(mei_hdr);
		mei_hdr = NULL;
		goto err;
	}

	hdr_len = sizeof(*mei_hdr) + mei_hdr->length;

	/*
	 * Split the message only if we can write the whole host buffer
	 * otherwise wait for next time the host buffer is empty.
	 */
	if (hdr_len + buf_len <= hbuf_len) {
		data_len = buf_len;
		mei_hdr->msg_complete = 1;
	} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
		mei_hdr->dma_ring = 1;
		if (buf_len > dr_len)
			buf_len = dr_len;
		else
			mei_hdr->msg_complete = 1;

		data_len = sizeof(dma_len);
		dma_len = buf_len;
		data = &dma_len;
	} else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) {
		buf_len = hbuf_len - hdr_len;
		data_len = buf_len;
	} else {
		kfree(mei_hdr);
		return 0;
	}
	mei_hdr->length += data_len;

	if (mei_hdr->dma_ring && buf->data)
		mei_dma_ring_write(dev, buf->data + cb->buf_idx, buf_len);
	rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);
	if (rets)
		goto err;

	cl->status = 0;
	cl->writing_state = MEI_WRITING;
	cb->buf_idx += buf_len;

	if (first_chunk) {
		if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
			rets = -EIO;
			goto err;
		}
	}

	if (mei_hdr->msg_complete)
		list_move_tail(&cb->list, &dev->write_waiting_list);

	kfree(mei_hdr);
	return 0;

err:
	kfree(mei_hdr);
	cl->status = rets;
	list_move_tail(&cb->list, cmpl_list);
	return rets;
}
/**
 * mei_cl_write - submit a write cb to mei device
 *	assumes device_lock is locked
 *
 * @cl: host client
 * @cb: write callback with filled data
 * @timeout: send timeout in milliseconds.
 *	effective only for blocking writes: the cb->blocking is set.
 *	set timeout to MAX_SCHEDULE_TIMEOUT for the maximum allowed wait.
 *
 * Return: number of bytes sent on success, <0 on failure.
 */
ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, unsigned long timeout)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr *mei_hdr = NULL;
	size_t hdr_len;
	size_t hbuf_len, dr_len;
	size_t buf_len;
	size_t data_len;
	int hbuf_slots;
	u32 dr_slots;
	u32 dma_len;
	ssize_t rets;
	bool blocking;
	const void *data;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	if (WARN_ON(!cb))
		return -EINVAL;

	dev = cl->dev;

	buf = &cb->buf;
	data = buf->data;

	buf_len = buf->size;

	cl_dbg(dev, cl, "buf_len=%zd\n", buf_len);

	blocking = cb->blocking;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %zd\n", rets);
		goto free;
	}

	cb->buf_idx = 0;
	cl->writing_state = MEI_IDLE;

	rets = mei_cl_tx_flow_ctrl_creds(cl);
	if (rets < 0)
		goto err;

	mei_hdr = mei_msg_hdr_init(cb);
	if (IS_ERR(mei_hdr)) {
		rets = PTR_ERR(mei_hdr);
		mei_hdr = NULL;
		goto err;
	}

	hdr_len = sizeof(*mei_hdr) + mei_hdr->length;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		rets = buf_len;
		goto out;
	}

	if (!mei_hbuf_acquire(dev)) {
		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
		rets = buf_len;
		goto out;
	}

	hbuf_slots = mei_hbuf_empty_slots(dev);
	if (hbuf_slots < 0) {
		rets = -EOVERFLOW;
		goto out;
	}

	hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
	dr_slots = mei_dma_ring_empty_slots(dev);
	dr_len = mei_slots2data(dr_slots);

	if (hdr_len + buf_len <= hbuf_len) {
		data_len = buf_len;
		mei_hdr->msg_complete = 1;
	} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
		mei_hdr->dma_ring = 1;
		if (buf_len > dr_len)
			buf_len = dr_len;
		else
			mei_hdr->msg_complete = 1;

		data_len = sizeof(dma_len);
		dma_len = buf_len;
		data = &dma_len;
	} else {
		buf_len = hbuf_len - hdr_len;
		data_len = buf_len;
	}

	mei_hdr->length += data_len;

	if (mei_hdr->dma_ring && buf->data)
		mei_dma_ring_write(dev, buf->data, buf_len);
	rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);
	if (rets)
		goto err;

	rets = mei_cl_tx_flow_ctrl_creds_reduce(cl);
	if (rets)
		goto err;

	cl->writing_state = MEI_WRITING;
	cb->buf_idx = buf_len;
	/* restore return value */
	buf_len = buf->size;

out:
	if (mei_hdr->msg_complete)
		mei_tx_cb_enqueue(cb, &dev->write_waiting_list);
	else
		mei_tx_cb_enqueue(cb, &dev->write_list);

	cb = NULL;
	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {

		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible_timeout(cl->tx_wait,
							cl->writing_state == MEI_WRITE_COMPLETE ||
							(!mei_cl_is_connected(cl)),
							msecs_to_jiffies(timeout));
		mutex_lock(&dev->device_lock);
		/* clean all queue on timeout as something fatal happened */
		if (rets == 0) {
			rets = -ETIME;
			mei_io_tx_list_free_cl(&dev->write_list, cl, NULL);
			mei_io_tx_list_free_cl(&dev->write_waiting_list, cl, NULL);
		}
		/* wait_event_interruptible returns -ERESTARTSYS */
		if (rets > 0)
			rets = 0;
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto err;
		}
		if (cl->writing_state != MEI_WRITE_COMPLETE) {
			rets = -EFAULT;
			goto err;
		}
	}

	rets = buf_len;
err:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
free:
	mei_io_cb_free(cb);

	kfree(mei_hdr);

	return rets;
}
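/*
 * Usage sketch (illustrative): a blocking write with a bounded wait;
 * "data" and "len" are caller-provided, the 5s timeout is an arbitrary
 * example value (the parameter is in milliseconds):
 *
 *	cb = mei_cl_alloc_cb(cl, len, MEI_FOP_WRITE, fp);
 *	if (!cb)
 *		return -ENOMEM;
 *	memcpy(cb->buf.data, data, len);
 *	cb->blocking = 1;
 *	rets = mei_cl_write(cl, cb, 5 * MSEC_PER_SEC);
 *
 * mei_cl_write() consumes the cb on all paths (it is either freed or
 * enqueued and freed on completion), so the caller must not free it.
 */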
/**
 * mei_cl_complete - processes completed operation for a client
 *
 * @cl: private data of the file object.
 * @cb: callback block.
 */
void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev = cl->dev;

	switch (cb->fop_type) {
	case MEI_FOP_WRITE:
		mei_tx_cb_dequeue(cb);
		cl->writing_state = MEI_WRITE_COMPLETE;
		if (waitqueue_active(&cl->tx_wait)) {
			wake_up_interruptible(&cl->tx_wait);
		} else {
			pm_runtime_mark_last_busy(dev->dev);
			pm_request_autosuspend(dev->dev);
		}
		break;

	case MEI_FOP_READ:
		mei_cl_add_rd_completed(cl, cb);
		if (!mei_cl_is_fixed_address(cl) &&
		    !WARN_ON(!cl->rx_flow_ctrl_creds))
			cl->rx_flow_ctrl_creds--;
		if (!mei_cl_bus_rx_event(cl))
			wake_up_interruptible(&cl->rx_wait);
		break;

	case MEI_FOP_CONNECT:
	case MEI_FOP_DISCONNECT:
	case MEI_FOP_NOTIFY_STOP:
	case MEI_FOP_NOTIFY_START:
	case MEI_FOP_DMA_MAP:
	case MEI_FOP_DMA_UNMAP:
		if (waitqueue_active(&cl->wait))
			wake_up(&cl->wait);

		break;
	case MEI_FOP_DISCONNECT_RSP:
		mei_io_cb_free(cb);
		mei_cl_set_disconnected(cl);
		break;
	default:
		BUG_ON(0);
	}
}

/**
 * mei_cl_all_disconnect - disconnect forcefully all connected clients
 *
 * @dev: mei device
 */
void mei_cl_all_disconnect(struct mei_device *dev)
{
	struct mei_cl *cl;

	list_for_each_entry(cl, &dev->file_list, link)
		mei_cl_set_disconnected(cl);
}
EXPORT_SYMBOL_GPL(mei_cl_all_disconnect);
static struct mei_cl *mei_cl_dma_map_find(struct mei_device *dev, u8 buffer_id)
{
	struct mei_cl *cl;

	list_for_each_entry(cl, &dev->file_list, link)
		if (cl->dma.buffer_id == buffer_id)
			return cl;
	return NULL;
}

/**
 * mei_cl_irq_dma_map - send client dma map request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_irq_dma_map(struct mei_cl *cl, struct mei_cl_cb *cb,
		       struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_dma_map_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_hbm_cl_dma_map_req(dev, cl);
	if (ret) {
		cl->status = ret;
		list_move_tail(&cb->list, cmpl_list);
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	return 0;
}

/**
 * mei_cl_irq_dma_unmap - send client dma unmap request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_irq_dma_unmap(struct mei_cl *cl, struct mei_cl_cb *cb,
			 struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_dma_unmap_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_hbm_cl_dma_unmap_req(dev, cl);
	if (ret) {
		cl->status = ret;
		list_move_tail(&cb->list, cmpl_list);
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	return 0;
}
static int mei_cl_dma_alloc(struct mei_cl *cl, u8 buf_id, size_t size)
{
	cl->dma.vaddr = dmam_alloc_coherent(cl->dev->dev, size,
					    &cl->dma.daddr, GFP_KERNEL);
	if (!cl->dma.vaddr)
		return -ENOMEM;

	cl->dma.buffer_id = buf_id;
	cl->dma.size = size;

	return 0;
}

static void mei_cl_dma_free(struct mei_cl *cl)
{
	cl->dma.buffer_id = 0;
	dmam_free_coherent(cl->dev->dev,
			   cl->dma.size, cl->dma.vaddr, cl->dma.daddr);
	cl->dma.size = 0;
	cl->dma.vaddr = NULL;
	cl->dma.daddr = 0;
}
/**
 * mei_cl_dma_alloc_and_map - send client dma map request
 *
 * @cl: host client
 * @fp: pointer to file structure
 * @buffer_id: id of the mapped buffer
 * @size: size of the buffer
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp,
			     u8 buffer_id, size_t size)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_cd_supported) {
		cl_dbg(dev, cl, "client dma is not supported\n");
		return -EOPNOTSUPP;
	}

	if (buffer_id == 0)
		return -EINVAL;

	if (mei_cl_is_connected(cl))
		return -EPROTO;

	if (cl->dma_mapped)
		return -EPROTO;

	if (mei_cl_dma_map_find(dev, buffer_id)) {
		cl_dbg(dev, cl, "client dma with id %d is already allocated\n",
		       cl->dma.buffer_id);
		return -EPROTO;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	rets = mei_cl_dma_alloc(cl, buffer_id, size);
	if (rets) {
		pm_runtime_put_noidle(dev->dev);
		return rets;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DMA_MAP, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_dma_map_req(dev, cl)) {
			rets = -ENODEV;
			goto out;
		}
		list_move_tail(&cb->list, &dev->ctrl_rd_list);
	}

	cl->status = 0;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->dma_mapped || cl->status,
			   dev->timeouts.cl_connect);
	mutex_lock(&dev->device_lock);

	if (!cl->dma_mapped && !cl->status)
		cl->status = -EFAULT;

	rets = cl->status;

	if (rets)
		mei_cl_dma_free(cl);
out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);
	return rets;
}
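/*
 * Pairing sketch (illustrative): the DMA buffer is set up before the
 * client connects and torn down after it disconnects, since both calls
 * reject connected clients:
 *
 *	rets = mei_cl_dma_alloc_and_map(cl, fp, buffer_id, size);
 *	// ... mei_cl_connect(), I/O, mei_cl_disconnect() ...
 *	rets = mei_cl_dma_unmap(cl, fp);
 *
 * "buffer_id" must be non-zero and unique across the device's clients.
 */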
/**
 * mei_cl_dma_unmap - send client dma unmap request
 *
 * @cl: host client
 * @fp: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_cd_supported) {
		cl_dbg(dev, cl, "client dma is not supported\n");
		return -EOPNOTSUPP;
	}

	/* do not allow unmap for connected client */
	if (mei_cl_is_connected(cl))
		return -EPROTO;

	if (!cl->dma_mapped)
		return -EPROTO;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DMA_UNMAP, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_dma_unmap_req(dev, cl)) {
			rets = -ENODEV;
			goto out;
		}
		list_move_tail(&cb->list, &dev->ctrl_rd_list);
	}

	cl->status = 0;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   !cl->dma_mapped || cl->status,
			   dev->timeouts.cl_connect);
	mutex_lock(&dev->device_lock);

	if (cl->dma_mapped && !cl->status)
		cl->status = -EFAULT;

	rets = cl->status;

	if (!rets)
		mei_cl_dma_free(cl);
out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);
	return rets;
}