// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2019 Samsung Electronics Co., Ltd.
 */
6 #include <linux/list.h>
8 #include <linux/slab.h>
9 #include <linux/workqueue.h>
12 #include "connection.h"
13 #include "ksmbd_work.h"
14 #include "mgmt/ksmbd_ida.h"
/* Slab cache backing all struct ksmbd_work allocations. */
static struct kmem_cache *work_cache;
/* Workqueue on which queued ksmbd_work items run (see ksmbd_queue_work()). */
static struct workqueue_struct *ksmbd_wq;
19 struct ksmbd_work *ksmbd_alloc_work_struct(void)
21 struct ksmbd_work *work = kmem_cache_zalloc(work_cache, GFP_KERNEL);
24 work->compound_fid = KSMBD_NO_FID;
25 work->compound_pfid = KSMBD_NO_FID;
26 INIT_LIST_HEAD(&work->request_entry);
27 INIT_LIST_HEAD(&work->async_request_entry);
28 INIT_LIST_HEAD(&work->fp_entry);
29 INIT_LIST_HEAD(&work->interim_entry);
30 INIT_LIST_HEAD(&work->aux_read_list);
31 work->iov_alloc_cnt = 4;
32 work->iov = kcalloc(work->iov_alloc_cnt, sizeof(struct kvec),
35 kmem_cache_free(work_cache, work);
42 void ksmbd_free_work_struct(struct ksmbd_work *work)
44 struct aux_read *ar, *tmp;
46 WARN_ON(work->saved_cred != NULL);
48 kvfree(work->response_buf);
50 list_for_each_entry_safe(ar, tmp, &work->aux_read_list, entry) {
57 kvfree(work->request_buf);
60 ksmbd_release_id(&work->conn->async_ida, work->async_id);
61 kmem_cache_free(work_cache, work);
64 void ksmbd_work_pool_destroy(void)
66 kmem_cache_destroy(work_cache);
69 int ksmbd_work_pool_init(void)
71 work_cache = kmem_cache_create("ksmbd_work_cache",
72 sizeof(struct ksmbd_work), 0,
73 SLAB_HWCACHE_ALIGN, NULL);
79 int ksmbd_workqueue_init(void)
81 ksmbd_wq = alloc_workqueue("ksmbd-io", 0, 0);
87 void ksmbd_workqueue_destroy(void)
89 destroy_workqueue(ksmbd_wq);
93 bool ksmbd_queue_work(struct ksmbd_work *work)
95 return queue_work(ksmbd_wq, &work->work);
98 static inline void __ksmbd_iov_pin(struct ksmbd_work *work, void *ib,
101 work->iov[++work->iov_idx].iov_base = ib;
102 work->iov[work->iov_idx].iov_len = ib_len;
106 static int __ksmbd_iov_pin_rsp(struct ksmbd_work *work, void *ib, int len,
107 void *aux_buf, unsigned int aux_size)
110 int need_iov_cnt = 1;
114 ar = kmalloc(sizeof(struct aux_read), GFP_KERNEL);
119 if (work->iov_alloc_cnt < work->iov_cnt + need_iov_cnt) {
122 work->iov_alloc_cnt += 4;
123 new = krealloc(work->iov,
124 sizeof(struct kvec) * work->iov_alloc_cnt,
125 GFP_KERNEL | __GFP_ZERO);
131 /* Plus rfc_length size on first iov */
132 if (!work->iov_idx) {
133 work->iov[work->iov_idx].iov_base = work->response_buf;
134 *(__be32 *)work->iov[0].iov_base = 0;
135 work->iov[work->iov_idx].iov_len = 4;
139 __ksmbd_iov_pin(work, ib, len);
140 inc_rfc1001_len(work->iov[0].iov_base, len);
143 __ksmbd_iov_pin(work, aux_buf, aux_size);
144 inc_rfc1001_len(work->iov[0].iov_base, aux_size);
147 list_add(&ar->entry, &work->aux_read_list);
153 int ksmbd_iov_pin_rsp(struct ksmbd_work *work, void *ib, int len)
155 return __ksmbd_iov_pin_rsp(work, ib, len, NULL, 0);
/*
 * Pin a response buffer plus an auxiliary read-data buffer; ownership of
 * @aux_buf transfers to @work (freed in ksmbd_free_work_struct()).
 */
int ksmbd_iov_pin_rsp_read(struct ksmbd_work *work, void *ib, int len,
			   void *aux_buf, unsigned int aux_size)
{
	return __ksmbd_iov_pin_rsp(work, ib, len, aux_buf, aux_size);
}
164 int allocate_interim_rsp_buf(struct ksmbd_work *work)
166 work->response_buf = kzalloc(MAX_CIFS_SMALL_BUFFER_SIZE, GFP_KERNEL);
167 if (!work->response_buf)
169 work->response_sz = MAX_CIFS_SMALL_BUFFER_SIZE;