]> Git Repo - linux.git/blob - fs/smb/server/ksmbd_work.c
x86/CPU/AMD: Move Zenbleed check to the Zen2 init function
[linux.git] / fs / smb / server / ksmbd_work.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *   Copyright (C) 2019 Samsung Electronics Co., Ltd.
4  */
5
6 #include <linux/list.h>
7 #include <linux/mm.h>
8 #include <linux/slab.h>
9 #include <linux/workqueue.h>
10
11 #include "server.h"
12 #include "connection.h"
13 #include "ksmbd_work.h"
14 #include "mgmt/ksmbd_ida.h"
15
16 static struct kmem_cache *work_cache;
17 static struct workqueue_struct *ksmbd_wq;
18
19 struct ksmbd_work *ksmbd_alloc_work_struct(void)
20 {
21         struct ksmbd_work *work = kmem_cache_zalloc(work_cache, GFP_KERNEL);
22
23         if (work) {
24                 work->compound_fid = KSMBD_NO_FID;
25                 work->compound_pfid = KSMBD_NO_FID;
26                 INIT_LIST_HEAD(&work->request_entry);
27                 INIT_LIST_HEAD(&work->async_request_entry);
28                 INIT_LIST_HEAD(&work->fp_entry);
29                 INIT_LIST_HEAD(&work->interim_entry);
30                 INIT_LIST_HEAD(&work->aux_read_list);
31                 work->iov_alloc_cnt = 4;
32                 work->iov = kcalloc(work->iov_alloc_cnt, sizeof(struct kvec),
33                                     GFP_KERNEL);
34                 if (!work->iov) {
35                         kmem_cache_free(work_cache, work);
36                         work = NULL;
37                 }
38         }
39         return work;
40 }
41
42 void ksmbd_free_work_struct(struct ksmbd_work *work)
43 {
44         struct aux_read *ar, *tmp;
45
46         WARN_ON(work->saved_cred != NULL);
47
48         kvfree(work->response_buf);
49
50         list_for_each_entry_safe(ar, tmp, &work->aux_read_list, entry) {
51                 kvfree(ar->buf);
52                 list_del(&ar->entry);
53                 kfree(ar);
54         }
55
56         kfree(work->tr_buf);
57         kvfree(work->request_buf);
58         kfree(work->iov);
59         if (work->async_id)
60                 ksmbd_release_id(&work->conn->async_ida, work->async_id);
61         kmem_cache_free(work_cache, work);
62 }
63
64 void ksmbd_work_pool_destroy(void)
65 {
66         kmem_cache_destroy(work_cache);
67 }
68
69 int ksmbd_work_pool_init(void)
70 {
71         work_cache = kmem_cache_create("ksmbd_work_cache",
72                                        sizeof(struct ksmbd_work), 0,
73                                        SLAB_HWCACHE_ALIGN, NULL);
74         if (!work_cache)
75                 return -ENOMEM;
76         return 0;
77 }
78
79 int ksmbd_workqueue_init(void)
80 {
81         ksmbd_wq = alloc_workqueue("ksmbd-io", 0, 0);
82         if (!ksmbd_wq)
83                 return -ENOMEM;
84         return 0;
85 }
86
87 void ksmbd_workqueue_destroy(void)
88 {
89         destroy_workqueue(ksmbd_wq);
90         ksmbd_wq = NULL;
91 }
92
/**
 * ksmbd_queue_work() - schedule a work item on the ksmbd workqueue
 * @work: work item whose embedded work_struct is queued
 *
 * Return: true if queued, false if it was already pending.
 */
bool ksmbd_queue_work(struct ksmbd_work *work)
{
	return queue_work(ksmbd_wq, &work->work);
}
97
98 static inline void __ksmbd_iov_pin(struct ksmbd_work *work, void *ib,
99                                    unsigned int ib_len)
100 {
101         work->iov[++work->iov_idx].iov_base = ib;
102         work->iov[work->iov_idx].iov_len = ib_len;
103         work->iov_cnt++;
104 }
105
106 static int __ksmbd_iov_pin_rsp(struct ksmbd_work *work, void *ib, int len,
107                                void *aux_buf, unsigned int aux_size)
108 {
109         struct aux_read *ar;
110         int need_iov_cnt = 1;
111
112         if (aux_size) {
113                 need_iov_cnt++;
114                 ar = kmalloc(sizeof(struct aux_read), GFP_KERNEL);
115                 if (!ar)
116                         return -ENOMEM;
117         }
118
119         if (work->iov_alloc_cnt < work->iov_cnt + need_iov_cnt) {
120                 struct kvec *new;
121
122                 work->iov_alloc_cnt += 4;
123                 new = krealloc(work->iov,
124                                sizeof(struct kvec) * work->iov_alloc_cnt,
125                                GFP_KERNEL | __GFP_ZERO);
126                 if (!new)
127                         return -ENOMEM;
128                 work->iov = new;
129         }
130
131         /* Plus rfc_length size on first iov */
132         if (!work->iov_idx) {
133                 work->iov[work->iov_idx].iov_base = work->response_buf;
134                 *(__be32 *)work->iov[0].iov_base = 0;
135                 work->iov[work->iov_idx].iov_len = 4;
136                 work->iov_cnt++;
137         }
138
139         __ksmbd_iov_pin(work, ib, len);
140         inc_rfc1001_len(work->iov[0].iov_base, len);
141
142         if (aux_size) {
143                 __ksmbd_iov_pin(work, aux_buf, aux_size);
144                 inc_rfc1001_len(work->iov[0].iov_base, aux_size);
145
146                 ar->buf = aux_buf;
147                 list_add(&ar->entry, &work->aux_read_list);
148         }
149
150         return 0;
151 }
152
/**
 * ksmbd_iov_pin_rsp() - pin a plain response buffer into the work iovec
 * @work: work item owning the response iovec
 * @ib: response data to pin
 * @len: length of @ib in bytes
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
int ksmbd_iov_pin_rsp(struct ksmbd_work *work, void *ib, int len)
{
	return __ksmbd_iov_pin_rsp(work, ib, len, NULL, 0);
}
157
/**
 * ksmbd_iov_pin_rsp_read() - pin a response buffer plus a read payload
 * @work: work item owning the response iovec
 * @ib: response header data to pin
 * @len: length of @ib in bytes
 * @aux_buf: read payload buffer; ownership passes to @work on success
 * @aux_size: size of @aux_buf in bytes
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
int ksmbd_iov_pin_rsp_read(struct ksmbd_work *work, void *ib, int len,
			   void *aux_buf, unsigned int aux_size)
{
	return __ksmbd_iov_pin_rsp(work, ib, len, aux_buf, aux_size);
}
163
164 int allocate_interim_rsp_buf(struct ksmbd_work *work)
165 {
166         work->response_buf = kzalloc(MAX_CIFS_SMALL_BUFFER_SIZE, GFP_KERNEL);
167         if (!work->response_buf)
168                 return -ENOMEM;
169         work->response_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
170         return 0;
171 }
This page took 0.034748 seconds and 4 git commands to generate.