linux.git: drivers/nvme/host/pr.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015 Intel Corporation
 *      Keith Busch <[email protected]>
 */
#include <linux/blkdev.h>
#include <linux/pr.h>
#include <linux/unaligned.h>

#include "nvme.h"

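/*
 * Translate a block layer pr_type to the equivalent NVMe reservation type.
 * Returns 0, a reserved value in the NVMe spec, if there is no equivalent.
 */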
static enum nvme_pr_type nvme_pr_type_from_blk(enum pr_type type)
{
        switch (type) {
        case PR_WRITE_EXCLUSIVE:
                return NVME_PR_WRITE_EXCLUSIVE;
        case PR_EXCLUSIVE_ACCESS:
                return NVME_PR_EXCLUSIVE_ACCESS;
        case PR_WRITE_EXCLUSIVE_REG_ONLY:
                return NVME_PR_WRITE_EXCLUSIVE_REG_ONLY;
        case PR_EXCLUSIVE_ACCESS_REG_ONLY:
                return NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY;
        case PR_WRITE_EXCLUSIVE_ALL_REGS:
                return NVME_PR_WRITE_EXCLUSIVE_ALL_REGS;
        case PR_EXCLUSIVE_ACCESS_ALL_REGS:
                return NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS;
        }

        return 0;
}

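/* The inverse mapping: translate an NVMe reservation type to a pr_type. */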
static enum pr_type block_pr_type_from_nvme(enum nvme_pr_type type)
{
        switch (type) {
        case NVME_PR_WRITE_EXCLUSIVE:
                return PR_WRITE_EXCLUSIVE;
        case NVME_PR_EXCLUSIVE_ACCESS:
                return PR_EXCLUSIVE_ACCESS;
        case NVME_PR_WRITE_EXCLUSIVE_REG_ONLY:
                return PR_WRITE_EXCLUSIVE_REG_ONLY;
        case NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY:
                return PR_EXCLUSIVE_ACCESS_REG_ONLY;
        case NVME_PR_WRITE_EXCLUSIVE_ALL_REGS:
                return PR_WRITE_EXCLUSIVE_ALL_REGS;
        case NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS:
                return PR_EXCLUSIVE_ACCESS_ALL_REGS;
        }

        return 0;
}

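/*
 * For a multipath ns_head device, pick a usable path under SRCU protection
 * and submit the command on it.  Returns -EWOULDBLOCK when no usable path
 * is currently available.
 */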
static int nvme_send_ns_head_pr_command(struct block_device *bdev,
                struct nvme_command *c, void *data, unsigned int data_len)
{
        struct nvme_ns_head *head = bdev->bd_disk->private_data;
        int srcu_idx = srcu_read_lock(&head->srcu);
        struct nvme_ns *ns = nvme_find_path(head);
        int ret = -EWOULDBLOCK;

        if (ns) {
                c->common.nsid = cpu_to_le32(ns->head->ns_id);
                ret = nvme_submit_sync_cmd(ns->queue, c, data, data_len);
        }
        srcu_read_unlock(&head->srcu, srcu_idx);
        return ret;
}

static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c,
                void *data, unsigned int data_len)
{
        c->common.nsid = cpu_to_le32(ns->head->ns_id);
        return nvme_submit_sync_cmd(ns->queue, c, data, data_len);
}

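/*
 * Translate an NVMe command status into the block layer's PR status codes,
 * or a negative errno for errors that have a direct kernel equivalent.
 */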
static int nvme_status_to_pr_err(int status)
{
        if (nvme_is_path_error(status))
                return PR_STS_PATH_FAILED;

        switch (status & NVME_SCT_SC_MASK) {
        case NVME_SC_SUCCESS:
                return PR_STS_SUCCESS;
        case NVME_SC_RESERVATION_CONFLICT:
                return PR_STS_RESERVATION_CONFLICT;
        case NVME_SC_ONCS_NOT_SUPPORTED:
                return -EOPNOTSUPP;
        case NVME_SC_BAD_ATTRIBUTES:
        case NVME_SC_INVALID_OPCODE:
        case NVME_SC_INVALID_FIELD:
        case NVME_SC_INVALID_NS:
                return -EINVAL;
        default:
                return PR_STS_IOERR;
        }
}

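/*
 * Build a reservation command from the opcode and dword 10/11 values and
 * route it through the multipath-aware or plain namespace submission path.
 * Returns a negative errno on submission failure, else the NVMe status.
 */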
static int __nvme_send_pr_command(struct block_device *bdev, u32 cdw10,
                u32 cdw11, u8 op, void *data, unsigned int data_len)
{
        struct nvme_command c = { 0 };

        c.common.opcode = op;
        c.common.cdw10 = cpu_to_le32(cdw10);
        c.common.cdw11 = cpu_to_le32(cdw11);

        if (nvme_disk_is_ns_head(bdev->bd_disk))
                return nvme_send_ns_head_pr_command(bdev, &c, data, data_len);
        return nvme_send_ns_pr_command(bdev->bd_disk->private_data, &c,
                                data, data_len);
}

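/* As above, but with the NVMe status translated for the block layer. */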
static int nvme_send_pr_command(struct block_device *bdev, u32 cdw10, u32 cdw11,
                u8 op, void *data, unsigned int data_len)
{
        int ret;

        ret = __nvme_send_pr_command(bdev, cdw10, cdw11, op, data, data_len);
        return ret < 0 ? ret : nvme_status_to_pr_err(ret);
}

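/*
 * Register a new reservation key, or replace an existing one when old_key
 * is non-zero.  PR_FL_IGNORE_KEY maps to the NVMe IEKEY bit, and CPTPL is
 * set so that registrations persist across power loss.
 */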
static int nvme_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
                unsigned int flags)
{
        struct nvmet_pr_register_data data = { 0 };
        u32 cdw10;

        if (flags & ~PR_FL_IGNORE_KEY)
                return -EOPNOTSUPP;

        data.crkey = cpu_to_le64(old_key);
        data.nrkey = cpu_to_le64(new_key);

        cdw10 = old_key ? NVME_PR_REGISTER_ACT_REPLACE :
                NVME_PR_REGISTER_ACT_REG;
        cdw10 |= (flags & PR_FL_IGNORE_KEY) ? NVME_PR_IGNORE_KEY : 0;
        cdw10 |= NVME_PR_CPTPL_PERSIST;

        return nvme_send_pr_command(bdev, cdw10, 0, nvme_cmd_resv_register,
                        &data, sizeof(data));
}

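/* Acquire a reservation of the given type using the registered key. */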
static int nvme_pr_reserve(struct block_device *bdev, u64 key,
                enum pr_type type, unsigned flags)
{
        struct nvmet_pr_acquire_data data = { 0 };
        u32 cdw10;

        if (flags & ~PR_FL_IGNORE_KEY)
                return -EOPNOTSUPP;

        data.crkey = cpu_to_le64(key);

        cdw10 = NVME_PR_ACQUIRE_ACT_ACQUIRE;
        cdw10 |= nvme_pr_type_from_blk(type) << 8;
        cdw10 |= (flags & PR_FL_IGNORE_KEY) ? NVME_PR_IGNORE_KEY : 0;

        return nvme_send_pr_command(bdev, cdw10, 0, nvme_cmd_resv_acquire,
                        &data, sizeof(data));
}

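/*
 * Preempt another registrant, optionally requesting that the controller
 * also abort that registrant's in-flight commands.
 */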
static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
                enum pr_type type, bool abort)
{
        struct nvmet_pr_acquire_data data = { 0 };
        u32 cdw10;

        data.crkey = cpu_to_le64(old);
        data.prkey = cpu_to_le64(new);

        cdw10 = abort ? NVME_PR_ACQUIRE_ACT_PREEMPT_AND_ABORT :
                        NVME_PR_ACQUIRE_ACT_PREEMPT;
        cdw10 |= nvme_pr_type_from_blk(type) << 8;

        return nvme_send_pr_command(bdev, cdw10, 0, nvme_cmd_resv_acquire,
                        &data, sizeof(data));
}

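/*
 * Clear releases the reservation and removes all registrations.  A zero
 * key asks the controller to skip the key check (IEKEY).
 */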
static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
        struct nvmet_pr_release_data data = { 0 };
        u32 cdw10;

        data.crkey = cpu_to_le64(key);

        cdw10 = NVME_PR_RELEASE_ACT_CLEAR;
        cdw10 |= key ? 0 : NVME_PR_IGNORE_KEY;

        return nvme_send_pr_command(bdev, cdw10, 0, nvme_cmd_resv_release,
                        &data, sizeof(data));
}

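/* Release a reservation of the given type; a zero key sets IEKEY. */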
static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
        struct nvmet_pr_release_data data = { 0 };
        u32 cdw10;

        data.crkey = cpu_to_le64(key);

        cdw10 = NVME_PR_RELEASE_ACT_RELEASE;
        cdw10 |= nvme_pr_type_from_blk(type) << 8;
        cdw10 |= key ? 0 : NVME_PR_IGNORE_KEY;

        return nvme_send_pr_command(bdev, cdw10, 0, nvme_cmd_resv_release,
                        &data, sizeof(data));
}

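/*
 * Issue a Reservation Report.  The extended data structure (128-bit host
 * IDs) is tried first; if the controller reports an inconsistent host
 * identifier format, retry with the short (64-bit host ID) structure.
 */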
static int nvme_pr_resv_report(struct block_device *bdev, void *data,
                u32 data_len, bool *eds)
{
        u32 cdw10, cdw11;
        int ret;

        cdw10 = nvme_bytes_to_numd(data_len);
        cdw11 = NVME_EXTENDED_DATA_STRUCT;
        *eds = true;

retry:
        ret = __nvme_send_pr_command(bdev, cdw10, cdw11, nvme_cmd_resv_report,
                        data, data_len);
        if (ret == NVME_SC_HOST_ID_INCONSIST &&
            cdw11 == NVME_EXTENDED_DATA_STRUCT) {
                cdw11 = 0;
                *eds = false;
                goto retry;
        }

        return ret < 0 ? ret : nvme_status_to_pr_err(ret);
}

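/* Fill the block layer's pr_keys structure from a Reservation Report. */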
static int nvme_pr_read_keys(struct block_device *bdev,
                struct pr_keys *keys_info)
{
        u32 rse_len, num_keys = keys_info->num_keys;
        struct nvme_reservation_status_ext *rse;
        int ret, i;
        bool eds;

        /*
         * Assume we are using 128-bit host IDs and allocate a buffer large
         * enough to get enough keys to fill the return keys buffer.
         */
        rse_len = struct_size(rse, regctl_eds, num_keys);
        rse = kzalloc(rse_len, GFP_KERNEL);
        if (!rse)
                return -ENOMEM;

        ret = nvme_pr_resv_report(bdev, rse, rse_len, &eds);
        if (ret)
                goto free_rse;

        keys_info->generation = le32_to_cpu(rse->gen);
        keys_info->num_keys = get_unaligned_le16(&rse->regctl);

        num_keys = min(num_keys, keys_info->num_keys);
        for (i = 0; i < num_keys; i++) {
                if (eds) {
                        keys_info->keys[i] =
                                        le64_to_cpu(rse->regctl_eds[i].rkey);
                } else {
                        struct nvme_reservation_status *rs;

                        rs = (struct nvme_reservation_status *)rse;
                        keys_info->keys[i] = le64_to_cpu(rs->regctl_ds[i].rkey);
                }
        }

free_rse:
        kfree(rse);
        return ret;
}

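/*
 * Report the current reservation holder.  The buffer is first sized with a
 * single-entry probe; if the number of registrants changes between the
 * probe and the full report, start over.  The holder is the registrant
 * whose reservation status (rcsts) flag is set.
 */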
static int nvme_pr_read_reservation(struct block_device *bdev,
                struct pr_held_reservation *resv)
{
        struct nvme_reservation_status_ext tmp_rse, *rse;
        int ret, i, num_regs;
        u32 rse_len;
        bool eds;

get_num_regs:
        /*
         * Get the number of registrations so we know how big to allocate
         * the response buffer.
         */
        ret = nvme_pr_resv_report(bdev, &tmp_rse, sizeof(tmp_rse), &eds);
        if (ret)
                return ret;

        num_regs = get_unaligned_le16(&tmp_rse.regctl);
        if (!num_regs) {
                resv->generation = le32_to_cpu(tmp_rse.gen);
                return 0;
        }

        rse_len = struct_size(rse, regctl_eds, num_regs);
        rse = kzalloc(rse_len, GFP_KERNEL);
        if (!rse)
                return -ENOMEM;

        ret = nvme_pr_resv_report(bdev, rse, rse_len, &eds);
        if (ret)
                goto free_rse;

        if (num_regs != get_unaligned_le16(&rse->regctl)) {
                kfree(rse);
                goto get_num_regs;
        }

        resv->generation = le32_to_cpu(rse->gen);
        resv->type = block_pr_type_from_nvme(rse->rtype);

        for (i = 0; i < num_regs; i++) {
                if (eds) {
                        if (rse->regctl_eds[i].rcsts) {
                                resv->key = le64_to_cpu(rse->regctl_eds[i].rkey);
                                break;
                        }
                } else {
                        struct nvme_reservation_status *rs;

                        rs = (struct nvme_reservation_status *)rse;
                        if (rs->regctl_ds[i].rcsts) {
                                resv->key = le64_to_cpu(rs->regctl_ds[i].rkey);
                                break;
                        }
                }
        }

free_rse:
        kfree(rse);
        return ret;
}

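/*
 * Wired up via the gendisk's pr_ops: the block layer dispatches the
 * IOC_PR_* ioctls to these handlers.  A minimal userspace sketch, on a
 * hypothetical /dev/nvme0n1 opened as fd, might be:
 *
 *	struct pr_registration reg = { .new_key = 0xabcd };
 *	ioctl(fd, IOC_PR_REGISTER, &reg);
 *
 *	struct pr_reservation rsv = { .key = 0xabcd,
 *				      .type = PR_WRITE_EXCLUSIVE };
 *	ioctl(fd, IOC_PR_RESERVE, &rsv);
 */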
const struct pr_ops nvme_pr_ops = {
        .pr_register    = nvme_pr_register,
        .pr_reserve     = nvme_pr_reserve,
        .pr_release     = nvme_pr_release,
        .pr_preempt     = nvme_pr_preempt,
        .pr_clear       = nvme_pr_clear,
        .pr_read_keys   = nvme_pr_read_keys,
        .pr_read_reservation = nvme_pr_read_reservation,
};