// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015 Intel Corporation
 */
#include <linux/blkdev.h>
#include <linux/pr.h>
#include <linux/unaligned.h>

#include "nvme.h"

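/*
 * This file wires the block layer persistent-reservation API (struct
 * pr_ops) to the NVMe Reservation Register/Acquire/Release/Report
 * commands, translating between the block layer PR_* constants and the
 * NVME_PR_* encodings defined by the NVMe specification.
 */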
static enum nvme_pr_type nvme_pr_type_from_blk(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return NVME_PR_WRITE_EXCLUSIVE;
	case PR_EXCLUSIVE_ACCESS:
		return NVME_PR_EXCLUSIVE_ACCESS;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return NVME_PR_WRITE_EXCLUSIVE_REG_ONLY;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return NVME_PR_WRITE_EXCLUSIVE_ALL_REGS;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS;
	}

	return 0;
}

static enum pr_type block_pr_type_from_nvme(enum nvme_pr_type type)
{
	switch (type) {
	case NVME_PR_WRITE_EXCLUSIVE:
		return PR_WRITE_EXCLUSIVE;
	case NVME_PR_EXCLUSIVE_ACCESS:
		return PR_EXCLUSIVE_ACCESS;
	case NVME_PR_WRITE_EXCLUSIVE_REG_ONLY:
		return PR_WRITE_EXCLUSIVE_REG_ONLY;
	case NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return PR_EXCLUSIVE_ACCESS_REG_ONLY;
	case NVME_PR_WRITE_EXCLUSIVE_ALL_REGS:
		return PR_WRITE_EXCLUSIVE_ALL_REGS;
	case NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return PR_EXCLUSIVE_ACCESS_ALL_REGS;
	}

	return 0;
}

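/*
 * For a multipath (ns head) block device the namespace cannot be used
 * directly: a ready path has to be looked up under head->srcu, and if
 * no usable path exists the command fails with -EWOULDBLOCK.
 */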
static int nvme_send_ns_head_pr_command(struct block_device *bdev,
		struct nvme_command *c, void *data, unsigned int data_len)
{
	struct nvme_ns_head *head = bdev->bd_disk->private_data;
	int srcu_idx = srcu_read_lock(&head->srcu);
	struct nvme_ns *ns = nvme_find_path(head);
	int ret = -EWOULDBLOCK;

	if (ns) {
		c->common.nsid = cpu_to_le32(ns->head->ns_id);
		ret = nvme_submit_sync_cmd(ns->queue, c, data, data_len);
	}
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c,
		void *data, unsigned int data_len)
{
	c->common.nsid = cpu_to_le32(ns->head->ns_id);
	return nvme_submit_sync_cmd(ns->queue, c, data, data_len);
}

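/*
 * Translate an NVMe completion status into the block layer's PR_STS_*
 * codes. Path errors are reported as PR_STS_PATH_FAILED so upper
 * layers can retry on another path; statuses that indicate a malformed
 * or unsupported command map to -EINVAL/-EOPNOTSUPP, and everything
 * else collapses into PR_STS_IOERR.
 */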
static int nvme_status_to_pr_err(int status)
{
	if (nvme_is_path_error(status))
		return PR_STS_PATH_FAILED;

	switch (status & NVME_SCT_SC_MASK) {
	case NVME_SC_SUCCESS:
		return PR_STS_SUCCESS;
	case NVME_SC_RESERVATION_CONFLICT:
		return PR_STS_RESERVATION_CONFLICT;
	case NVME_SC_ONCS_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case NVME_SC_BAD_ATTRIBUTES:
	case NVME_SC_INVALID_OPCODE:
	case NVME_SC_INVALID_FIELD:
	case NVME_SC_INVALID_NS:
		return -EINVAL;
	default:
		return PR_STS_IOERR;
	}
}

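/*
 * Build the reservation command (opcode plus the caller-supplied cdw10
 * and cdw11 fields) and dispatch it either through the multipath head
 * or directly to the namespace. Returns a negative errno or the raw
 * NVMe status; nvme_send_pr_command() below folds the latter into a
 * PR_STS_* value.
 */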
static int __nvme_send_pr_command(struct block_device *bdev, u32 cdw10,
		u32 cdw11, u8 op, void *data, unsigned int data_len)
{
	struct nvme_command c = { 0 };

	c.common.opcode = op;
	c.common.cdw10 = cpu_to_le32(cdw10);
	c.common.cdw11 = cpu_to_le32(cdw11);

	if (nvme_disk_is_ns_head(bdev->bd_disk))
		return nvme_send_ns_head_pr_command(bdev, &c, data, data_len);
	return nvme_send_ns_pr_command(bdev->bd_disk->private_data, &c,
				       data, data_len);
}

static int nvme_send_pr_command(struct block_device *bdev, u32 cdw10, u32 cdw11,
		u8 op, void *data, unsigned int data_len)
{
	int ret;

	ret = __nvme_send_pr_command(bdev, cdw10, cdw11, op, data, data_len);
	return ret < 0 ? ret : nvme_status_to_pr_err(ret);
}

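/*
 * Register (old_key == 0) or replace (old_key != 0) this host's
 * reservation key. NVME_PR_CPTPL_PERSIST asks the controller to keep
 * the registration across power loss, consistent with how upper layers
 * use the block layer PR API.
 */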
static int nvme_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
		unsigned flags)
{
	struct nvmet_pr_register_data data = { 0 };
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	data.crkey = cpu_to_le64(old_key);
	data.nrkey = cpu_to_le64(new_key);

	cdw10 = old_key ? NVME_PR_REGISTER_ACT_REPLACE :
			NVME_PR_REGISTER_ACT_REG;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? NVME_PR_IGNORE_KEY : 0;
	cdw10 |= NVME_PR_CPTPL_PERSIST;

	return nvme_send_pr_command(bdev, cdw10, 0, nvme_cmd_resv_register,
			&data, sizeof(data));
}

static int nvme_pr_reserve(struct block_device *bdev, u64 key,
		enum pr_type type, unsigned flags)
{
	struct nvmet_pr_acquire_data data = { 0 };
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	data.crkey = cpu_to_le64(key);

	cdw10 = NVME_PR_ACQUIRE_ACT_ACQUIRE;
	cdw10 |= nvme_pr_type_from_blk(type) << 8;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? NVME_PR_IGNORE_KEY : 0;

	return nvme_send_pr_command(bdev, cdw10, 0, nvme_cmd_resv_acquire,
			&data, sizeof(data));
}

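/*
 * Preempt the reservation held under "old" in favour of "new". With
 * abort set the controller also aborts commands in flight from the
 * preempted host (Preempt and Abort), the stronger of the two NVMe
 * preempt actions.
 */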
static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
		enum pr_type type, bool abort)
{
	struct nvmet_pr_acquire_data data = { 0 };
	u32 cdw10;

	data.crkey = cpu_to_le64(old);
	data.prkey = cpu_to_le64(new);

	cdw10 = abort ? NVME_PR_ACQUIRE_ACT_PREEMPT_AND_ABORT :
			NVME_PR_ACQUIRE_ACT_PREEMPT;
	cdw10 |= nvme_pr_type_from_blk(type) << 8;

	return nvme_send_pr_command(bdev, cdw10, 0, nvme_cmd_resv_acquire,
			&data, sizeof(data));
}

static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
	struct nvmet_pr_release_data data = { 0 };
	u32 cdw10;

	data.crkey = cpu_to_le64(key);

	cdw10 = NVME_PR_RELEASE_ACT_CLEAR;
	cdw10 |= key ? 0 : NVME_PR_IGNORE_KEY;

	return nvme_send_pr_command(bdev, cdw10, 0, nvme_cmd_resv_release,
			&data, sizeof(data));
}

static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	struct nvmet_pr_release_data data = { 0 };
	u32 cdw10;

	data.crkey = cpu_to_le64(key);

	cdw10 = NVME_PR_RELEASE_ACT_RELEASE;
	cdw10 |= nvme_pr_type_from_blk(type) << 8;
	cdw10 |= key ? 0 : NVME_PR_IGNORE_KEY;

	return nvme_send_pr_command(bdev, cdw10, 0, nvme_cmd_resv_release,
			&data, sizeof(data));
}

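/*
 * Issue a Reservation Report. cdw10 carries the buffer length in
 * dwords, 0's based (nvme_bytes_to_numd()), and cdw11 selects the
 * extended data structure with 128-bit host IDs. Controllers
 * configured with 64-bit host IDs fail that with
 * NVME_SC_HOST_ID_INCONSIST, in which case we retry with the regular
 * data structure and report back which layout was used via *eds.
 */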
static int nvme_pr_resv_report(struct block_device *bdev, void *data,
		u32 data_len, bool *eds)
{
	u32 cdw10, cdw11;
	int ret;

	cdw10 = nvme_bytes_to_numd(data_len);
	cdw11 = NVME_EXTENDED_DATA_STRUCT;
	*eds = true;

retry:
	ret = __nvme_send_pr_command(bdev, cdw10, cdw11, nvme_cmd_resv_report,
			data, data_len);
	if (ret == NVME_SC_HOST_ID_INCONSIST &&
	    cdw11 == NVME_EXTENDED_DATA_STRUCT) {
		cdw11 = 0;
		*eds = false;
		goto retry;
	}

	return ret < 0 ? ret : nvme_status_to_pr_err(ret);
}

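/*
 * The report comes back in one of two layouts: struct
 * nvme_reservation_status_ext (regctl_eds[], 128-bit host IDs) when
 * the extended data structure was accepted, or struct
 * nvme_reservation_status (regctl_ds[], 64-bit host IDs) otherwise.
 * The extended form is the larger of the two, so sizing the buffer for
 * it is always sufficient.
 */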
static int nvme_pr_read_keys(struct block_device *bdev,
		struct pr_keys *keys_info)
{
	u32 rse_len, num_keys = keys_info->num_keys;
	struct nvme_reservation_status_ext *rse;
	int ret, i;
	bool eds;

	/*
	 * Assume we are using 128-bit host IDs and allocate a buffer large
	 * enough to get enough keys to fill the return keys buffer.
	 */
	rse_len = struct_size(rse, regctl_eds, num_keys);
	rse = kzalloc(rse_len, GFP_KERNEL);
	if (!rse)
		return -ENOMEM;

	ret = nvme_pr_resv_report(bdev, rse, rse_len, &eds);
	if (ret)
		goto free_rse;

	keys_info->generation = le32_to_cpu(rse->gen);
	keys_info->num_keys = get_unaligned_le16(&rse->regctl);

	num_keys = min(num_keys, keys_info->num_keys);
	for (i = 0; i < num_keys; i++) {
		if (eds) {
			keys_info->keys[i] =
					le64_to_cpu(rse->regctl_eds[i].rkey);
		} else {
			struct nvme_reservation_status *rs;

			rs = (struct nvme_reservation_status *)rse;
			keys_info->keys[i] = le64_to_cpu(rs->regctl_ds[i].rkey);
		}
	}

free_rse:
	kfree(rse);
	return ret;
}

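/*
 * Reading the held reservation is a two-step dance: a small report to
 * learn the number of registrants, then a full-sized report. If the
 * registrant count changed in between, start over so the buffer is
 * never too small.
 */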
static int nvme_pr_read_reservation(struct block_device *bdev,
		struct pr_held_reservation *resv)
{
	struct nvme_reservation_status_ext tmp_rse, *rse;
	int ret, i, num_regs;
	u32 rse_len;
	bool eds;

get_num_regs:
	/*
	 * Get the number of registrations so we know how big to allocate
	 * the response buffer.
	 */
	ret = nvme_pr_resv_report(bdev, &tmp_rse, sizeof(tmp_rse), &eds);
	if (ret)
		return ret;

	num_regs = get_unaligned_le16(&tmp_rse.regctl);
	if (!num_regs) {
		resv->generation = le32_to_cpu(tmp_rse.gen);
		return 0;
	}

	rse_len = struct_size(rse, regctl_eds, num_regs);
	rse = kzalloc(rse_len, GFP_KERNEL);
	if (!rse)
		return -ENOMEM;

	ret = nvme_pr_resv_report(bdev, rse, rse_len, &eds);
	if (ret)
		goto free_rse;

	if (num_regs != get_unaligned_le16(&rse->regctl)) {
		kfree(rse);
		goto get_num_regs;
	}

	resv->generation = le32_to_cpu(rse->gen);
	resv->type = block_pr_type_from_nvme(rse->rtype);

	for (i = 0; i < num_regs; i++) {
		if (eds) {
			if (rse->regctl_eds[i].rcsts) {
				resv->key = le64_to_cpu(rse->regctl_eds[i].rkey);
				break;
			}
		} else {
			struct nvme_reservation_status *rs;

			rs = (struct nvme_reservation_status *)rse;
			if (rs->regctl_ds[i].rcsts) {
				resv->key = le64_to_cpu(rs->regctl_ds[i].rkey);
				break;
			}
		}
	}

free_rse:
	kfree(rse);
	return ret;
}

const struct pr_ops nvme_pr_ops = {
	.pr_register	= nvme_pr_register,
	.pr_reserve	= nvme_pr_reserve,
	.pr_release	= nvme_pr_release,
	.pr_preempt	= nvme_pr_preempt,
	.pr_clear	= nvme_pr_clear,
	.pr_read_keys	= nvme_pr_read_keys,
	.pr_read_reservation	= nvme_pr_read_reservation,
};

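/*
 * These ops are reached through the generic persistent-reservation
 * ioctls on the block device node. A minimal userspace sketch, with
 * /dev/nvme0n1 as an example path (see include/uapi/linux/pr.h for
 * the ABI):
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/pr.h>
 *	#include <err.h>
 *
 *	struct pr_registration reg = { .old_key = 0, .new_key = 0xabcd };
 *	int fd = open("/dev/nvme0n1", O_RDWR);
 *
 *	if (fd < 0 || ioctl(fd, IOC_PR_REGISTER, &reg))
 *		err(1, "IOC_PR_REGISTER");
 *
 * With old_key == 0 this reaches nvme_pr_register() above and issues a
 * Reservation Register command with the "register" action.
 */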