// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2024 Marvell. */

#include <linux/iopoll.h>

#include "octep_vdpa.h"

enum octep_mbox_ids {
        OCTEP_MBOX_MSG_SET_VQ_STATE = 1,
        OCTEP_MBOX_MSG_GET_VQ_STATE,
};

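/*
 * Polling budget for device/firmware handshakes. readx_poll_timeout()
 * takes its timeout in microseconds, so this is a 10 second ceiling;
 * call sites below poll at 10 us intervals.
 */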
#define OCTEP_HW_TIMEOUT       10000000

#define MBOX_OFFSET            64
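/* Mailbox status word layout: bit 0 flags a response/free mailbox, bits 1-15 carry the return code. */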
#define MBOX_RSP_MASK          0x00000001
#define MBOX_RC_MASK           0x0000FFFE

#define MBOX_RSP_TO_ERR(val)   (-(((val) & MBOX_RC_MASK) >> 2))
#define MBOX_AVAIL(val)        (((val) & MBOX_RSP_MASK))
#define MBOX_RSP(val)          ((val) & (MBOX_RC_MASK | MBOX_RSP_MASK))

#define DEV_RST_ACK_BIT        7
#define FEATURE_SEL_ACK_BIT    15
#define QUEUE_SEL_ACK_BIT      15

struct octep_mbox_hdr {
        u8 ver;
        u8 rsvd1;
        u16 id;
        u16 rsvd2;
#define MBOX_REQ_SIG (0xdead)
#define MBOX_RSP_SIG (0xbeef)
        u16 sig;
};

struct octep_mbox_sts {
        u16 rsp:1;
        u16 rc:15;
        u16 rsvd;
};

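/*
 * Mailbox window shared with device firmware. It sits MBOX_OFFSET bytes
 * past the virtio device-specific config region (see octep_get_mbox());
 * data[0] carries the queue id and the message payload starts at data[1].
 */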
struct octep_mbox {
        struct octep_mbox_hdr hdr;
        struct octep_mbox_sts sts;
        u64 rsvd;
        u32 data[];
};

static inline struct octep_mbox __iomem *octep_get_mbox(struct octep_hw *oct_hw)
{
        return (struct octep_mbox __iomem *)(oct_hw->dev_cfg + MBOX_OFFSET);
}

static inline int octep_wait_for_mbox_avail(struct octep_mbox __iomem *mbox)
{
        u32 val;

        return readx_poll_timeout(ioread32, &mbox->sts, val, MBOX_AVAIL(val), 10,
                                  OCTEP_HW_TIMEOUT);
}

static inline int octep_wait_for_mbox_rsp(struct octep_mbox __iomem *mbox)
{
        u32 val;

        return readx_poll_timeout(ioread32, &mbox->sts, val, MBOX_RSP(val), 10,
                                  OCTEP_HW_TIMEOUT);
}

static inline void octep_write_hdr(struct octep_mbox __iomem *mbox, u16 id, u16 sig)
{
        iowrite16(id, &mbox->hdr.id);
        iowrite16(sig, &mbox->hdr.sig);
}

static inline u32 octep_read_sig(struct octep_mbox __iomem *mbox)
{
        return ioread16(&mbox->hdr.sig);
}

static inline void octep_write_sts(struct octep_mbox __iomem *mbox, u32 sts)
{
        iowrite32(sts, &mbox->sts);
}

static inline u32 octep_read_sts(struct octep_mbox __iomem *mbox)
{
        return ioread32(&mbox->sts);
}

static inline u32 octep_read32_word(struct octep_mbox __iomem *mbox, u16 word_idx)
{
        return ioread32(&mbox->data[word_idx]);
}

static inline void octep_write32_word(struct octep_mbox __iomem *mbox, u16 word_idx, u32 word)
{
        iowrite32(word, &mbox->data[word_idx]);
}

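/*
 * Issue one mailbox request and wait for the firmware's reply: wait
 * until the mailbox is free, stage the payload at data[1..] (for
 * writes), put the queue id in data[0], clear the status word, then
 * kick firmware by writing the message id with MBOX_REQ_SIG. Firmware
 * signals completion by rewriting the signature to MBOX_RSP_SIG and
 * updating the status word; a non-zero rc is translated to a negative
 * error, and read requests copy the reply back out of data[1..].
 */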
static int octep_process_mbox(struct octep_hw *oct_hw, u16 id, u16 qid, void *buffer,
                              u32 buf_size, bool write)
{
        struct octep_mbox __iomem *mbox = octep_get_mbox(oct_hw);
        struct pci_dev *pdev = oct_hw->pdev;
        u32 *p = (u32 *)buffer;
        u16 data_wds;
        int ret, i;
        u32 val;

        if (!IS_ALIGNED(buf_size, 4))
                return -EINVAL;

        /* Make sure mbox space is available */
        ret = octep_wait_for_mbox_avail(mbox);
        if (ret) {
                dev_warn(&pdev->dev, "Timeout waiting for previous mbox data to be consumed\n");
                return ret;
        }
        data_wds = buf_size / 4;

        if (write) {
                for (i = 1; i <= data_wds; i++) {
                        octep_write32_word(mbox, i, *p);
                        p++;
                }
        }
        octep_write32_word(mbox, 0, (u32)qid);
        octep_write_sts(mbox, 0);

        octep_write_hdr(mbox, id, MBOX_REQ_SIG);

        ret = octep_wait_for_mbox_rsp(mbox);
        if (ret) {
                dev_warn(&pdev->dev, "Timeout waiting for response to mbox message %d\n", id);
                return ret;
        }

        val = octep_read_sig(mbox);
        if ((val & 0xFFFF) != MBOX_RSP_SIG) {
                dev_warn(&pdev->dev, "Invalid signature in mbox message %d response\n", id);
                return -EINVAL;
        }

        val = octep_read_sts(mbox);
        if (val & MBOX_RC_MASK) {
                ret = MBOX_RSP_TO_ERR(val);
                dev_warn(&pdev->dev, "Error processing mbox message %d: err %d\n", id, ret);
                return ret;
        }

        if (!write)
                for (i = 1; i <= data_wds; i++)
                        *p++ = octep_read32_word(mbox, i);

        return 0;
}

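/* Mark the mailbox available (status bit 0) so the first request can go out. */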
static void octep_mbox_init(struct octep_mbox __iomem *mbox)
{
        iowrite32(1, &mbox->sts);
}

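/*
 * This device requires modern virtio: VERSION_1, packed virtqueues and
 * notification data are all mandatory, so reject any feature set that
 * lacks one of them.
 */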
int octep_verify_features(u64 features)
{
        /* Minimum features to expect */
        if (!(features & BIT_ULL(VIRTIO_F_VERSION_1)))
                return -EOPNOTSUPP;

        if (!(features & BIT_ULL(VIRTIO_F_NOTIFICATION_DATA)))
                return -EOPNOTSUPP;

        if (!(features & BIT_ULL(VIRTIO_F_RING_PACKED)))
                return -EOPNOTSUPP;

        return 0;
}

u8 octep_hw_get_status(struct octep_hw *oct_hw)
{
        return ioread8(&oct_hw->common_cfg->device_status);
}

void octep_hw_set_status(struct octep_hw *oct_hw, u8 status)
{
        iowrite8(status, &oct_hw->common_cfg->device_status);
}

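/*
 * Reset handshake: write status 0 with DEV_RST_ACK_BIT set, then poll
 * until firmware clears the whole status byte to acknowledge the reset.
 */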
void octep_hw_reset(struct octep_hw *oct_hw)
{
        u8 val;

        octep_hw_set_status(oct_hw, 0 | BIT(DEV_RST_ACK_BIT));
        if (readx_poll_timeout(ioread8, &oct_hw->common_cfg->device_status, val, !val, 10,
                               OCTEP_HW_TIMEOUT)) {
                dev_warn(&oct_hw->pdev->dev, "Octeon device reset timeout\n");
                return;
        }
}

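/*
 * Feature-select writes use an echo-back ack scheme: set
 * FEATURE_SEL_ACK_BIT alongside the select value and wait for firmware
 * to write back the bare select value once it has latched the write.
 */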
static int feature_sel_write_with_timeout(struct octep_hw *oct_hw, u32 select, void __iomem *addr)
{
        u32 val;

        iowrite32(select | BIT(FEATURE_SEL_ACK_BIT), addr);

        if (readx_poll_timeout(ioread32, addr, val, val == select, 10, OCTEP_HW_TIMEOUT)) {
                dev_warn(&oct_hw->pdev->dev, "Feature select %u write timeout\n", select);
                return -1;
        }
        return 0;
}

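/*
 * Per the virtio common config layout, the 64-bit feature words are
 * exposed as two 32-bit halves selected by writing 0 or 1 to the
 * corresponding feature-select register.
 */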
u64 octep_hw_get_dev_features(struct octep_hw *oct_hw)
{
        u32 features_lo, features_hi;

        if (feature_sel_write_with_timeout(oct_hw, 0, &oct_hw->common_cfg->device_feature_select))
                return 0;

        features_lo = ioread32(&oct_hw->common_cfg->device_feature);

        if (feature_sel_write_with_timeout(oct_hw, 1, &oct_hw->common_cfg->device_feature_select))
                return 0;

        features_hi = ioread32(&oct_hw->common_cfg->device_feature);

        return ((u64)features_hi << 32) | features_lo;
}

u64 octep_hw_get_drv_features(struct octep_hw *oct_hw)
{
        u32 features_lo, features_hi;

        if (feature_sel_write_with_timeout(oct_hw, 0, &oct_hw->common_cfg->guest_feature_select))
                return 0;

        features_lo = ioread32(&oct_hw->common_cfg->guest_feature);

        if (feature_sel_write_with_timeout(oct_hw, 1, &oct_hw->common_cfg->guest_feature_select))
                return 0;

        features_hi = ioread32(&oct_hw->common_cfg->guest_feature);

        return ((u64)features_hi << 32) | features_lo;
}

void octep_hw_set_drv_features(struct octep_hw *oct_hw, u64 features)
{
        if (feature_sel_write_with_timeout(oct_hw, 0, &oct_hw->common_cfg->guest_feature_select))
                return;

        iowrite32(features & (BIT_ULL(32) - 1), &oct_hw->common_cfg->guest_feature);

        if (feature_sel_write_with_timeout(oct_hw, 1, &oct_hw->common_cfg->guest_feature_select))
                return;

        iowrite32(features >> 32, &oct_hw->common_cfg->guest_feature);
}

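/* Queue selection uses the same echo-back ack handshake, via QUEUE_SEL_ACK_BIT. */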
void octep_write_queue_select(struct octep_hw *oct_hw, u16 queue_id)
{
        u16 val;

        iowrite16(queue_id | BIT(QUEUE_SEL_ACK_BIT), &oct_hw->common_cfg->queue_select);

        if (readx_poll_timeout(ioread16, &oct_hw->common_cfg->queue_select, val, val == queue_id,
                               10, OCTEP_HW_TIMEOUT)) {
                dev_warn(&oct_hw->pdev->dev, "Queue select write timeout\n");
                return;
        }
}

void octep_notify_queue(struct octep_hw *oct_hw, u16 qid)
{
        iowrite16(qid, oct_hw->vqs[qid].notify_addr);
}

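/*
 * Torn-read protection per the virtio spec: reread the config bytes
 * until config_generation is unchanged across the whole read.
 */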
void octep_read_dev_config(struct octep_hw *oct_hw, u64 offset, void *dst, int length)
{
        u8 old_gen, new_gen, *p;
        int i;

        if (WARN_ON(offset + length > oct_hw->config_size))
                return;

        do {
                old_gen = ioread8(&oct_hw->common_cfg->config_generation);
                p = dst;
                for (i = 0; i < length; i++)
                        *p++ = ioread8(oct_hw->dev_cfg + offset + i);

                new_gen = ioread8(&oct_hw->common_cfg->config_generation);
        } while (old_gen != new_gen);
}

int octep_set_vq_address(struct octep_hw *oct_hw, u16 qid, u64 desc_area, u64 driver_area,
                         u64 device_area)
{
        struct virtio_pci_common_cfg __iomem *cfg = oct_hw->common_cfg;

        octep_write_queue_select(oct_hw, qid);
        vp_iowrite64_twopart(desc_area, &cfg->queue_desc_lo,
                             &cfg->queue_desc_hi);
        vp_iowrite64_twopart(driver_area, &cfg->queue_avail_lo,
                             &cfg->queue_avail_hi);
        vp_iowrite64_twopart(device_area, &cfg->queue_used_lo,
                             &cfg->queue_used_hi);

        return 0;
}

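/*
 * Virtqueue state is maintained by device firmware, so saving and
 * restoring it goes over the mailbox rather than through the common
 * config window.
 */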
int octep_get_vq_state(struct octep_hw *oct_hw, u16 qid, struct vdpa_vq_state *state)
{
        return octep_process_mbox(oct_hw, OCTEP_MBOX_MSG_GET_VQ_STATE, qid, state,
                                  sizeof(*state), false);
}

int octep_set_vq_state(struct octep_hw *oct_hw, u16 qid, const struct vdpa_vq_state *state)
{
        struct vdpa_vq_state q_state;

        memcpy(&q_state, state, sizeof(struct vdpa_vq_state));
        return octep_process_mbox(oct_hw, OCTEP_MBOX_MSG_SET_VQ_STATE, qid, &q_state,
                                  sizeof(*state), true);
}

void octep_set_vq_num(struct octep_hw *oct_hw, u16 qid, u32 num)
{
        struct virtio_pci_common_cfg __iomem *cfg = oct_hw->common_cfg;

        octep_write_queue_select(oct_hw, qid);
        iowrite16(num, &cfg->queue_size);
}

void octep_set_vq_ready(struct octep_hw *oct_hw, u16 qid, bool ready)
{
        struct virtio_pci_common_cfg __iomem *cfg = oct_hw->common_cfg;

        octep_write_queue_select(oct_hw, qid);
        iowrite16(ready, &cfg->queue_enable);
}

bool octep_get_vq_ready(struct octep_hw *oct_hw, u16 qid)
{
        struct virtio_pci_common_cfg __iomem *cfg = oct_hw->common_cfg;

        octep_write_queue_select(oct_hw, qid);
        return ioread16(&cfg->queue_enable);
}

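/* Queue 0's size is reported for all queues; this assumes every queue is provisioned with the same depth. */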
u16 octep_get_vq_size(struct octep_hw *oct_hw)
{
        octep_write_queue_select(oct_hw, 0);
        return ioread16(&oct_hw->common_cfg->queue_size);
}

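/* The device-specific config region is always a virtio-net config for this device. */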
static u32 octep_get_config_size(struct octep_hw *oct_hw)
{
        return sizeof(struct virtio_net_config);
}

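/*
 * Translate a vendor capability into a mapped address, rejecting
 * capabilities that point outside the expected BAR or whose
 * offset/length overflow the BAR's resource window.
 */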
static void __iomem *octep_get_cap_addr(struct octep_hw *oct_hw, struct virtio_pci_cap *cap)
{
        struct device *dev = &oct_hw->pdev->dev;
        u32 length = le32_to_cpu(cap->length);
        u32 offset = le32_to_cpu(cap->offset);
        u8  bar    = cap->bar;
        u32 len;

        if (bar != OCTEP_HW_CAPS_BAR) {
                dev_err(dev, "Invalid bar: %u\n", bar);
                return NULL;
        }
        if (offset + length < offset) {
                dev_err(dev, "offset(%u) + length(%u) overflows\n",
                        offset, length);
                return NULL;
        }
        len = pci_resource_len(oct_hw->pdev, bar);
        if (offset + length > len) {
                dev_err(dev, "invalid cap: overflows bar space: %u > %u\n",
                        offset + length, len);
                return NULL;
        }
        return oct_hw->base[bar] + offset;
}

/* On the Octeon DPU, the virtio config space is completely emulated
 * by the device's firmware, so the standard PCI config read APIs
 * can't be used to read the virtio capabilities; they are read byte
 * by byte out of the caps BAR instead.
 */
static void octep_pci_caps_read(struct octep_hw *oct_hw, void *buf, size_t len, off_t offset)
{
        u8 __iomem *bar = oct_hw->base[OCTEP_HW_CAPS_BAR];
        u8 *p = buf;
        size_t i;

        for (i = 0; i < len; i++)
                *p++ = ioread8(bar + offset + i);
}

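/* Firmware readiness is advertised as two signature words at the start of the caps BAR. */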
static int octep_pci_signature_verify(struct octep_hw *oct_hw)
{
        u32 signature[2];

        octep_pci_caps_read(oct_hw, &signature, sizeof(signature), 0);

        if (signature[0] != OCTEP_FW_READY_SIGNATURE0)
                return -1;

        if (signature[1] != OCTEP_FW_READY_SIGNATURE1)
                return -1;

        return 0;
}

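/*
 * Probe-time discovery: verify the firmware signature, walk the
 * emulated PCI vendor capability list to locate and map the virtio
 * common, notify, device and ISR config regions, then set up per-queue
 * notification addresses and initialize the firmware mailbox.
 */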
int octep_hw_caps_read(struct octep_hw *oct_hw, struct pci_dev *pdev)
{
        struct octep_mbox __iomem *mbox;
        struct device *dev = &pdev->dev;
        struct virtio_pci_cap cap;
        u16 notify_off;
        int i, ret;
        u8 pos;

        oct_hw->pdev = pdev;
        ret = octep_pci_signature_verify(oct_hw);
        if (ret) {
                dev_err(dev, "Octeon Virtio FW is not initialized\n");
                return -EIO;
        }

        octep_pci_caps_read(oct_hw, &pos, 1, PCI_CAPABILITY_LIST);

        while (pos) {
                octep_pci_caps_read(oct_hw, &cap, 2, pos);

                if (cap.cap_vndr != PCI_CAP_ID_VNDR) {
                        dev_err(dev, "Found invalid capability vndr id: %d\n", cap.cap_vndr);
                        break;
                }

                octep_pci_caps_read(oct_hw, &cap, sizeof(cap), pos);

                dev_info(dev, "[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u\n",
                         pos, cap.cfg_type, cap.bar, cap.offset, cap.length);

                switch (cap.cfg_type) {
                case VIRTIO_PCI_CAP_COMMON_CFG:
                        oct_hw->common_cfg = octep_get_cap_addr(oct_hw, &cap);
                        break;
                case VIRTIO_PCI_CAP_NOTIFY_CFG:
                        octep_pci_caps_read(oct_hw, &oct_hw->notify_off_multiplier,
                                            4, pos + sizeof(cap));

                        oct_hw->notify_base = octep_get_cap_addr(oct_hw, &cap);
                        oct_hw->notify_bar = cap.bar;
                        oct_hw->notify_base_pa = pci_resource_start(pdev, cap.bar) +
                                                 le32_to_cpu(cap.offset);
                        break;
                case VIRTIO_PCI_CAP_DEVICE_CFG:
                        oct_hw->dev_cfg = octep_get_cap_addr(oct_hw, &cap);
                        break;
                case VIRTIO_PCI_CAP_ISR_CFG:
                        oct_hw->isr = octep_get_cap_addr(oct_hw, &cap);
                        break;
                }

                pos = cap.cap_next;
        }
        if (!oct_hw->common_cfg || !oct_hw->notify_base ||
            !oct_hw->dev_cfg    || !oct_hw->isr) {
                dev_err(dev, "Incomplete PCI capabilities\n");
                return -EIO;
        }
        dev_info(dev, "common cfg mapped at: 0x%016llx\n", (u64)(uintptr_t)oct_hw->common_cfg);
        dev_info(dev, "device cfg mapped at: 0x%016llx\n", (u64)(uintptr_t)oct_hw->dev_cfg);
        dev_info(dev, "isr cfg mapped at: 0x%016llx\n", (u64)(uintptr_t)oct_hw->isr);
        dev_info(dev, "notify base: 0x%016llx, notify off multiplier: %u\n",
                 (u64)(uintptr_t)oct_hw->notify_base, oct_hw->notify_off_multiplier);

        oct_hw->config_size = octep_get_config_size(oct_hw);
        oct_hw->features = octep_hw_get_dev_features(oct_hw);

        ret = octep_verify_features(oct_hw->features);
        if (ret) {
                dev_err(&pdev->dev, "Couldn't read features from the device FW\n");
                return ret;
        }
        oct_hw->nr_vring = vp_ioread16(&oct_hw->common_cfg->num_queues);

        oct_hw->vqs = devm_kcalloc(&pdev->dev, oct_hw->nr_vring, sizeof(*oct_hw->vqs), GFP_KERNEL);
        if (!oct_hw->vqs)
                return -ENOMEM;

        oct_hw->irq = -1;

        dev_info(&pdev->dev, "Device features : %llx\n", oct_hw->features);
        dev_info(&pdev->dev, "Maximum queues : %u\n", oct_hw->nr_vring);

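        /*
         * Each queue's doorbell is at notify_base plus its
         * queue_notify_off scaled by the notify-off multiplier; the
         * 32-bit word immediately after the doorbell serves as the
         * callback notification address.
         */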
        for (i = 0; i < oct_hw->nr_vring; i++) {
                octep_write_queue_select(oct_hw, i);
                notify_off = vp_ioread16(&oct_hw->common_cfg->queue_notify_off);
                oct_hw->vqs[i].notify_addr = oct_hw->notify_base +
                        notify_off * oct_hw->notify_off_multiplier;
                oct_hw->vqs[i].cb_notify_addr = (u32 __iomem *)oct_hw->vqs[i].notify_addr + 1;
                oct_hw->vqs[i].notify_pa = oct_hw->notify_base_pa +
                        notify_off * oct_hw->notify_off_multiplier;
        }
        mbox = octep_get_mbox(oct_hw);
        octep_mbox_init(mbox);
        dev_info(dev, "mbox mapped at: 0x%016llx\n", (u64)(uintptr_t)mbox);

        return 0;
}