// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */

#include <net/libeth/rx.h>

#include "idpf.h"
#include "idpf_virtchnl.h"

#define IDPF_VC_XN_MIN_TIMEOUT_MSEC     2000
#define IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC (60 * 1000)
#define IDPF_VC_XN_IDX_M                GENMASK(7, 0)
#define IDPF_VC_XN_SALT_M               GENMASK(15, 8)
#define IDPF_VC_XN_RING_LEN             U8_MAX
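/* The 16-bit cookie carried with each mailbox message is built from the
 * masks above: bits 7:0 hold the transaction's ring index and bits 15:8
 * hold its salt (see idpf_vc_xn_exec() and idpf_vc_xn_forward_reply()).
 */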

/**
 * enum idpf_vc_xn_state - Virtchnl transaction status
 * @IDPF_VC_XN_IDLE: not expecting a reply, ready to be used
 * @IDPF_VC_XN_WAITING: expecting a reply, not yet received
 * @IDPF_VC_XN_COMPLETED_SUCCESS: a reply was expected and received,
 *                                buffer updated
 * @IDPF_VC_XN_COMPLETED_FAILED: a reply was expected and received, but there
 *                               was an error, buffer not updated
 * @IDPF_VC_XN_SHUTDOWN: transaction object cannot be used, VC torn down
 * @IDPF_VC_XN_ASYNC: transaction sent asynchronously and doesn't have the
 *                    return context; a callback may be provided to handle
 *                    return
 */
enum idpf_vc_xn_state {
        IDPF_VC_XN_IDLE = 1,
        IDPF_VC_XN_WAITING,
        IDPF_VC_XN_COMPLETED_SUCCESS,
        IDPF_VC_XN_COMPLETED_FAILED,
        IDPF_VC_XN_SHUTDOWN,
        IDPF_VC_XN_ASYNC,
};

struct idpf_vc_xn;
/* Callback for asynchronous messages */
typedef int (*async_vc_cb) (struct idpf_adapter *, struct idpf_vc_xn *,
                            const struct idpf_ctlq_msg *);

/**
 * struct idpf_vc_xn - Data structure representing virtchnl transactions
 * @completed: virtchnl event loop uses this to signal when a reply is
 *             available, uses kernel completion API
 * @state: virtchnl event loop stores the data below, protected by the
 *         completion's lock.
 * @reply_sz: Original size of reply, may be > reply.iov_len; it will be
 *            truncated on its way to the receiver thread according to
 *            reply.iov_len.
 * @reply: Reference to the buffer(s) where the reply data should be written
 *         to. May be 0-length (then NULL address permitted) if the reply data
 *         should be ignored.
 * @async_handler: if sent asynchronously, a callback can be provided to handle
 *                 the reply when it's received
 * @vc_op: corresponding opcode sent with this transaction
 * @idx: index used to look the transaction back up on reply receipt; part of
 *       the cookie
 * @salt: incremented for every message to make the cookie unique; part of the
 *        cookie
 */
struct idpf_vc_xn {
        struct completion completed;
        enum idpf_vc_xn_state state;
        size_t reply_sz;
        struct kvec reply;
        async_vc_cb async_handler;
        u32 vc_op;
        u8 idx;
        u8 salt;
};

/**
 * struct idpf_vc_xn_params - Parameters for executing transaction
 * @send_buf: kvec for send buffer
 * @recv_buf: kvec for recv buffer, may be NULL, must then have zero length
 * @timeout_ms: timeout to wait for reply
 * @async: send message asynchronously, will not wait on completion
 * @async_handler: If sent asynchronously, optional callback handler. The user
 *                 must be careful when using async handlers as the memory for
 *                 the recv_buf _cannot_ be on stack if this is async.
 * @vc_op: virtchnl op to send
 */
struct idpf_vc_xn_params {
        struct kvec send_buf;
        struct kvec recv_buf;
        int timeout_ms;
        bool async;
        async_vc_cb async_handler;
        u32 vc_op;
};

/**
 * struct idpf_vc_xn_manager - Manager for tracking transactions
 * @ring: backing and lookup for transactions
 * @free_xn_bm: bitmap for free transactions
 * @xn_bm_lock: make bitmap access synchronous where necessary
 * @salt: used to make cookie unique every message
 */
struct idpf_vc_xn_manager {
        struct idpf_vc_xn ring[IDPF_VC_XN_RING_LEN];
        DECLARE_BITMAP(free_xn_bm, IDPF_VC_XN_RING_LEN);
        spinlock_t xn_bm_lock;
        u8 salt;
};

/**
 * idpf_vid_to_vport - Translate vport id to vport pointer
 * @adapter: private data struct
 * @v_id: vport id to translate
 *
 * Returns vport matching v_id, NULL if not found.
 */
static
struct idpf_vport *idpf_vid_to_vport(struct idpf_adapter *adapter, u32 v_id)
{
        u16 num_max_vports = idpf_get_max_vports(adapter);
        int i;

        for (i = 0; i < num_max_vports; i++)
                if (adapter->vport_ids[i] == v_id)
                        return adapter->vports[i];

        return NULL;
}

/**
 * idpf_handle_event_link - Handle link event message
 * @adapter: private data struct
 * @v2e: virtchnl event message
 */
static void idpf_handle_event_link(struct idpf_adapter *adapter,
                                   const struct virtchnl2_event *v2e)
{
        struct idpf_netdev_priv *np;
        struct idpf_vport *vport;

        vport = idpf_vid_to_vport(adapter, le32_to_cpu(v2e->vport_id));
        if (!vport) {
                dev_err_ratelimited(&adapter->pdev->dev, "Failed to find vport_id %d for link event\n",
                                    le32_to_cpu(v2e->vport_id));
                return;
        }
        np = netdev_priv(vport->netdev);

        vport->link_speed_mbps = le32_to_cpu(v2e->link_speed);

        if (vport->link_up == v2e->link_status)
                return;

        vport->link_up = v2e->link_status;

        if (np->state != __IDPF_VPORT_UP)
                return;

        if (vport->link_up) {
                netif_tx_start_all_queues(vport->netdev);
                netif_carrier_on(vport->netdev);
        } else {
                netif_tx_stop_all_queues(vport->netdev);
                netif_carrier_off(vport->netdev);
        }
}

/**
 * idpf_recv_event_msg - Receive virtchnl event message
 * @adapter: Driver specific private structure
 * @ctlq_msg: message to copy from
 *
 * Receive virtchnl event message
 */
static void idpf_recv_event_msg(struct idpf_adapter *adapter,
                                struct idpf_ctlq_msg *ctlq_msg)
{
        int payload_size = ctlq_msg->ctx.indirect.payload->size;
        struct virtchnl2_event *v2e;
        u32 event;

        if (payload_size < sizeof(*v2e)) {
                dev_err_ratelimited(&adapter->pdev->dev, "Failed to receive valid payload for event msg (op %d len %d)\n",
                                    ctlq_msg->cookie.mbx.chnl_opcode,
                                    payload_size);
                return;
        }

        v2e = (struct virtchnl2_event *)ctlq_msg->ctx.indirect.payload->va;
        event = le32_to_cpu(v2e->event);

        switch (event) {
        case VIRTCHNL2_EVENT_LINK_CHANGE:
                idpf_handle_event_link(adapter, v2e);
                return;
        default:
                dev_err(&adapter->pdev->dev,
                        "Unknown event %d from PF\n", event);
                break;
        }
}

/**
 * idpf_mb_clean - Reclaim the send mailbox queue entries
 * @adapter: Driver specific private structure
 *
 * Reclaim the send mailbox queue entries to be used to send further messages
 *
 * Returns 0 on success, negative on failure
 */
static int idpf_mb_clean(struct idpf_adapter *adapter)
{
        u16 i, num_q_msg = IDPF_DFLT_MBX_Q_LEN;
        struct idpf_ctlq_msg **q_msg;
        struct idpf_dma_mem *dma_mem;
        int err;

        q_msg = kcalloc(num_q_msg, sizeof(struct idpf_ctlq_msg *), GFP_ATOMIC);
        if (!q_msg)
                return -ENOMEM;

        err = idpf_ctlq_clean_sq(adapter->hw.asq, &num_q_msg, q_msg);
        if (err)
                goto err_kfree;

        for (i = 0; i < num_q_msg; i++) {
                if (!q_msg[i])
                        continue;
                dma_mem = q_msg[i]->ctx.indirect.payload;
                if (dma_mem)
                        dma_free_coherent(&adapter->pdev->dev, dma_mem->size,
                                          dma_mem->va, dma_mem->pa);
                kfree(q_msg[i]);
                kfree(dma_mem);
        }

err_kfree:
        kfree(q_msg);

        return err;
}

/**
 * idpf_send_mb_msg - Send message over mailbox
 * @adapter: Driver specific private structure
 * @op: virtchnl opcode
 * @msg_size: size of the payload
 * @msg: pointer to buffer holding the payload
 * @cookie: unique SW generated cookie per message
 *
 * Prepares the control queue message and initiates the send API.
 *
 * Returns 0 on success, negative on failure
 */
int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
                     u16 msg_size, u8 *msg, u16 cookie)
{
        struct idpf_ctlq_msg *ctlq_msg;
        struct idpf_dma_mem *dma_mem;
        int err;

        /* If we are here and a reset is detected nothing much can be
         * done. This thread should silently abort and is expected to
         * be corrected with a new run, either by user or driver flows,
         * after the reset.
         */
        if (idpf_is_reset_detected(adapter))
                return 0;

        err = idpf_mb_clean(adapter);
        if (err)
                return err;

        ctlq_msg = kzalloc(sizeof(*ctlq_msg), GFP_ATOMIC);
        if (!ctlq_msg)
                return -ENOMEM;

        dma_mem = kzalloc(sizeof(*dma_mem), GFP_ATOMIC);
        if (!dma_mem) {
                err = -ENOMEM;
                goto dma_mem_error;
        }

        ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_cp;
        ctlq_msg->func_id = 0;
        ctlq_msg->data_len = msg_size;
        ctlq_msg->cookie.mbx.chnl_opcode = op;
        ctlq_msg->cookie.mbx.chnl_retval = 0;
        dma_mem->size = IDPF_CTLQ_MAX_BUF_LEN;
        dma_mem->va = dma_alloc_coherent(&adapter->pdev->dev, dma_mem->size,
                                         &dma_mem->pa, GFP_ATOMIC);
        if (!dma_mem->va) {
                err = -ENOMEM;
                goto dma_alloc_error;
        }

        /* It's possible we're just sending an opcode but no buffer */
        if (msg && msg_size)
                memcpy(dma_mem->va, msg, msg_size);
        ctlq_msg->ctx.indirect.payload = dma_mem;
        ctlq_msg->ctx.sw_cookie.data = cookie;

        err = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg);
        if (err)
                goto send_error;

        return 0;

send_error:
        dma_free_coherent(&adapter->pdev->dev, dma_mem->size, dma_mem->va,
                          dma_mem->pa);
dma_alloc_error:
        kfree(dma_mem);
dma_mem_error:
        kfree(ctlq_msg);

        return err;
}

/* API for virtchnl "transaction" support ("xn" for short).
 *
 * We are reusing the completion lock to serialize the accesses to the
 * transaction state for simplicity, but it could be its own separate synchro
 * as well. For now, this API is only used from within a workqueue context;
 * raw_spin_lock() is enough.
 */
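/* A minimal synchronous usage sketch (illustrative only, with a hypothetical
 * request struct @msg; see idpf_send_ver_msg() below for a real caller):
 *
 *      struct idpf_vc_xn_params xn_params = {};
 *      ssize_t reply_sz;
 *
 *      xn_params.vc_op = VIRTCHNL2_OP_VERSION;
 *      xn_params.send_buf.iov_base = &msg;
 *      xn_params.send_buf.iov_len = sizeof(msg);
 *      xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
 *      reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
 *      if (reply_sz < 0)
 *              return reply_sz;
 */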
/**
 * idpf_vc_xn_lock - Request exclusive access to vc transaction
 * @xn: struct idpf_vc_xn* to access
 */
#define idpf_vc_xn_lock(xn)                     \
        raw_spin_lock(&(xn)->completed.wait.lock)

/**
 * idpf_vc_xn_unlock - Release exclusive access to vc transaction
 * @xn: struct idpf_vc_xn* to access
 */
#define idpf_vc_xn_unlock(xn)           \
        raw_spin_unlock(&(xn)->completed.wait.lock)

/**
 * idpf_vc_xn_release_bufs - Release reference to reply buffer(s) and
 * reset the transaction state.
 * @xn: struct idpf_vc_xn to update
 */
static void idpf_vc_xn_release_bufs(struct idpf_vc_xn *xn)
{
        xn->reply.iov_base = NULL;
        xn->reply.iov_len = 0;

        if (xn->state != IDPF_VC_XN_SHUTDOWN)
                xn->state = IDPF_VC_XN_IDLE;
}

/**
 * idpf_vc_xn_init - Initialize virtchnl transaction object
 * @vcxn_mngr: pointer to vc transaction manager struct
 */
static void idpf_vc_xn_init(struct idpf_vc_xn_manager *vcxn_mngr)
{
        int i;

        spin_lock_init(&vcxn_mngr->xn_bm_lock);

        for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
                struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];

                xn->state = IDPF_VC_XN_IDLE;
                xn->idx = i;
                idpf_vc_xn_release_bufs(xn);
                init_completion(&xn->completed);
        }

        bitmap_fill(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
}

/**
 * idpf_vc_xn_shutdown - Uninitialize virtchnl transaction object
 * @vcxn_mngr: pointer to vc transaction manager struct
 *
 * All waiting threads will be woken-up and their transaction aborted. Further
 * operations on that object will fail.
 */
static void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr)
{
        int i;

        spin_lock_bh(&vcxn_mngr->xn_bm_lock);
        bitmap_zero(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
        spin_unlock_bh(&vcxn_mngr->xn_bm_lock);

        for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
                struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];

                idpf_vc_xn_lock(xn);
                xn->state = IDPF_VC_XN_SHUTDOWN;
                idpf_vc_xn_release_bufs(xn);
                idpf_vc_xn_unlock(xn);
                complete_all(&xn->completed);
        }
}

/**
 * idpf_vc_xn_pop_free - Pop a free transaction from free list
 * @vcxn_mngr: transaction manager to pop from
 *
 * Returns NULL if no free transactions
 */
static
struct idpf_vc_xn *idpf_vc_xn_pop_free(struct idpf_vc_xn_manager *vcxn_mngr)
{
        struct idpf_vc_xn *xn = NULL;
        unsigned long free_idx;

        spin_lock_bh(&vcxn_mngr->xn_bm_lock);
        free_idx = find_first_bit(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
        if (free_idx == IDPF_VC_XN_RING_LEN)
                goto do_unlock;

        clear_bit(free_idx, vcxn_mngr->free_xn_bm);
        xn = &vcxn_mngr->ring[free_idx];
        xn->salt = vcxn_mngr->salt++;
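        /* Changing the salt on every pop means a stale reply carrying this
         * ring index but an old salt will fail the salt check in
         * idpf_vc_xn_forward_reply() and be dropped.
         */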

do_unlock:
        spin_unlock_bh(&vcxn_mngr->xn_bm_lock);

        return xn;
}

/**
 * idpf_vc_xn_push_free - Push a free transaction to free list
 * @vcxn_mngr: transaction manager to push to
 * @xn: transaction to push
 */
static void idpf_vc_xn_push_free(struct idpf_vc_xn_manager *vcxn_mngr,
                                 struct idpf_vc_xn *xn)
{
        idpf_vc_xn_release_bufs(xn);
        set_bit(xn->idx, vcxn_mngr->free_xn_bm);
}

/**
 * idpf_vc_xn_exec - Perform a send/recv virtchnl transaction
 * @adapter: driver specific private structure with vcxn_mngr
 * @params: parameters for this particular transaction including
 *   -vc_op: virtchnl operation to send
 *   -send_buf: kvec iov for send buf and len
 *   -recv_buf: kvec iov for recv buf and len (ignored if NULL)
 *   -timeout_ms: timeout waiting for a reply (milliseconds)
 *   -async: don't wait for message reply, will lose caller context
 *   -async_handler: callback to handle async replies
 *
 * @returns >= 0 for success, the size of the initial reply (may or may not be
 * >= @recv_buf.iov_len, but we never overflow @recv_buf.iov_base). < 0 for
 * error.
 */
static ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
                               const struct idpf_vc_xn_params *params)
{
        const struct kvec *send_buf = &params->send_buf;
        struct idpf_vc_xn *xn;
        ssize_t retval;
        u16 cookie;

        xn = idpf_vc_xn_pop_free(adapter->vcxn_mngr);
        /* no free transactions available */
        if (!xn)
                return -ENOSPC;

        idpf_vc_xn_lock(xn);
        if (xn->state == IDPF_VC_XN_SHUTDOWN) {
                retval = -ENXIO;
                goto only_unlock;
        } else if (xn->state != IDPF_VC_XN_IDLE) {
                /* We're just going to clobber this transaction even though
                 * it's not IDLE. If we don't reuse it we could theoretically
                 * eventually leak all the free transactions and not be able to
                 * send any messages. At least this way we make an attempt to
                 * remain functional even though something really bad is
                 * happening that's corrupting what was supposed to be free
                 * transactions.
                 */
                WARN_ONCE(1, "There should only be idle transactions in free list (idx %d op %d)\n",
                          xn->idx, xn->vc_op);
        }

        xn->reply = params->recv_buf;
        xn->reply_sz = 0;
        xn->state = params->async ? IDPF_VC_XN_ASYNC : IDPF_VC_XN_WAITING;
        xn->vc_op = params->vc_op;
        xn->async_handler = params->async_handler;
        idpf_vc_xn_unlock(xn);

        if (!params->async)
                reinit_completion(&xn->completed);
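        /* The cookie rides in the descriptor's sw_cookie field and is echoed
         * back with the reply so idpf_vc_xn_forward_reply() can match the
         * reply to this transaction.
         */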
        cookie = FIELD_PREP(IDPF_VC_XN_SALT_M, xn->salt) |
                 FIELD_PREP(IDPF_VC_XN_IDX_M, xn->idx);

        retval = idpf_send_mb_msg(adapter, params->vc_op,
                                  send_buf->iov_len, send_buf->iov_base,
                                  cookie);
        if (retval) {
                idpf_vc_xn_lock(xn);
                goto release_and_unlock;
        }

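        /* For async transactions, ownership of the transaction passes to the
         * receive path; idpf_vc_xn_forward_async() pushes it back to the free
         * list once the reply is handled.
         */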
        if (params->async)
                return 0;

        wait_for_completion_timeout(&xn->completed,
                                    msecs_to_jiffies(params->timeout_ms));

        /* No need to check the return value; we check the final state of the
         * transaction below. It's possible the transaction actually gets more
         * time than specified if we get preempted here but after
         * wait_for_completion_timeout returns. This should be a non-issue,
         * however.
         */
        idpf_vc_xn_lock(xn);
        switch (xn->state) {
        case IDPF_VC_XN_SHUTDOWN:
                retval = -ENXIO;
                goto only_unlock;
        case IDPF_VC_XN_WAITING:
                dev_notice_ratelimited(&adapter->pdev->dev, "Transaction timed-out (op %d, %dms)\n",
                                       params->vc_op, params->timeout_ms);
                retval = -ETIME;
                break;
        case IDPF_VC_XN_COMPLETED_SUCCESS:
                retval = xn->reply_sz;
                break;
        case IDPF_VC_XN_COMPLETED_FAILED:
                dev_notice_ratelimited(&adapter->pdev->dev, "Transaction failed (op %d)\n",
                                       params->vc_op);
                retval = -EIO;
                break;
        default:
                /* Invalid state. */
                WARN_ON_ONCE(1);
                retval = -EIO;
                break;
        }

release_and_unlock:
        idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);
        /* If we receive a VC reply after here, it will be dropped. */
only_unlock:
        idpf_vc_xn_unlock(xn);

        return retval;
}

/**
 * idpf_vc_xn_forward_async - Handle async reply receives
 * @adapter: private data struct
 * @xn: transaction to handle
 * @ctlq_msg: corresponding ctlq_msg
 *
 * For async sends we're going to lose the caller's context so, if an
 * async_handler was provided, it can deal with the reply, otherwise we'll just
 * check and report if there is an error.
 */
static int
idpf_vc_xn_forward_async(struct idpf_adapter *adapter, struct idpf_vc_xn *xn,
                         const struct idpf_ctlq_msg *ctlq_msg)
{
        int err = 0;

        if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
                dev_err_ratelimited(&adapter->pdev->dev, "Async message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
                                    ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
                xn->reply_sz = 0;
                err = -EINVAL;
                goto release_bufs;
        }

        if (xn->async_handler) {
                err = xn->async_handler(adapter, xn, ctlq_msg);
                goto release_bufs;
        }

        if (ctlq_msg->cookie.mbx.chnl_retval) {
                xn->reply_sz = 0;
                dev_err_ratelimited(&adapter->pdev->dev, "Async message failure (op %d)\n",
                                    ctlq_msg->cookie.mbx.chnl_opcode);
                err = -EINVAL;
        }

release_bufs:
        idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);

        return err;
}

/**
 * idpf_vc_xn_forward_reply - copy a reply back to receiving thread
 * @adapter: driver specific private structure with vcxn_mngr
 * @ctlq_msg: controlq message to send back to receiving thread
 */
static int
idpf_vc_xn_forward_reply(struct idpf_adapter *adapter,
                         const struct idpf_ctlq_msg *ctlq_msg)
{
        const void *payload = NULL;
        size_t payload_size = 0;
        struct idpf_vc_xn *xn;
        u16 msg_info;
        int err = 0;
        u16 xn_idx;
        u16 salt;

        msg_info = ctlq_msg->ctx.sw_cookie.data;
        xn_idx = FIELD_GET(IDPF_VC_XN_IDX_M, msg_info);
        if (xn_idx >= ARRAY_SIZE(adapter->vcxn_mngr->ring)) {
                dev_err_ratelimited(&adapter->pdev->dev, "Out of bounds cookie received: %02x\n",
                                    xn_idx);
                return -EINVAL;
        }
        xn = &adapter->vcxn_mngr->ring[xn_idx];
        salt = FIELD_GET(IDPF_VC_XN_SALT_M, msg_info);
        if (xn->salt != salt) {
                dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (%02x != %02x)\n",
                                    xn->salt, salt);
                return -EINVAL;
        }

        idpf_vc_xn_lock(xn);
        switch (xn->state) {
        case IDPF_VC_XN_WAITING:
                /* success */
                break;
        case IDPF_VC_XN_IDLE:
                dev_err_ratelimited(&adapter->pdev->dev, "Unexpected or belated VC reply (op %d)\n",
                                    ctlq_msg->cookie.mbx.chnl_opcode);
                err = -EINVAL;
                goto out_unlock;
        case IDPF_VC_XN_SHUTDOWN:
                /* ENXIO is a bit special here as the recv msg loop uses that
                 * to know if it should stop trying to clean the ring if we
                 * lost the virtchnl. We need to stop playing with registers
                 * and yield.
                 */
                err = -ENXIO;
                goto out_unlock;
        case IDPF_VC_XN_ASYNC:
                err = idpf_vc_xn_forward_async(adapter, xn, ctlq_msg);
                idpf_vc_xn_unlock(xn);
                return err;
        default:
                dev_err_ratelimited(&adapter->pdev->dev, "Overwriting VC reply (op %d)\n",
                                    ctlq_msg->cookie.mbx.chnl_opcode);
                err = -EBUSY;
                goto out_unlock;
        }

        if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
                dev_err_ratelimited(&adapter->pdev->dev, "Message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
                                    ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
                xn->reply_sz = 0;
                xn->state = IDPF_VC_XN_COMPLETED_FAILED;
                err = -EINVAL;
                goto out_unlock;
        }

        if (ctlq_msg->cookie.mbx.chnl_retval) {
                xn->reply_sz = 0;
                xn->state = IDPF_VC_XN_COMPLETED_FAILED;
                err = -EINVAL;
                goto out_unlock;
        }

        if (ctlq_msg->data_len) {
                payload = ctlq_msg->ctx.indirect.payload->va;
                payload_size = ctlq_msg->ctx.indirect.payload->size;
        }

        xn->reply_sz = payload_size;
        xn->state = IDPF_VC_XN_COMPLETED_SUCCESS;

        if (xn->reply.iov_base && xn->reply.iov_len && payload_size)
                memcpy(xn->reply.iov_base, payload,
                       min_t(size_t, xn->reply.iov_len, payload_size));

out_unlock:
        idpf_vc_xn_unlock(xn);
        /* we _cannot_ hold lock while calling complete */
        complete(&xn->completed);

        return err;
}

/**
 * idpf_recv_mb_msg - Receive message over mailbox
 * @adapter: Driver specific private structure
 *
 * Receives control queue messages and posts the receive buffers back. Returns
 * 0 on success and negative on failure.
 */
int idpf_recv_mb_msg(struct idpf_adapter *adapter)
{
        struct idpf_ctlq_msg ctlq_msg;
        struct idpf_dma_mem *dma_mem;
        int post_err, err;
        u16 num_recv;

        while (1) {
                /* This will receive at most num_recv messages and write the
                 * number actually received back to num_recv.
                 */
                num_recv = 1;
                err = idpf_ctlq_recv(adapter->hw.arq, &num_recv, &ctlq_msg);
                if (err || !num_recv)
                        break;

                if (ctlq_msg.data_len) {
                        dma_mem = ctlq_msg.ctx.indirect.payload;
                } else {
                        dma_mem = NULL;
                        num_recv = 0;
                }

                if (ctlq_msg.cookie.mbx.chnl_opcode == VIRTCHNL2_OP_EVENT)
                        idpf_recv_event_msg(adapter, &ctlq_msg);
                else
                        err = idpf_vc_xn_forward_reply(adapter, &ctlq_msg);

                post_err = idpf_ctlq_post_rx_buffs(&adapter->hw,
                                                   adapter->hw.arq,
                                                   &num_recv, &dma_mem);

                /* If post failed clear the only buffer we supplied */
                if (post_err) {
                        if (dma_mem)
                                dmam_free_coherent(&adapter->pdev->dev,
                                                   dma_mem->size, dma_mem->va,
                                                   dma_mem->pa);
                        break;
                }

                /* virtchnl trying to shutdown, stop cleaning */
                if (err == -ENXIO)
                        break;
        }

        return err;
}

/**
 * idpf_wait_for_marker_event - wait for software marker response
 * @vport: virtual port data structure
 *
 * Returns 0 on success, negative on failure.
 */
static int idpf_wait_for_marker_event(struct idpf_vport *vport)
{
        int event;
        int i;

        for (i = 0; i < vport->num_txq; i++)
                idpf_queue_set(SW_MARKER, vport->txqs[i]);

        event = wait_event_timeout(vport->sw_marker_wq,
                                   test_and_clear_bit(IDPF_VPORT_SW_MARKER,
                                                      vport->flags),
                                   msecs_to_jiffies(500));

        for (i = 0; i < vport->num_txq; i++)
                idpf_queue_clear(POLL_MODE, vport->txqs[i]);

        if (event)
                return 0;

        dev_warn(&vport->adapter->pdev->dev, "Failed to receive marker packets\n");

        return -ETIMEDOUT;
}

/**
 * idpf_send_ver_msg - send virtchnl version message
 * @adapter: Driver specific private structure
 *
 * Send virtchnl version message.  Returns 0 on success, negative on failure.
 */
static int idpf_send_ver_msg(struct idpf_adapter *adapter)
{
        struct idpf_vc_xn_params xn_params = {};
        struct virtchnl2_version_info vvi;
        ssize_t reply_sz;
        u32 major, minor;
        int err = 0;

        if (adapter->virt_ver_maj) {
                vvi.major = cpu_to_le32(adapter->virt_ver_maj);
                vvi.minor = cpu_to_le32(adapter->virt_ver_min);
        } else {
                vvi.major = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MAJOR);
                vvi.minor = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MINOR);
        }

        xn_params.vc_op = VIRTCHNL2_OP_VERSION;
        xn_params.send_buf.iov_base = &vvi;
        xn_params.send_buf.iov_len = sizeof(vvi);
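        /* Reuse the send buffer for the reply: the CP responds with its own
         * virtchnl2_version_info, which overwrites @vvi in place.
         */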
        xn_params.recv_buf = xn_params.send_buf;
        xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;

        reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
        if (reply_sz < 0)
                return reply_sz;
        if (reply_sz < sizeof(vvi))
                return -EIO;

        major = le32_to_cpu(vvi.major);
        minor = le32_to_cpu(vvi.minor);

        if (major > IDPF_VIRTCHNL_VERSION_MAJOR) {
                dev_warn(&adapter->pdev->dev, "Virtchnl major version greater than supported\n");
                return -EINVAL;
        }

        if (major == IDPF_VIRTCHNL_VERSION_MAJOR &&
            minor > IDPF_VIRTCHNL_VERSION_MINOR)
                dev_warn(&adapter->pdev->dev, "Virtchnl minor version didn't match\n");

        /* If we have a mismatch, resend version to update receiver on what
         * version we will use.
         */
        if (!adapter->virt_ver_maj &&
            major != IDPF_VIRTCHNL_VERSION_MAJOR &&
            minor != IDPF_VIRTCHNL_VERSION_MINOR)
                err = -EAGAIN;

        adapter->virt_ver_maj = major;
        adapter->virt_ver_min = minor;

        return err;
}

/**
 * idpf_send_get_caps_msg - Send virtchnl get capabilities message
 * @adapter: Driver specific private structure
 *
 * Send virtchnl get capabilities message. Returns 0 on success, negative on
 * failure.
 */
static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
{
        struct virtchnl2_get_capabilities caps = {};
        struct idpf_vc_xn_params xn_params = {};
        ssize_t reply_sz;

        caps.csum_caps =
                cpu_to_le32(VIRTCHNL2_CAP_TX_CSUM_L3_IPV4       |
                            VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP   |
                            VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP   |
                            VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP  |
                            VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP   |
                            VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP   |
                            VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP  |
                            VIRTCHNL2_CAP_RX_CSUM_L3_IPV4       |
                            VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP   |
                            VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP   |
                            VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP  |
                            VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP   |
                            VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP   |
                            VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP  |
                            VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL |
                            VIRTCHNL2_CAP_RX_CSUM_L3_SINGLE_TUNNEL |
                            VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL |
                            VIRTCHNL2_CAP_RX_CSUM_L4_SINGLE_TUNNEL |
                            VIRTCHNL2_CAP_RX_CSUM_GENERIC);

        caps.seg_caps =
                cpu_to_le32(VIRTCHNL2_CAP_SEG_IPV4_TCP          |
                            VIRTCHNL2_CAP_SEG_IPV4_UDP          |
                            VIRTCHNL2_CAP_SEG_IPV4_SCTP         |
                            VIRTCHNL2_CAP_SEG_IPV6_TCP          |
                            VIRTCHNL2_CAP_SEG_IPV6_UDP          |
                            VIRTCHNL2_CAP_SEG_IPV6_SCTP         |
                            VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL);

        caps.rss_caps =
                cpu_to_le64(VIRTCHNL2_CAP_RSS_IPV4_TCP          |
                            VIRTCHNL2_CAP_RSS_IPV4_UDP          |
                            VIRTCHNL2_CAP_RSS_IPV4_SCTP         |
                            VIRTCHNL2_CAP_RSS_IPV4_OTHER        |
                            VIRTCHNL2_CAP_RSS_IPV6_TCP          |
                            VIRTCHNL2_CAP_RSS_IPV6_UDP          |
                            VIRTCHNL2_CAP_RSS_IPV6_SCTP         |
                            VIRTCHNL2_CAP_RSS_IPV6_OTHER);

        caps.hsplit_caps =
                cpu_to_le32(VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4     |
                            VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6);

        caps.rsc_caps =
                cpu_to_le32(VIRTCHNL2_CAP_RSC_IPV4_TCP          |
                            VIRTCHNL2_CAP_RSC_IPV6_TCP);

        caps.other_caps =
                cpu_to_le64(VIRTCHNL2_CAP_SRIOV                 |
                            VIRTCHNL2_CAP_MACFILTER             |
                            VIRTCHNL2_CAP_SPLITQ_QSCHED         |
                            VIRTCHNL2_CAP_PROMISC               |
                            VIRTCHNL2_CAP_LOOPBACK);

        xn_params.vc_op = VIRTCHNL2_OP_GET_CAPS;
        xn_params.send_buf.iov_base = &caps;
        xn_params.send_buf.iov_len = sizeof(caps);
        xn_params.recv_buf.iov_base = &adapter->caps;
        xn_params.recv_buf.iov_len = sizeof(adapter->caps);
        xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;

        reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
        if (reply_sz < 0)
                return reply_sz;
        if (reply_sz < sizeof(adapter->caps))
                return -EIO;

        return 0;
}

/**
 * idpf_vport_alloc_max_qs - Allocate max queues for a vport
 * @adapter: Driver specific private structure
 * @max_q: vport max queue structure
 */
int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
                            struct idpf_vport_max_q *max_q)
{
        struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues;
        struct virtchnl2_get_capabilities *caps = &adapter->caps;
        u16 default_vports = idpf_get_default_vports(adapter);
        int max_rx_q, max_tx_q;

        mutex_lock(&adapter->queue_lock);

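        /* Queue caps are split evenly across the default vports; any vport
         * allocated beyond that count falls back to the minimum.
         */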
        max_rx_q = le16_to_cpu(caps->max_rx_q) / default_vports;
        max_tx_q = le16_to_cpu(caps->max_tx_q) / default_vports;
        if (adapter->num_alloc_vports < default_vports) {
                max_q->max_rxq = min_t(u16, max_rx_q, IDPF_MAX_Q);
                max_q->max_txq = min_t(u16, max_tx_q, IDPF_MAX_Q);
        } else {
                max_q->max_rxq = IDPF_MIN_Q;
                max_q->max_txq = IDPF_MIN_Q;
        }
        max_q->max_bufq = max_q->max_rxq * IDPF_MAX_BUFQS_PER_RXQ_GRP;
        max_q->max_complq = max_q->max_txq;

        if (avail_queues->avail_rxq < max_q->max_rxq ||
            avail_queues->avail_txq < max_q->max_txq ||
            avail_queues->avail_bufq < max_q->max_bufq ||
            avail_queues->avail_complq < max_q->max_complq) {
                mutex_unlock(&adapter->queue_lock);

                return -EINVAL;
        }

        avail_queues->avail_rxq -= max_q->max_rxq;
        avail_queues->avail_txq -= max_q->max_txq;
        avail_queues->avail_bufq -= max_q->max_bufq;
        avail_queues->avail_complq -= max_q->max_complq;

        mutex_unlock(&adapter->queue_lock);

        return 0;
}

/**
 * idpf_vport_dealloc_max_qs - Deallocate max queues of a vport
 * @adapter: Driver specific private structure
 * @max_q: vport max queue structure
 */
void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
                               struct idpf_vport_max_q *max_q)
{
        struct idpf_avail_queue_info *avail_queues;

        mutex_lock(&adapter->queue_lock);
        avail_queues = &adapter->avail_queues;

        avail_queues->avail_rxq += max_q->max_rxq;
        avail_queues->avail_txq += max_q->max_txq;
        avail_queues->avail_bufq += max_q->max_bufq;
        avail_queues->avail_complq += max_q->max_complq;

        mutex_unlock(&adapter->queue_lock);
}

/**
 * idpf_init_avail_queues - Initialize available queues on the device
 * @adapter: Driver specific private structure
 */
static void idpf_init_avail_queues(struct idpf_adapter *adapter)
{
        struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues;
        struct virtchnl2_get_capabilities *caps = &adapter->caps;

        avail_queues->avail_rxq = le16_to_cpu(caps->max_rx_q);
        avail_queues->avail_txq = le16_to_cpu(caps->max_tx_q);
        avail_queues->avail_bufq = le16_to_cpu(caps->max_rx_bufq);
        avail_queues->avail_complq = le16_to_cpu(caps->max_tx_complq);
}

/**
 * idpf_get_reg_intr_vecs - Get vector queue register offset
 * @vport: virtual port structure
 * @reg_vals: Register offsets to store in
 *
 * Returns number of registers that got populated
 */
int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
                           struct idpf_vec_regs *reg_vals)
{
        struct virtchnl2_vector_chunks *chunks;
        struct idpf_vec_regs reg_val;
        u16 num_vchunks, num_vec;
        int num_regs = 0, i, j;

        chunks = &vport->adapter->req_vec_chunks->vchunks;
        num_vchunks = le16_to_cpu(chunks->num_vchunks);

        for (j = 0; j < num_vchunks; j++) {
                struct virtchnl2_vector_chunk *chunk;
                u32 dynctl_reg_spacing;
                u32 itrn_reg_spacing;

                chunk = &chunks->vchunks[j];
                num_vec = le16_to_cpu(chunk->num_vectors);
                reg_val.dyn_ctl_reg = le32_to_cpu(chunk->dynctl_reg_start);
                reg_val.itrn_reg = le32_to_cpu(chunk->itrn_reg_start);
                reg_val.itrn_index_spacing = le32_to_cpu(chunk->itrn_index_spacing);

                dynctl_reg_spacing = le32_to_cpu(chunk->dynctl_reg_spacing);
                itrn_reg_spacing = le32_to_cpu(chunk->itrn_reg_spacing);

                for (i = 0; i < num_vec; i++) {
                        reg_vals[num_regs].dyn_ctl_reg = reg_val.dyn_ctl_reg;
                        reg_vals[num_regs].itrn_reg = reg_val.itrn_reg;
                        reg_vals[num_regs].itrn_index_spacing =
                                                reg_val.itrn_index_spacing;

                        reg_val.dyn_ctl_reg += dynctl_reg_spacing;
                        reg_val.itrn_reg += itrn_reg_spacing;
                        num_regs++;
                }
        }

        return num_regs;
}

/**
 * idpf_vport_get_q_reg - Get the queue registers for the vport
 * @reg_vals: register values needing to be set
 * @num_regs: amount we expect to fill
 * @q_type: queue model
 * @chunks: queue regs received over mailbox
 *
 * This function parses the queue register offsets from the queue register
 * chunk information, with a specific queue type and stores it into the array
 * passed as an argument. It returns the actual number of queue registers that
 * are filled.
 */
static int idpf_vport_get_q_reg(u32 *reg_vals, int num_regs, u32 q_type,
                                struct virtchnl2_queue_reg_chunks *chunks)
{
        u16 num_chunks = le16_to_cpu(chunks->num_chunks);
        int reg_filled = 0, i;
        u32 reg_val;

        while (num_chunks--) {
                struct virtchnl2_queue_reg_chunk *chunk;
                u16 num_q;

                chunk = &chunks->chunks[num_chunks];
                if (le32_to_cpu(chunk->type) != q_type)
                        continue;

                num_q = le32_to_cpu(chunk->num_queues);
                reg_val = le64_to_cpu(chunk->qtail_reg_start);
                for (i = 0; i < num_q && reg_filled < num_regs; i++) {
                        reg_vals[reg_filled++] = reg_val;
                        reg_val += le32_to_cpu(chunk->qtail_reg_spacing);
                }
        }

        return reg_filled;
}

/**
 * __idpf_queue_reg_init - initialize queue registers
 * @vport: virtual port structure
 * @reg_vals: registers we are initializing
 * @num_regs: how many registers there are in total
 * @q_type: queue model
 *
 * Return number of queues that are initialized
 */
static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
                                 int num_regs, u32 q_type)
{
        struct idpf_adapter *adapter = vport->adapter;
        int i, j, k = 0;

        switch (q_type) {
        case VIRTCHNL2_QUEUE_TYPE_TX:
                for (i = 0; i < vport->num_txq_grp; i++) {
                        struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];

                        for (j = 0; j < tx_qgrp->num_txq && k < num_regs; j++, k++)
                                tx_qgrp->txqs[j]->tail =
                                        idpf_get_reg_addr(adapter, reg_vals[k]);
                }
                break;
        case VIRTCHNL2_QUEUE_TYPE_RX:
                for (i = 0; i < vport->num_rxq_grp; i++) {
                        struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
                        u16 num_rxq = rx_qgrp->singleq.num_rxq;

                        for (j = 0; j < num_rxq && k < num_regs; j++, k++) {
                                struct idpf_rx_queue *q;

                                q = rx_qgrp->singleq.rxqs[j];
                                q->tail = idpf_get_reg_addr(adapter,
                                                            reg_vals[k]);
                        }
                }
                break;
        case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
                for (i = 0; i < vport->num_rxq_grp; i++) {
                        struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
                        u8 num_bufqs = vport->num_bufqs_per_qgrp;

                        for (j = 0; j < num_bufqs && k < num_regs; j++, k++) {
                                struct idpf_buf_queue *q;

                                q = &rx_qgrp->splitq.bufq_sets[j].bufq;
                                q->tail = idpf_get_reg_addr(adapter,
                                                            reg_vals[k]);
                        }
                }
                break;
        default:
                break;
        }

        return k;
}

/**
 * idpf_queue_reg_init - initialize queue registers
 * @vport: virtual port structure
 *
 * Return 0 on success, negative on failure
 */
int idpf_queue_reg_init(struct idpf_vport *vport)
{
        struct virtchnl2_create_vport *vport_params;
        struct virtchnl2_queue_reg_chunks *chunks;
        struct idpf_vport_config *vport_config;
        u16 vport_idx = vport->idx;
        int num_regs, ret = 0;
        u32 *reg_vals;

        /* We may never deal with more than 256 queues of the same type */
        reg_vals = kcalloc(IDPF_LARGE_MAX_Q, sizeof(*reg_vals), GFP_KERNEL);
        if (!reg_vals)
                return -ENOMEM;

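        /* Prefer the queue chunk info saved from the most recent add_queues
         * request, if any; otherwise fall back to the chunks from the
         * original create_vport response.
         */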
        vport_config = vport->adapter->vport_config[vport_idx];
        if (vport_config->req_qs_chunks) {
                struct virtchnl2_add_queues *vc_aq =
                  (struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
                chunks = &vc_aq->chunks;
        } else {
                vport_params = vport->adapter->vport_params_recvd[vport_idx];
                chunks = &vport_params->chunks;
        }

        /* Initialize Tx queue tail register address */
        num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
                                        VIRTCHNL2_QUEUE_TYPE_TX,
                                        chunks);
        if (num_regs < vport->num_txq) {
                ret = -EINVAL;
                goto free_reg_vals;
        }

        num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
                                         VIRTCHNL2_QUEUE_TYPE_TX);
        if (num_regs < vport->num_txq) {
                ret = -EINVAL;
                goto free_reg_vals;
        }

        /* Initialize Rx/buffer queue tail register address based on Rx queue
         * model
         */
        if (idpf_is_queue_model_split(vport->rxq_model)) {
                num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
                                                VIRTCHNL2_QUEUE_TYPE_RX_BUFFER,
                                                chunks);
                if (num_regs < vport->num_bufq) {
                        ret = -EINVAL;
                        goto free_reg_vals;
                }

                num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
                                                 VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
                if (num_regs < vport->num_bufq) {
                        ret = -EINVAL;
                        goto free_reg_vals;
                }
        } else {
                num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
                                                VIRTCHNL2_QUEUE_TYPE_RX,
                                                chunks);
                if (num_regs < vport->num_rxq) {
                        ret = -EINVAL;
                        goto free_reg_vals;
                }

                num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
                                                 VIRTCHNL2_QUEUE_TYPE_RX);
                if (num_regs < vport->num_rxq) {
                        ret = -EINVAL;
                        goto free_reg_vals;
                }
        }

free_reg_vals:
        kfree(reg_vals);

        return ret;
}

/**
 * idpf_send_create_vport_msg - Send virtchnl create vport message
 * @adapter: Driver specific private structure
 * @max_q: vport max queue info
 *
 * Send virtchnl create vport message.
 *
 * Returns 0 on success, negative on failure
 */
int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
                               struct idpf_vport_max_q *max_q)
{
        struct virtchnl2_create_vport *vport_msg;
        struct idpf_vc_xn_params xn_params = {};
        u16 idx = adapter->next_vport;
        int err, buf_size;
        ssize_t reply_sz;

        buf_size = sizeof(struct virtchnl2_create_vport);
        if (!adapter->vport_params_reqd[idx]) {
                adapter->vport_params_reqd[idx] = kzalloc(buf_size,
                                                          GFP_KERNEL);
                if (!adapter->vport_params_reqd[idx])
                        return -ENOMEM;
        }

        vport_msg = adapter->vport_params_reqd[idx];
        vport_msg->vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
        vport_msg->vport_index = cpu_to_le16(idx);

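        /* Split queue model is the default and the only option when
         * CONFIG_IDPF_SINGLEQ is not compiled in; single queue is used only
         * when it is available and split queue was not requested.
         */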
        if (adapter->req_tx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ))
                vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
        else
                vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);

        if (adapter->req_rx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ))
                vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
        else
                vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);

        err = idpf_vport_calc_total_qs(adapter, idx, vport_msg, max_q);
        if (err) {
                dev_err(&adapter->pdev->dev, "Not enough queues are available\n");

                return err;
        }

        if (!adapter->vport_params_recvd[idx]) {
                adapter->vport_params_recvd[idx] = kzalloc(IDPF_CTLQ_MAX_BUF_LEN,
                                                           GFP_KERNEL);
                if (!adapter->vport_params_recvd[idx]) {
                        err = -ENOMEM;
                        goto free_vport_params;
                }
        }

        xn_params.vc_op = VIRTCHNL2_OP_CREATE_VPORT;
        xn_params.send_buf.iov_base = vport_msg;
        xn_params.send_buf.iov_len = buf_size;
        xn_params.recv_buf.iov_base = adapter->vport_params_recvd[idx];
        xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
        xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
        reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
        if (reply_sz < 0) {
                err = reply_sz;
                goto free_vport_params;
        }
        if (reply_sz < IDPF_CTLQ_MAX_BUF_LEN) {
                err = -EIO;
                goto free_vport_params;
        }

        return 0;

free_vport_params:
        kfree(adapter->vport_params_recvd[idx]);
        adapter->vport_params_recvd[idx] = NULL;
        kfree(adapter->vport_params_reqd[idx]);
        adapter->vport_params_reqd[idx] = NULL;

        return err;
}

/**
 * idpf_check_supported_desc_ids - Verify we have required descriptor support
 * @vport: virtual port structure
 *
 * Return 0 on success, error on failure
 */
int idpf_check_supported_desc_ids(struct idpf_vport *vport)
{
        struct idpf_adapter *adapter = vport->adapter;
        struct virtchnl2_create_vport *vport_msg;
        u64 rx_desc_ids, tx_desc_ids;

        vport_msg = adapter->vport_params_recvd[vport->idx];

        if (!IS_ENABLED(CONFIG_IDPF_SINGLEQ) &&
            (vport_msg->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE ||
             vport_msg->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)) {
                pci_err(adapter->pdev, "singleq mode requested, but not compiled-in\n");
                return -EOPNOTSUPP;
        }

        rx_desc_ids = le64_to_cpu(vport_msg->rx_desc_ids);
        tx_desc_ids = le64_to_cpu(vport_msg->tx_desc_ids);

        if (idpf_is_queue_model_split(vport->rxq_model)) {
                if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M)) {
                        dev_info(&adapter->pdev->dev, "Minimum RX descriptor support not provided, using the default\n");
                        vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
                }
        } else {
                if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M))
                        vport->base_rxd = true;
        }

        if (!idpf_is_queue_model_split(vport->txq_model))
                return 0;

        if ((tx_desc_ids & MIN_SUPPORT_TXDID) != MIN_SUPPORT_TXDID) {
                dev_info(&adapter->pdev->dev, "Minimum TX descriptor support not provided, using the default\n");
                vport_msg->tx_desc_ids = cpu_to_le64(MIN_SUPPORT_TXDID);
        }

        return 0;
}

/**
 * idpf_send_destroy_vport_msg - Send virtchnl destroy vport message
 * @vport: virtual port data structure
 *
 * Send virtchnl destroy vport message.  Returns 0 on success, negative on
 * failure.
 */
int idpf_send_destroy_vport_msg(struct idpf_vport *vport)
{
        struct idpf_vc_xn_params xn_params = {};
        struct virtchnl2_vport v_id;
        ssize_t reply_sz;

        v_id.vport_id = cpu_to_le32(vport->vport_id);

        xn_params.vc_op = VIRTCHNL2_OP_DESTROY_VPORT;
        xn_params.send_buf.iov_base = &v_id;
        xn_params.send_buf.iov_len = sizeof(v_id);
        xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
        reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);

        return reply_sz < 0 ? reply_sz : 0;
}

/**
 * idpf_send_enable_vport_msg - Send virtchnl enable vport message
 * @vport: virtual port data structure
 *
 * Send enable vport virtchnl message.  Returns 0 on success, negative on
 * failure.
 */
int idpf_send_enable_vport_msg(struct idpf_vport *vport)
{
        struct idpf_vc_xn_params xn_params = {};
        struct virtchnl2_vport v_id;
        ssize_t reply_sz;

        v_id.vport_id = cpu_to_le32(vport->vport_id);

        xn_params.vc_op = VIRTCHNL2_OP_ENABLE_VPORT;
        xn_params.send_buf.iov_base = &v_id;
        xn_params.send_buf.iov_len = sizeof(v_id);
        xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
        reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);

        return reply_sz < 0 ? reply_sz : 0;
}

/**
 * idpf_send_disable_vport_msg - Send virtchnl disable vport message
 * @vport: virtual port data structure
 *
 * Send disable vport virtchnl message.  Returns 0 on success, negative on
 * failure.
 */
int idpf_send_disable_vport_msg(struct idpf_vport *vport)
{
        struct idpf_vc_xn_params xn_params = {};
        struct virtchnl2_vport v_id;
        ssize_t reply_sz;

        v_id.vport_id = cpu_to_le32(vport->vport_id);

        xn_params.vc_op = VIRTCHNL2_OP_DISABLE_VPORT;
        xn_params.send_buf.iov_base = &v_id;
        xn_params.send_buf.iov_len = sizeof(v_id);
        xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
        reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);

        return reply_sz < 0 ? reply_sz : 0;
}

/**
 * idpf_send_config_tx_queues_msg - Send virtchnl config tx queues message
 * @vport: virtual port data structure
 *
 * Send config tx queues virtchnl message. Returns 0 on success, negative on
 * failure.
 */
static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
{
        struct virtchnl2_config_tx_queues *ctq __free(kfree) = NULL;
        struct virtchnl2_txq_info *qi __free(kfree) = NULL;
1442         struct idpf_vc_xn_params xn_params = {};
1443         u32 config_sz, chunk_sz, buf_sz;
1444         int totqs, num_msgs, num_chunks;
1445         ssize_t reply_sz;
1446         int i, k = 0;
1447
1448         totqs = vport->num_txq + vport->num_complq;
1449         qi = kcalloc(totqs, sizeof(struct virtchnl2_txq_info), GFP_KERNEL);
1450         if (!qi)
1451                 return -ENOMEM;
1452
1453         /* Populate the queue info buffer with all queue context info */
1454         for (i = 0; i < vport->num_txq_grp; i++) {
1455                 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
1456                 int j, sched_mode;
1457
1458                 for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
1459                         qi[k].queue_id =
1460                                 cpu_to_le32(tx_qgrp->txqs[j]->q_id);
1461                         qi[k].model =
1462                                 cpu_to_le16(vport->txq_model);
1463                         qi[k].type =
1464                                 cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
1465                         qi[k].ring_len =
1466                                 cpu_to_le16(tx_qgrp->txqs[j]->desc_count);
1467                         qi[k].dma_ring_addr =
1468                                 cpu_to_le64(tx_qgrp->txqs[j]->dma);
1469                         if (idpf_is_queue_model_split(vport->txq_model)) {
1470                                 struct idpf_tx_queue *q = tx_qgrp->txqs[j];
1471
1472                                 qi[k].tx_compl_queue_id =
1473                                         cpu_to_le16(tx_qgrp->complq->q_id);
1474                                 qi[k].relative_queue_id = cpu_to_le16(j);
1475
1476                                 if (idpf_queue_has(FLOW_SCH_EN, q))
1477                                         qi[k].sched_mode =
1478                                         cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_FLOW);
1479                                 else
1480                                         qi[k].sched_mode =
1481                                         cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
1482                         } else {
1483                                 qi[k].sched_mode =
1484                                         cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
1485                         }
1486                 }
1487
1488                 if (!idpf_is_queue_model_split(vport->txq_model))
1489                         continue;
1490
1491                 qi[k].queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
1492                 qi[k].model = cpu_to_le16(vport->txq_model);
1493                 qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
1494                 qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count);
1495                 qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma);
1496
1497                 if (idpf_queue_has(FLOW_SCH_EN, tx_qgrp->complq))
1498                         sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
1499                 else
1500                         sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
1501                 qi[k].sched_mode = cpu_to_le16(sched_mode);
1502
1503                 k++;
1504         }
1505
1506         /* Make sure accounting agrees */
1507         if (k != totqs)
1508                 return -EINVAL;
1509
1510         /* Chunk up the queue contexts into multiple messages to avoid
1511          * sending a control queue message buffer that is too large
1512          */
1513         config_sz = sizeof(struct virtchnl2_config_tx_queues);
1514         chunk_sz = sizeof(struct virtchnl2_txq_info);
1515
1516         num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
1517                            totqs);
1518         num_msgs = DIV_ROUND_UP(totqs, num_chunks);
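        /*
         * Illustrative example (actual counts depend on
         * IDPF_CTLQ_MAX_BUF_LEN and the virtchnl2 struct sizes): if the
         * mailbox buffer holds 64 chunks after the fixed header, 100
         * queues are sent as DIV_ROUND_UP(100, 64) == 2 messages of 64 and
         * 36 chunks; the tail of the loop below shrinks num_chunks and
         * buf_sz to fit the remainder.
         */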

        buf_sz = struct_size(ctq, qinfo, num_chunks);
        ctq = kzalloc(buf_sz, GFP_KERNEL);
        if (!ctq)
                return -ENOMEM;

        xn_params.vc_op = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
        xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;

        for (i = 0, k = 0; i < num_msgs; i++) {
                memset(ctq, 0, buf_sz);
                ctq->vport_id = cpu_to_le32(vport->vport_id);
                ctq->num_qinfo = cpu_to_le16(num_chunks);
                memcpy(ctq->qinfo, &qi[k], chunk_sz * num_chunks);

                xn_params.send_buf.iov_base = ctq;
                xn_params.send_buf.iov_len = buf_sz;
                reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
                if (reply_sz < 0)
                        return reply_sz;

                k += num_chunks;
                totqs -= num_chunks;
                num_chunks = min(num_chunks, totqs);
                /* Recalculate buffer size */
                buf_sz = struct_size(ctq, qinfo, num_chunks);
        }

        return 0;
}

/**
 * idpf_send_config_rx_queues_msg - Send virtchnl config rx queues message
 * @vport: virtual port data structure
 *
 * Send config rx queues virtchnl message.  Returns 0 on success, negative on
 * failure.
 */
static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
{
        struct virtchnl2_config_rx_queues *crq __free(kfree) = NULL;
        struct virtchnl2_rxq_info *qi __free(kfree) = NULL;
        struct idpf_vc_xn_params xn_params = {};
        u32 config_sz, chunk_sz, buf_sz;
        int totqs, num_msgs, num_chunks;
        ssize_t reply_sz;
        int i, k = 0;

        totqs = vport->num_rxq + vport->num_bufq;
        qi = kcalloc(totqs, sizeof(struct virtchnl2_rxq_info), GFP_KERNEL);
        if (!qi)
                return -ENOMEM;

        /* Populate the queue info buffer with all queue context info */
        for (i = 0; i < vport->num_rxq_grp; i++) {
                struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
                u16 num_rxq;
                int j;

                if (!idpf_is_queue_model_split(vport->rxq_model))
                        goto setup_rxqs;

                for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) {
                        struct idpf_buf_queue *bufq =
                                &rx_qgrp->splitq.bufq_sets[j].bufq;

                        qi[k].queue_id = cpu_to_le32(bufq->q_id);
                        qi[k].model = cpu_to_le16(vport->rxq_model);
                        qi[k].type =
                                cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
                        qi[k].desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
                        qi[k].ring_len = cpu_to_le16(bufq->desc_count);
                        qi[k].dma_ring_addr = cpu_to_le64(bufq->dma);
                        qi[k].data_buffer_size = cpu_to_le32(bufq->rx_buf_size);
                        qi[k].buffer_notif_stride = IDPF_RX_BUF_STRIDE;
                        qi[k].rx_buffer_low_watermark =
                                cpu_to_le16(bufq->rx_buffer_low_watermark);
                        if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
                                qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
                }

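                /*
                 * Buffer queues above exist only in splitq mode; the RX
                 * queues themselves are recorded next, with the
                 * splitq-only fields skipped for the singleq model.
                 */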
setup_rxqs:
                if (idpf_is_queue_model_split(vport->rxq_model))
                        num_rxq = rx_qgrp->splitq.num_rxq_sets;
                else
                        num_rxq = rx_qgrp->singleq.num_rxq;

                for (j = 0; j < num_rxq; j++, k++) {
                        const struct idpf_bufq_set *sets;
                        struct idpf_rx_queue *rxq;

                        if (!idpf_is_queue_model_split(vport->rxq_model)) {
                                rxq = rx_qgrp->singleq.rxqs[j];
                                goto common_qi_fields;
                        }

                        rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
                        sets = rxq->bufq_sets;

                        /* In splitq mode, RXQ buffer size should be
                         * set to that of the first buffer queue
                         * associated with this RXQ.
                         */
                        rxq->rx_buf_size = sets[0].bufq.rx_buf_size;

                        qi[k].rx_bufq1_id = cpu_to_le16(sets[0].bufq.q_id);
                        if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) {
                                qi[k].bufq2_ena = IDPF_BUFQ2_ENA;
                                qi[k].rx_bufq2_id =
                                        cpu_to_le16(sets[1].bufq.q_id);
                        }
                        qi[k].rx_buffer_low_watermark =
                                cpu_to_le16(rxq->rx_buffer_low_watermark);
                        if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
                                qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);

                        rxq->rx_hbuf_size = sets[0].bufq.rx_hbuf_size;

                        if (idpf_queue_has(HSPLIT_EN, rxq)) {
                                qi[k].qflags |=
                                        cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT);
                                qi[k].hdr_buffer_size =
                                        cpu_to_le16(rxq->rx_hbuf_size);
                        }

common_qi_fields:
                        qi[k].queue_id = cpu_to_le32(rxq->q_id);
                        qi[k].model = cpu_to_le16(vport->rxq_model);
                        qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
                        qi[k].ring_len = cpu_to_le16(rxq->desc_count);
                        qi[k].dma_ring_addr = cpu_to_le64(rxq->dma);
                        qi[k].max_pkt_size = cpu_to_le32(rxq->rx_max_pkt_size);
                        qi[k].data_buffer_size = cpu_to_le32(rxq->rx_buf_size);
                        qi[k].qflags |=
                                cpu_to_le16(VIRTCHNL2_RX_DESC_SIZE_32BYTE);
                        qi[k].desc_ids = cpu_to_le64(rxq->rxdids);
                }
        }

        /* Make sure accounting agrees */
        if (k != totqs)
                return -EINVAL;

        /* Chunk up the queue contexts into multiple messages to avoid
         * sending a control queue message buffer that is too large
         */
        config_sz = sizeof(struct virtchnl2_config_rx_queues);
        chunk_sz = sizeof(struct virtchnl2_rxq_info);

        num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
                           totqs);
        num_msgs = DIV_ROUND_UP(totqs, num_chunks);

        buf_sz = struct_size(crq, qinfo, num_chunks);
        crq = kzalloc(buf_sz, GFP_KERNEL);
        if (!crq)
                return -ENOMEM;

        xn_params.vc_op = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
        xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;

        for (i = 0, k = 0; i < num_msgs; i++) {
                memset(crq, 0, buf_sz);
                crq->vport_id = cpu_to_le32(vport->vport_id);
                crq->num_qinfo = cpu_to_le16(num_chunks);
                memcpy(crq->qinfo, &qi[k], chunk_sz * num_chunks);

                xn_params.send_buf.iov_base = crq;
                xn_params.send_buf.iov_len = buf_sz;
                reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
                if (reply_sz < 0)
                        return reply_sz;

                k += num_chunks;
                totqs -= num_chunks;
                num_chunks = min(num_chunks, totqs);
                /* Recalculate buffer size */
                buf_sz = struct_size(crq, qinfo, num_chunks);
        }

        return 0;
}

/**
 * idpf_send_ena_dis_queues_msg - Send virtchnl enable or disable
 * queues message
 * @vport: virtual port data structure
 * @ena: if true enable, false disable
 *
 * Send enable or disable queues virtchnl message. Returns 0 on success,
 * negative on failure.
 */
static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena)
{
        struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;
        struct virtchnl2_queue_chunk *qc __free(kfree) = NULL;
        u32 num_msgs, num_chunks, num_txq, num_rxq, num_q;
        struct idpf_vc_xn_params xn_params = {};
        struct virtchnl2_queue_chunks *qcs;
        u32 config_sz, chunk_sz, buf_sz;
        ssize_t reply_sz;
        int i, j, k = 0;

        num_txq = vport->num_txq + vport->num_complq;
        num_rxq = vport->num_rxq + vport->num_bufq;
        num_q = num_txq + num_rxq;
        buf_sz = sizeof(struct virtchnl2_queue_chunk) * num_q;
        qc = kzalloc(buf_sz, GFP_KERNEL);
        if (!qc)
                return -ENOMEM;

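        /*
         * Chunks are recorded in a fixed order -- TX, TX completion, RX,
         * RX buffer -- and each accounting check below verifies that the
         * running count k matches the expected total before moving on to
         * the next queue type.
         */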
        for (i = 0; i < vport->num_txq_grp; i++) {
                struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];

                for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
                        qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
                        qc[k].start_queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);
                        qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
                }
        }
        if (vport->num_txq != k)
                return -EINVAL;

        if (!idpf_is_queue_model_split(vport->txq_model))
                goto setup_rx;

        for (i = 0; i < vport->num_txq_grp; i++, k++) {
                struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];

                qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
                qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
                qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
        }
        if (vport->num_complq != (k - vport->num_txq))
                return -EINVAL;

setup_rx:
        for (i = 0; i < vport->num_rxq_grp; i++) {
                struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];

                if (idpf_is_queue_model_split(vport->rxq_model))
                        num_rxq = rx_qgrp->splitq.num_rxq_sets;
                else
                        num_rxq = rx_qgrp->singleq.num_rxq;

                for (j = 0; j < num_rxq; j++, k++) {
                        if (idpf_is_queue_model_split(vport->rxq_model))
                                qc[k].start_queue_id =
                                cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_id);
                        else
                                qc[k].start_queue_id =
                                cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_id);

                        qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
                        qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
                }
        }
        if (vport->num_rxq != k - (vport->num_txq + vport->num_complq))
                return -EINVAL;

        if (!idpf_is_queue_model_split(vport->rxq_model))
                goto send_msg;

        for (i = 0; i < vport->num_rxq_grp; i++) {
                struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];

                for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) {
                        const struct idpf_buf_queue *q;

                        q = &rx_qgrp->splitq.bufq_sets[j].bufq;
                        qc[k].type =
                                cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
                        qc[k].start_queue_id = cpu_to_le32(q->q_id);
                        qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
                }
        }
        if (vport->num_bufq != k - (vport->num_txq +
                                    vport->num_complq +
                                    vport->num_rxq))
                return -EINVAL;

send_msg:
        /* Chunk up the queue info into multiple messages */
        config_sz = sizeof(struct virtchnl2_del_ena_dis_queues);
        chunk_sz = sizeof(struct virtchnl2_queue_chunk);

        num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
                           num_q);
        num_msgs = DIV_ROUND_UP(num_q, num_chunks);

        buf_sz = struct_size(eq, chunks.chunks, num_chunks);
        eq = kzalloc(buf_sz, GFP_KERNEL);
        if (!eq)
                return -ENOMEM;

        if (ena) {
                xn_params.vc_op = VIRTCHNL2_OP_ENABLE_QUEUES;
                xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
        } else {
                xn_params.vc_op = VIRTCHNL2_OP_DISABLE_QUEUES;
                xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
        }

        for (i = 0, k = 0; i < num_msgs; i++) {
                memset(eq, 0, buf_sz);
                eq->vport_id = cpu_to_le32(vport->vport_id);
                eq->chunks.num_chunks = cpu_to_le16(num_chunks);
                qcs = &eq->chunks;
                memcpy(qcs->chunks, &qc[k], chunk_sz * num_chunks);

                xn_params.send_buf.iov_base = eq;
                xn_params.send_buf.iov_len = buf_sz;
                reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
                if (reply_sz < 0)
                        return reply_sz;

                k += num_chunks;
                num_q -= num_chunks;
                num_chunks = min(num_chunks, num_q);
                /* Recalculate buffer size */
                buf_sz = struct_size(eq, chunks.chunks, num_chunks);
        }

        return 0;
}

/**
 * idpf_send_map_unmap_queue_vector_msg - Send virtchnl map or unmap queue
 * vector message
 * @vport: virtual port data structure
 * @map: true for map and false for unmap
 *
 * Send map or unmap queue vector virtchnl message.  Returns 0 on success,
 * negative on failure.
 */
int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
{
        struct virtchnl2_queue_vector_maps *vqvm __free(kfree) = NULL;
        struct virtchnl2_queue_vector *vqv __free(kfree) = NULL;
        struct idpf_vc_xn_params xn_params = {};
        u32 config_sz, chunk_sz, buf_sz;
        u32 num_msgs, num_chunks, num_q;
        ssize_t reply_sz;
        int i, j, k = 0;

        num_q = vport->num_txq + vport->num_rxq;

        buf_sz = sizeof(struct virtchnl2_queue_vector) * num_q;
        vqv = kzalloc(buf_sz, GFP_KERNEL);
        if (!vqv)
                return -ENOMEM;

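        /*
         * In splitq mode a TX queue is serviced via its group's completion
         * queue, so the complq's vector and TX ITR index are reported for
         * every TX queue in the group; in singleq mode each TX queue
         * reports its own vector.
         */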
        for (i = 0; i < vport->num_txq_grp; i++) {
                struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];

                for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
                        vqv[k].queue_type =
                                cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
                        vqv[k].queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);

                        if (idpf_is_queue_model_split(vport->txq_model)) {
                                vqv[k].vector_id =
                                cpu_to_le16(tx_qgrp->complq->q_vector->v_idx);
                                vqv[k].itr_idx =
                                cpu_to_le32(tx_qgrp->complq->q_vector->tx_itr_idx);
                        } else {
                                vqv[k].vector_id =
                                cpu_to_le16(tx_qgrp->txqs[j]->q_vector->v_idx);
                                vqv[k].itr_idx =
                                cpu_to_le32(tx_qgrp->txqs[j]->q_vector->tx_itr_idx);
                        }
                }
        }

        if (vport->num_txq != k)
                return -EINVAL;

        for (i = 0; i < vport->num_rxq_grp; i++) {
                struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
                u16 num_rxq;

                if (idpf_is_queue_model_split(vport->rxq_model))
                        num_rxq = rx_qgrp->splitq.num_rxq_sets;
                else
                        num_rxq = rx_qgrp->singleq.num_rxq;

                for (j = 0; j < num_rxq; j++, k++) {
                        struct idpf_rx_queue *rxq;

                        if (idpf_is_queue_model_split(vport->rxq_model))
                                rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
                        else
                                rxq = rx_qgrp->singleq.rxqs[j];

                        vqv[k].queue_type =
                                cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
                        vqv[k].queue_id = cpu_to_le32(rxq->q_id);
                        vqv[k].vector_id = cpu_to_le16(rxq->q_vector->v_idx);
                        vqv[k].itr_idx = cpu_to_le32(rxq->q_vector->rx_itr_idx);
                }
        }

        if (idpf_is_queue_model_split(vport->txq_model)) {
                if (vport->num_rxq != k - vport->num_complq)
                        return -EINVAL;
        } else {
                if (vport->num_rxq != k - vport->num_txq)
                        return -EINVAL;
        }

        /* Chunk up the vector info into multiple messages */
        config_sz = sizeof(struct virtchnl2_queue_vector_maps);
        chunk_sz = sizeof(struct virtchnl2_queue_vector);

        num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
                           num_q);
        num_msgs = DIV_ROUND_UP(num_q, num_chunks);

        buf_sz = struct_size(vqvm, qv_maps, num_chunks);
        vqvm = kzalloc(buf_sz, GFP_KERNEL);
        if (!vqvm)
                return -ENOMEM;

        if (map) {
                xn_params.vc_op = VIRTCHNL2_OP_MAP_QUEUE_VECTOR;
                xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
        } else {
                xn_params.vc_op = VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR;
                xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
        }

        for (i = 0, k = 0; i < num_msgs; i++) {
                memset(vqvm, 0, buf_sz);
                xn_params.send_buf.iov_base = vqvm;
                xn_params.send_buf.iov_len = buf_sz;
                vqvm->vport_id = cpu_to_le32(vport->vport_id);
                vqvm->num_qv_maps = cpu_to_le16(num_chunks);
                memcpy(vqvm->qv_maps, &vqv[k], chunk_sz * num_chunks);

                reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
                if (reply_sz < 0)
                        return reply_sz;

                k += num_chunks;
                num_q -= num_chunks;
                num_chunks = min(num_chunks, num_q);
                /* Recalculate buffer size */
                buf_sz = struct_size(vqvm, qv_maps, num_chunks);
        }

        return 0;
}

/**
 * idpf_send_enable_queues_msg - send enable queues virtchnl message
 * @vport: Virtual port private data structure
 *
 * Will send enable queues virtchnl message.  Returns 0 on success, negative on
 * failure.
 */
int idpf_send_enable_queues_msg(struct idpf_vport *vport)
{
        return idpf_send_ena_dis_queues_msg(vport, true);
}

/**
 * idpf_send_disable_queues_msg - send disable queues virtchnl message
 * @vport: Virtual port private data structure
 *
 * Will send disable queues virtchnl message.  Returns 0 on success, negative
 * on failure.
 */
int idpf_send_disable_queues_msg(struct idpf_vport *vport)
{
        int err, i;

        err = idpf_send_ena_dis_queues_msg(vport, false);
        if (err)
                return err;

        /* switch to poll mode as interrupts will be disabled after disable
         * queues virtchnl message is sent
         */
        for (i = 0; i < vport->num_txq; i++)
                idpf_queue_set(POLL_MODE, vport->txqs[i]);

        /* schedule the napi to receive all the marker packets */
        local_bh_disable();
        for (i = 0; i < vport->num_q_vectors; i++)
                napi_schedule(&vport->q_vectors[i].napi);
        local_bh_enable();

        return idpf_wait_for_marker_event(vport);
}

/**
 * idpf_convert_reg_to_queue_chunks - Copy queue chunk information to the right
 * structure
 * @dchunks: Destination chunks to store data to
 * @schunks: Source chunks to copy data from
 * @num_chunks: number of chunks to copy
 */
static void idpf_convert_reg_to_queue_chunks(struct virtchnl2_queue_chunk *dchunks,
                                             struct virtchnl2_queue_reg_chunk *schunks,
                                             u16 num_chunks)
{
        u16 i;

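        /*
         * Both source and destination fields carry little-endian wire
         * format values, so they are copied through without byte swapping.
         */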
        for (i = 0; i < num_chunks; i++) {
                dchunks[i].type = schunks[i].type;
                dchunks[i].start_queue_id = schunks[i].start_queue_id;
                dchunks[i].num_queues = schunks[i].num_queues;
        }
}

/**
 * idpf_send_delete_queues_msg - send delete queues virtchnl message
 * @vport: Virtual port private data structure
 *
 * Will send delete queues virtchnl message. Returns 0 on success, negative on
 * failure.
 */
int idpf_send_delete_queues_msg(struct idpf_vport *vport)
{
        struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;
        struct virtchnl2_create_vport *vport_params;
        struct virtchnl2_queue_reg_chunks *chunks;
        struct idpf_vc_xn_params xn_params = {};
        struct idpf_vport_config *vport_config;
        u16 vport_idx = vport->idx;
        ssize_t reply_sz;
        u16 num_chunks;
        int buf_size;

        vport_config = vport->adapter->vport_config[vport_idx];
        if (vport_config->req_qs_chunks) {
                chunks = &vport_config->req_qs_chunks->chunks;
        } else {
                vport_params = vport->adapter->vport_params_recvd[vport_idx];
                chunks = &vport_params->chunks;
        }

        num_chunks = le16_to_cpu(chunks->num_chunks);
        buf_size = struct_size(eq, chunks.chunks, num_chunks);

        eq = kzalloc(buf_size, GFP_KERNEL);
        if (!eq)
                return -ENOMEM;

        eq->vport_id = cpu_to_le32(vport->vport_id);
        eq->chunks.num_chunks = cpu_to_le16(num_chunks);

        idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->chunks,
                                         num_chunks);

        xn_params.vc_op = VIRTCHNL2_OP_DEL_QUEUES;
        xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
        xn_params.send_buf.iov_base = eq;
        xn_params.send_buf.iov_len = buf_size;
        reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);

        return reply_sz < 0 ? reply_sz : 0;
}

/**
 * idpf_send_config_queues_msg - Send config queues virtchnl message
 * @vport: Virtual port private data structure
 *
 * Will send config queues virtchnl message. Returns 0 on success, negative on
 * failure.
 */
int idpf_send_config_queues_msg(struct idpf_vport *vport)
{
        int err;

        err = idpf_send_config_tx_queues_msg(vport);
        if (err)
                return err;

        return idpf_send_config_rx_queues_msg(vport);
}

/**
 * idpf_send_add_queues_msg - Send virtchnl add queues message
 * @vport: Virtual port private data structure
 * @num_tx_q: number of transmit queues
 * @num_complq: number of transmit completion queues
 * @num_rx_q: number of receive queues
 * @num_rx_bufq: number of receive buffer queues
 *
 * Returns 0 on success, negative on failure. vport _MUST_ be const here as
 * we should not change any fields within vport itself in this function.
 */
int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
                             u16 num_complq, u16 num_rx_q, u16 num_rx_bufq)
{
        struct virtchnl2_add_queues *vc_msg __free(kfree) = NULL;
        struct idpf_vc_xn_params xn_params = {};
        struct idpf_vport_config *vport_config;
        struct virtchnl2_add_queues aq = {};
        u16 vport_idx = vport->idx;
        ssize_t reply_sz;
        int size;

        vc_msg = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
        if (!vc_msg)
                return -ENOMEM;

        vport_config = vport->adapter->vport_config[vport_idx];
        kfree(vport_config->req_qs_chunks);
        vport_config->req_qs_chunks = NULL;

        aq.vport_id = cpu_to_le32(vport->vport_id);
        aq.num_tx_q = cpu_to_le16(num_tx_q);
        aq.num_tx_complq = cpu_to_le16(num_complq);
        aq.num_rx_q = cpu_to_le16(num_rx_q);
        aq.num_rx_bufq = cpu_to_le16(num_rx_bufq);

        xn_params.vc_op = VIRTCHNL2_OP_ADD_QUEUES;
        xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
        xn_params.send_buf.iov_base = &aq;
        xn_params.send_buf.iov_len = sizeof(aq);
        xn_params.recv_buf.iov_base = vc_msg;
        xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
        reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
        if (reply_sz < 0)
                return reply_sz;

        /* compare vc_msg num queues with vport num queues */
        if (le16_to_cpu(vc_msg->num_tx_q) != num_tx_q ||
            le16_to_cpu(vc_msg->num_rx_q) != num_rx_q ||
            le16_to_cpu(vc_msg->num_tx_complq) != num_complq ||
            le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq)
                return -EINVAL;

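        /* The reply must also be large enough to hold the chunk list it
         * advertises; cache that list in req_qs_chunks for the later
         * delete queues message.
         */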
        size = struct_size(vc_msg, chunks.chunks,
                           le16_to_cpu(vc_msg->chunks.num_chunks));
        if (reply_sz < size)
                return -EIO;

        vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL);
        if (!vport_config->req_qs_chunks)
                return -ENOMEM;

        return 0;
}

/**
 * idpf_send_alloc_vectors_msg - Send virtchnl alloc vectors message
 * @adapter: Driver specific private structure
 * @num_vectors: number of vectors to be allocated
 *
 * Returns 0 on success, negative on failure.
 */
int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors)
{
        struct virtchnl2_alloc_vectors *rcvd_vec __free(kfree) = NULL;
        struct idpf_vc_xn_params xn_params = {};
        struct virtchnl2_alloc_vectors ac = {};
        ssize_t reply_sz;
        u16 num_vchunks;
        int size;

        ac.num_vectors = cpu_to_le16(num_vectors);

        rcvd_vec = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
        if (!rcvd_vec)
                return -ENOMEM;

        xn_params.vc_op = VIRTCHNL2_OP_ALLOC_VECTORS;
        xn_params.send_buf.iov_base = &ac;
        xn_params.send_buf.iov_len = sizeof(ac);
        xn_params.recv_buf.iov_base = rcvd_vec;
        xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
        xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
        reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
        if (reply_sz < 0)
                return reply_sz;

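        /* Sanity-check the reply: it must cover the vector chunks it
         * advertises and still fit the control queue buffer before the
         * chunk list is cached in req_vec_chunks.
         */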
        num_vchunks = le16_to_cpu(rcvd_vec->vchunks.num_vchunks);
        size = struct_size(rcvd_vec, vchunks.vchunks, num_vchunks);
        if (reply_sz < size)
                return -EIO;

        if (size > IDPF_CTLQ_MAX_BUF_LEN)
                return -EINVAL;

        kfree(adapter->req_vec_chunks);
        adapter->req_vec_chunks = kmemdup(rcvd_vec, size, GFP_KERNEL);
        if (!adapter->req_vec_chunks)
                return -ENOMEM;

        if (le16_to_cpu(adapter->req_vec_chunks->num_vectors) < num_vectors) {
                kfree(adapter->req_vec_chunks);
                adapter->req_vec_chunks = NULL;
                return -EINVAL;
        }

        return 0;
}

/**
 * idpf_send_dealloc_vectors_msg - Send virtchnl deallocate vectors message
 * @adapter: Driver specific private structure
 *
 * Returns 0 on success, negative on failure.
 */
int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter)
{
        struct virtchnl2_alloc_vectors *ac = adapter->req_vec_chunks;
        struct virtchnl2_vector_chunks *vcs = &ac->vchunks;
        struct idpf_vc_xn_params xn_params = {};
        ssize_t reply_sz;
        int buf_size;

        buf_size = struct_size(vcs, vchunks, le16_to_cpu(vcs->num_vchunks));

        xn_params.vc_op = VIRTCHNL2_OP_DEALLOC_VECTORS;
        xn_params.send_buf.iov_base = vcs;
        xn_params.send_buf.iov_len = buf_size;
        xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
        reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
        if (reply_sz < 0)
                return reply_sz;

        kfree(adapter->req_vec_chunks);
        adapter->req_vec_chunks = NULL;

        return 0;
}

/**
 * idpf_get_max_vfs - Get max number of vfs supported
 * @adapter: Driver specific private structure
 *
 * Returns max number of VFs
 */
static int idpf_get_max_vfs(struct idpf_adapter *adapter)
{
        return le16_to_cpu(adapter->caps.max_sriov_vfs);
}

/**
 * idpf_send_set_sriov_vfs_msg - Send virtchnl set sriov vfs message
 * @adapter: Driver specific private structure
 * @num_vfs: number of virtual functions to be created
 *
 * Returns 0 on success, negative on failure.
 */
int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs)
{
        struct virtchnl2_sriov_vfs_info svi = {};
        struct idpf_vc_xn_params xn_params = {};
        ssize_t reply_sz;

        svi.num_vfs = cpu_to_le16(num_vfs);
        xn_params.vc_op = VIRTCHNL2_OP_SET_SRIOV_VFS;
        xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
        xn_params.send_buf.iov_base = &svi;
        xn_params.send_buf.iov_len = sizeof(svi);
        reply_sz = idpf_vc_xn_exec(adapter, &xn_params);

        return reply_sz < 0 ? reply_sz : 0;
}

/**
 * idpf_send_get_stats_msg - Send virtchnl get statistics message
 * @vport: vport to get stats for
 *
 * Returns 0 on success, negative on failure.
 */
int idpf_send_get_stats_msg(struct idpf_vport *vport)
{
        struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
        struct rtnl_link_stats64 *netstats = &np->netstats;
        struct virtchnl2_vport_stats stats_msg = {};
        struct idpf_vc_xn_params xn_params = {};
        ssize_t reply_sz;

        /* Don't send get_stats message if the link is down */
        if (np->state <= __IDPF_VPORT_DOWN)
                return 0;

        stats_msg.vport_id = cpu_to_le32(vport->vport_id);

        xn_params.vc_op = VIRTCHNL2_OP_GET_STATS;
        xn_params.send_buf.iov_base = &stats_msg;
        xn_params.send_buf.iov_len = sizeof(stats_msg);
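        /* The request and reply share one buffer: the device writes the
         * full struct virtchnl2_vport_stats reply back over the request
         * in place.
         */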
        xn_params.recv_buf = xn_params.send_buf;
        xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;

        reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
        if (reply_sz < 0)
                return reply_sz;
        if (reply_sz < sizeof(stats_msg))
                return -EIO;

        spin_lock_bh(&np->stats_lock);

        netstats->rx_packets = le64_to_cpu(stats_msg.rx_unicast) +
                               le64_to_cpu(stats_msg.rx_multicast) +
                               le64_to_cpu(stats_msg.rx_broadcast);
        netstats->tx_packets = le64_to_cpu(stats_msg.tx_unicast) +
                               le64_to_cpu(stats_msg.tx_multicast) +
                               le64_to_cpu(stats_msg.tx_broadcast);
        netstats->rx_bytes = le64_to_cpu(stats_msg.rx_bytes);
        netstats->tx_bytes = le64_to_cpu(stats_msg.tx_bytes);
        netstats->rx_errors = le64_to_cpu(stats_msg.rx_errors);
        netstats->tx_errors = le64_to_cpu(stats_msg.tx_errors);
        netstats->rx_dropped = le64_to_cpu(stats_msg.rx_discards);
        netstats->tx_dropped = le64_to_cpu(stats_msg.tx_discards);

        vport->port_stats.vport_stats = stats_msg;

        spin_unlock_bh(&np->stats_lock);

        return 0;
}

/**
 * idpf_send_get_set_rss_lut_msg - Send virtchnl get or set rss lut message
 * @vport: virtual port data structure
 * @get: flag to get or set the RSS lookup table
 *
 * Returns 0 on success, negative on failure.
 */
int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
{
        struct virtchnl2_rss_lut *recv_rl __free(kfree) = NULL;
        struct virtchnl2_rss_lut *rl __free(kfree) = NULL;
        struct idpf_vc_xn_params xn_params = {};
        struct idpf_rss_data *rss_data;
        int buf_size, lut_buf_size;
        ssize_t reply_sz;
        int i;

        rss_data =
                &vport->adapter->vport_config[vport->idx]->user_config.rss_data;
        buf_size = struct_size(rl, lut, rss_data->rss_lut_size);
        rl = kzalloc(buf_size, GFP_KERNEL);
        if (!rl)
                return -ENOMEM;

        rl->vport_id = cpu_to_le32(vport->vport_id);

        xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
        xn_params.send_buf.iov_base = rl;
        xn_params.send_buf.iov_len = buf_size;

        if (get) {
                recv_rl = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
                if (!recv_rl)
                        return -ENOMEM;
                xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_LUT;
                xn_params.recv_buf.iov_base = recv_rl;
                xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
        } else {
                rl->lut_entries = cpu_to_le16(rss_data->rss_lut_size);
                for (i = 0; i < rss_data->rss_lut_size; i++)
                        rl->lut[i] = cpu_to_le32(rss_data->rss_lut[i]);

                xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_LUT;
        }
        reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
        if (reply_sz < 0)
                return reply_sz;
        if (!get)
                return 0;
        if (reply_sz < sizeof(struct virtchnl2_rss_lut))
                return -EIO;

        lut_buf_size = le16_to_cpu(recv_rl->lut_entries) * sizeof(u32);
        if (reply_sz < lut_buf_size)
                return -EIO;

        /* size didn't change, we can reuse existing lut buf */
        if (rss_data->rss_lut_size == le16_to_cpu(recv_rl->lut_entries))
                goto do_memcpy;

        rss_data->rss_lut_size = le16_to_cpu(recv_rl->lut_entries);
        kfree(rss_data->rss_lut);

        rss_data->rss_lut = kzalloc(lut_buf_size, GFP_KERNEL);
        if (!rss_data->rss_lut) {
                rss_data->rss_lut_size = 0;
                return -ENOMEM;
        }

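        /* LUT entries are u32, so the byte length to copy is lut_buf_size,
         * not the bare entry count
         */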
do_memcpy:
        memcpy(rss_data->rss_lut, recv_rl->lut, lut_buf_size);

        return 0;
}

/**
 * idpf_send_get_set_rss_key_msg - Send virtchnl get or set rss key message
 * @vport: virtual port data structure
 * @get: flag to get or set the RSS key
 *
 * Returns 0 on success, negative on failure.
 */
int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get)
{
        struct virtchnl2_rss_key *recv_rk __free(kfree) = NULL;
        struct virtchnl2_rss_key *rk __free(kfree) = NULL;
        struct idpf_vc_xn_params xn_params = {};
        struct idpf_rss_data *rss_data;
        ssize_t reply_sz;
        int i, buf_size;
        u16 key_size;

        rss_data =
                &vport->adapter->vport_config[vport->idx]->user_config.rss_data;
        buf_size = struct_size(rk, key_flex, rss_data->rss_key_size);
        rk = kzalloc(buf_size, GFP_KERNEL);
        if (!rk)
                return -ENOMEM;

        rk->vport_id = cpu_to_le32(vport->vport_id);
        xn_params.send_buf.iov_base = rk;
        xn_params.send_buf.iov_len = buf_size;
        xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
        if (get) {
                recv_rk = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
                if (!recv_rk)
                        return -ENOMEM;

                xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_KEY;
                xn_params.recv_buf.iov_base = recv_rk;
                xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
        } else {
                rk->key_len = cpu_to_le16(rss_data->rss_key_size);
                for (i = 0; i < rss_data->rss_key_size; i++)
                        rk->key_flex[i] = rss_data->rss_key[i];

                xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_KEY;
        }

        reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
        if (reply_sz < 0)
                return reply_sz;
        if (!get)
                return 0;
        if (reply_sz < sizeof(struct virtchnl2_rss_key))
                return -EIO;

        key_size = min_t(u16, NETDEV_RSS_KEY_LEN,
                         le16_to_cpu(recv_rk->key_len));
        if (reply_sz < key_size)
                return -EIO;

        /* key len didn't change, reuse existing buf */
        if (rss_data->rss_key_size == key_size)
                goto do_memcpy;

        rss_data->rss_key_size = key_size;
        kfree(rss_data->rss_key);
        rss_data->rss_key = kzalloc(key_size, GFP_KERNEL);
        if (!rss_data->rss_key) {
                rss_data->rss_key_size = 0;
                return -ENOMEM;
        }

do_memcpy:
        memcpy(rss_data->rss_key, recv_rk->key_flex, rss_data->rss_key_size);

        return 0;
}

/**
 * idpf_fill_ptype_lookup - Fill L3 specific fields in ptype lookup table
 * @ptype: ptype lookup table
 * @pstate: state machine for ptype lookup table
 * @ipv4: ipv4 or ipv6
 * @frag: fragmentation allowed
 */
static void idpf_fill_ptype_lookup(struct libeth_rx_pt *ptype,
                                   struct idpf_ptype_state *pstate,
                                   bool ipv4, bool frag)
{
        if (!pstate->outer_ip || !pstate->outer_frag) {
                pstate->outer_ip = true;

                if (ipv4)
                        ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV4;
                else
                        ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV6;

                if (frag) {
                        ptype->outer_frag = LIBETH_RX_PT_FRAG;
                        pstate->outer_frag = true;
                }
        } else {
                ptype->tunnel_type = LIBETH_RX_PT_TUNNEL_IP_IP;
                pstate->tunnel_state = IDPF_PTYPE_TUNNEL_IP;

                if (ipv4)
                        ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV4;
                else
                        ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV6;

                if (frag)
                        ptype->tunnel_end_frag = LIBETH_RX_PT_FRAG;
        }
}

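/**
 * idpf_finalize_ptype_lookup - derive payload layer and hash type
 * @ptype: ptype lookup entry to finalize
 *
 * Refine the coarse L2 payload marker set during parsing: promote it to L4
 * when an inner protocol was seen, to L3 when an outer IP header was seen,
 * keep plain Ethernet traffic at L2, and mark everything else as having no
 * payload. Finally generate the libeth hash type for the entry.
 */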
static void idpf_finalize_ptype_lookup(struct libeth_rx_pt *ptype)
{
        if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 &&
            ptype->inner_prot)
                ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L4;
        else if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 &&
                 ptype->outer_ip)
                ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L3;
        else if (ptype->outer_ip == LIBETH_RX_PT_OUTER_L2)
                ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L2;
        else
                ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_NONE;

        libeth_rx_pt_gen_hash_type(ptype);
}

/**
 * idpf_send_get_rx_ptype_msg - Send virtchnl for ptype info
 * @vport: virtual port data structure
 *
 * Returns 0 on success, negative on failure.
 */
int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
{
        struct virtchnl2_get_ptype_info *get_ptype_info __free(kfree) = NULL;
        struct virtchnl2_get_ptype_info *ptype_info __free(kfree) = NULL;
        struct libeth_rx_pt *ptype_lkup __free(kfree) = NULL;
        int max_ptype, ptypes_recvd = 0, ptype_offset;
        struct idpf_adapter *adapter = vport->adapter;
        struct idpf_vc_xn_params xn_params = {};
        u16 next_ptype_id = 0;
        ssize_t reply_sz;
        int i, j, k;

        if (vport->rx_ptype_lkup)
                return 0;

        if (idpf_is_queue_model_split(vport->rxq_model))
                max_ptype = IDPF_RX_MAX_PTYPE;
        else
                max_ptype = IDPF_RX_MAX_BASE_PTYPE;

        ptype_lkup = kcalloc(max_ptype, sizeof(*ptype_lkup), GFP_KERNEL);
        if (!ptype_lkup)
                return -ENOMEM;

        get_ptype_info = kzalloc(sizeof(*get_ptype_info), GFP_KERNEL);
        if (!get_ptype_info)
                return -ENOMEM;

        ptype_info = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
        if (!ptype_info)
                return -ENOMEM;

        xn_params.vc_op = VIRTCHNL2_OP_GET_PTYPE_INFO;
        xn_params.send_buf.iov_base = get_ptype_info;
        xn_params.send_buf.iov_len = sizeof(*get_ptype_info);
        xn_params.recv_buf.iov_base = ptype_info;
        xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
        xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;

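        /*
         * Ptype info is fetched in windows of at most
         * IDPF_RX_MAX_PTYPES_PER_BUF entries per transaction; the device
         * terminates the list early with the IDPF_INVALID_PTYPE_ID (0xFFFF)
         * sentinel handled in the parsing loop below.
         */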
2591         while (next_ptype_id < max_ptype) {
2592                 get_ptype_info->start_ptype_id = cpu_to_le16(next_ptype_id);
2593
2594                 if ((next_ptype_id + IDPF_RX_MAX_PTYPES_PER_BUF) > max_ptype)
2595                         get_ptype_info->num_ptypes =
2596                                 cpu_to_le16(max_ptype - next_ptype_id);
2597                 else
2598                         get_ptype_info->num_ptypes =
2599                                 cpu_to_le16(IDPF_RX_MAX_PTYPES_PER_BUF);
2600
2601                 reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
2602                 if (reply_sz < 0)
2603                         return reply_sz;
2604
2605                 if (reply_sz < IDPF_CTLQ_MAX_BUF_LEN)
2606                         return -EIO;
2607
2608                 ptypes_recvd += le16_to_cpu(ptype_info->num_ptypes);
2609                 if (ptypes_recvd > max_ptype)
2610                         return -EINVAL;
2611
2612                 next_ptype_id = le16_to_cpu(get_ptype_info->start_ptype_id) +
2613                                 le16_to_cpu(get_ptype_info->num_ptypes);
2614
2615                 ptype_offset = IDPF_RX_PTYPE_HDR_SZ;
2616
2617                 for (i = 0; i < le16_to_cpu(ptype_info->num_ptypes); i++) {
2618                         struct idpf_ptype_state pstate = { };
2619                         struct virtchnl2_ptype *ptype;
2620                         u16 id;
2621
2622                         ptype = (struct virtchnl2_ptype *)
2623                                         ((u8 *)ptype_info + ptype_offset);
2624
2625                         ptype_offset += IDPF_GET_PTYPE_SIZE(ptype);
2626                         if (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN)
2627                                 return -EINVAL;
2628
2629                         /* 0xFFFF indicates end of ptypes */
2630                         if (le16_to_cpu(ptype->ptype_id_10) ==
2631                                                         IDPF_INVALID_PTYPE_ID)
2632                                 goto out;
2633
2634                         if (idpf_is_queue_model_split(vport->rxq_model))
2635                                 k = le16_to_cpu(ptype->ptype_id_10);
2636                         else
2637                                 k = ptype->ptype_id_8;
2638
2639                         for (j = 0; j < ptype->proto_id_count; j++) {
2640                                 id = le16_to_cpu(ptype->proto_id[j]);
2641                                 switch (id) {
2642                                 case VIRTCHNL2_PROTO_HDR_GRE:
2643                                         if (pstate.tunnel_state ==
2644                                                         IDPF_PTYPE_TUNNEL_IP) {
2645                                                 ptype_lkup[k].tunnel_type =
2646                                                 LIBETH_RX_PT_TUNNEL_IP_GRENAT;
2647                                                 pstate.tunnel_state |=
2648                                                 IDPF_PTYPE_TUNNEL_IP_GRENAT;
2649                                         }
2650                                         break;
2651                                 case VIRTCHNL2_PROTO_HDR_MAC:
2652                                         ptype_lkup[k].outer_ip =
2653                                                 LIBETH_RX_PT_OUTER_L2;
2654                                         if (pstate.tunnel_state ==
2655                                                         IDPF_TUN_IP_GRE) {
2656                                                 ptype_lkup[k].tunnel_type =
2657                                                 LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC;
2658                                                 pstate.tunnel_state |=
2659                                                 IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC;
2660                                         }
2661                                         break;
2662                                 case VIRTCHNL2_PROTO_HDR_IPV4:
2663                                         idpf_fill_ptype_lookup(&ptype_lkup[k],
2664                                                                &pstate, true,
2665                                                                false);
2666                                         break;
2667                                 case VIRTCHNL2_PROTO_HDR_IPV6:
2668                                         idpf_fill_ptype_lookup(&ptype_lkup[k],
2669                                                                &pstate, false,
2670                                                                false);
2671                                         break;
2672                                 case VIRTCHNL2_PROTO_HDR_IPV4_FRAG:
2673                                         idpf_fill_ptype_lookup(&ptype_lkup[k],
2674                                                                &pstate, true,
2675                                                                true);
2676                                         break;
2677                                 case VIRTCHNL2_PROTO_HDR_IPV6_FRAG:
2678                                         idpf_fill_ptype_lookup(&ptype_lkup[k],
2679                                                                &pstate, false,
2680                                                                true);
2681                                         break;
2682                                 case VIRTCHNL2_PROTO_HDR_UDP:
2683                                         ptype_lkup[k].inner_prot =
2684                                         LIBETH_RX_PT_INNER_UDP;
2685                                         break;
2686                                 case VIRTCHNL2_PROTO_HDR_TCP:
2687                                         ptype_lkup[k].inner_prot =
2688                                         LIBETH_RX_PT_INNER_TCP;
2689                                         break;
2690                                 case VIRTCHNL2_PROTO_HDR_SCTP:
2691                                         ptype_lkup[k].inner_prot =
2692                                         LIBETH_RX_PT_INNER_SCTP;
2693                                         break;
2694                                 case VIRTCHNL2_PROTO_HDR_ICMP:
2695                                         ptype_lkup[k].inner_prot =
2696                                         LIBETH_RX_PT_INNER_ICMP;
2697                                         break;
2698                                 case VIRTCHNL2_PROTO_HDR_PAY:
2699                                         ptype_lkup[k].payload_layer =
2700                                                 LIBETH_RX_PT_PAYLOAD_L2;
2701                                         break;
2702                                 case VIRTCHNL2_PROTO_HDR_ICMPV6:
2703                                 case VIRTCHNL2_PROTO_HDR_IPV6_EH:
2704                                 case VIRTCHNL2_PROTO_HDR_PRE_MAC:
2705                                 case VIRTCHNL2_PROTO_HDR_POST_MAC:
2706                                 case VIRTCHNL2_PROTO_HDR_ETHERTYPE:
2707                                 case VIRTCHNL2_PROTO_HDR_SVLAN:
2708                                 case VIRTCHNL2_PROTO_HDR_CVLAN:
2709                                 case VIRTCHNL2_PROTO_HDR_MPLS:
2710                                 case VIRTCHNL2_PROTO_HDR_MMPLS:
2711                                 case VIRTCHNL2_PROTO_HDR_PTP:
2712                                 case VIRTCHNL2_PROTO_HDR_CTRL:
2713                                 case VIRTCHNL2_PROTO_HDR_LLDP:
2714                                 case VIRTCHNL2_PROTO_HDR_ARP:
2715                                 case VIRTCHNL2_PROTO_HDR_ECP:
2716                                 case VIRTCHNL2_PROTO_HDR_EAPOL:
2717                                 case VIRTCHNL2_PROTO_HDR_PPPOD:
2718                                 case VIRTCHNL2_PROTO_HDR_PPPOE:
2719                                 case VIRTCHNL2_PROTO_HDR_IGMP:
2720                                 case VIRTCHNL2_PROTO_HDR_AH:
2721                                 case VIRTCHNL2_PROTO_HDR_ESP:
2722                                 case VIRTCHNL2_PROTO_HDR_IKE:
2723                                 case VIRTCHNL2_PROTO_HDR_NATT_KEEP:
2724                                 case VIRTCHNL2_PROTO_HDR_L2TPV2:
2725                                 case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL:
2726                                 case VIRTCHNL2_PROTO_HDR_L2TPV3:
2727                                 case VIRTCHNL2_PROTO_HDR_GTP:
2728                                 case VIRTCHNL2_PROTO_HDR_GTP_EH:
2729                                 case VIRTCHNL2_PROTO_HDR_GTPCV2:
2730                                 case VIRTCHNL2_PROTO_HDR_GTPC_TEID:
2731                                 case VIRTCHNL2_PROTO_HDR_GTPU:
2732                                 case VIRTCHNL2_PROTO_HDR_GTPU_UL:
2733                                 case VIRTCHNL2_PROTO_HDR_GTPU_DL:
2734                                 case VIRTCHNL2_PROTO_HDR_ECPRI:
2735                                 case VIRTCHNL2_PROTO_HDR_VRRP:
2736                                 case VIRTCHNL2_PROTO_HDR_OSPF:
2737                                 case VIRTCHNL2_PROTO_HDR_TUN:
2738                                 case VIRTCHNL2_PROTO_HDR_NVGRE:
2739                                 case VIRTCHNL2_PROTO_HDR_VXLAN:
2740                                 case VIRTCHNL2_PROTO_HDR_VXLAN_GPE:
2741                                 case VIRTCHNL2_PROTO_HDR_GENEVE:
2742                                 case VIRTCHNL2_PROTO_HDR_NSH:
2743                                 case VIRTCHNL2_PROTO_HDR_QUIC:
2744                                 case VIRTCHNL2_PROTO_HDR_PFCP:
2745                                 case VIRTCHNL2_PROTO_HDR_PFCP_NODE:
2746                                 case VIRTCHNL2_PROTO_HDR_PFCP_SESSION:
2747                                 case VIRTCHNL2_PROTO_HDR_RTP:
2748                                 case VIRTCHNL2_PROTO_HDR_NO_PROTO:
2749                                         break;
2750                                 default:
2751                                         break;
2752                                 }
2753                         }
2754
2755                         idpf_finalize_ptype_lookup(&ptype_lkup[k]);
2756                 }
2757         }
2758
2759 out:
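        /* no_free_ptr() disarms the automatic __free() cleanup attached to
         * ptype_lkup, so ownership of the table passes to the vport instead
         * of the table being freed on return.
         */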
2760         vport->rx_ptype_lkup = no_free_ptr(ptype_lkup);
2761
2762         return 0;
2763 }
2764
2765 /**
2766  * idpf_send_ena_dis_loopback_msg - Send virtchnl enable/disable loopback
2767  *                                  message
2768  * @vport: virtual port data structure
2769  *
2770  * Returns 0 on success, negative on failure.
2771  */
2772 int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport)
2773 {
2774         struct idpf_vc_xn_params xn_params = {};
2775         struct virtchnl2_loopback loopback;
2776         ssize_t reply_sz;
2777
2778         loopback.vport_id = cpu_to_le32(vport->vport_id);
2779         loopback.enable = idpf_is_feature_ena(vport, NETIF_F_LOOPBACK);
2780
2781         xn_params.vc_op = VIRTCHNL2_OP_LOOPBACK;
2782         xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2783         xn_params.send_buf.iov_base = &loopback;
2784         xn_params.send_buf.iov_len = sizeof(loopback);
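        /* No recv_buf is set, so any reply payload is discarded; only the
         * transaction status is of interest here.
         */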
2785         reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
2786
2787         return reply_sz < 0 ? reply_sz : 0;
2788 }
2789
2790 /**
2791  * idpf_find_ctlq - Given a type and id, find ctlq info
2792  * @hw: hardware struct
2793  * @type: type of ctrlq to find
2794  * @id: ctlq id to find
2795  *
2796  * Returns pointer to found ctlq info struct, NULL otherwise.
2797  */
2798 static struct idpf_ctlq_info *idpf_find_ctlq(struct idpf_hw *hw,
2799                                              enum idpf_ctlq_type type, int id)
2800 {
2801         struct idpf_ctlq_info *cq;
2802
2803         list_for_each_entry(cq, &hw->cq_list_head, cq_list)
2804                 if (cq->q_id == id && cq->cq_type == type)
2805                         return cq;
2806
2807         return NULL;
2808 }
2809
2810 /**
2811  * idpf_init_dflt_mbx - Setup default mailbox parameters and make request
2812  * @adapter: adapter info struct
2813  *
2814  * Returns 0 on success, negative otherwise
2815  */
2816 int idpf_init_dflt_mbx(struct idpf_adapter *adapter)
2817 {
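        /* Both mailbox queues share IDPF_DFLT_MBX_ID and are told apart by
         * their type when looked up further below.
         */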
2818         struct idpf_ctlq_create_info ctlq_info[] = {
2819                 {
2820                         .type = IDPF_CTLQ_TYPE_MAILBOX_TX,
2821                         .id = IDPF_DFLT_MBX_ID,
2822                         .len = IDPF_DFLT_MBX_Q_LEN,
2823                         .buf_size = IDPF_CTLQ_MAX_BUF_LEN
2824                 },
2825                 {
2826                         .type = IDPF_CTLQ_TYPE_MAILBOX_RX,
2827                         .id = IDPF_DFLT_MBX_ID,
2828                         .len = IDPF_DFLT_MBX_Q_LEN,
2829                         .buf_size = IDPF_CTLQ_MAX_BUF_LEN
2830                 }
2831         };
2832         struct idpf_hw *hw = &adapter->hw;
2833         int err;
2834
2835         adapter->dev_ops.reg_ops.ctlq_reg_init(ctlq_info);
2836
2837         err = idpf_ctlq_init(hw, IDPF_NUM_DFLT_MBX_Q, ctlq_info);
2838         if (err)
2839                 return err;
2840
2841         hw->asq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_TX,
2842                                  IDPF_DFLT_MBX_ID);
2843         hw->arq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_RX,
2844                                  IDPF_DFLT_MBX_ID);
2845
2846         if (!hw->asq || !hw->arq) {
2847                 idpf_ctlq_deinit(hw);
2848
2849                 return -ENOENT;
2850         }
2851
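        /* Mailbox is up; start the init state machine at version negotiation */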
2852         adapter->state = __IDPF_VER_CHECK;
2853
2854         return 0;
2855 }
2856
2857 /**
2858  * idpf_deinit_dflt_mbx - Free up the default mailbox control queues
2859  * @adapter: Driver specific private data structure
2860  */
2861 void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter)
2862 {
2863         if (adapter->hw.arq && adapter->hw.asq) {
2864                 idpf_mb_clean(adapter);
2865                 idpf_ctlq_deinit(&adapter->hw);
2866         }
2867         adapter->hw.arq = NULL;
2868         adapter->hw.asq = NULL;
2869 }
2870
2871 /**
2872  * idpf_vport_params_buf_rel - Release memory for MailBox resources
2873  * @adapter: Driver specific private data structure
2874  *
2875  * Releases the memory holding the vport parameters received over the mailbox
2876  */
2877 static void idpf_vport_params_buf_rel(struct idpf_adapter *adapter)
2878 {
2879         kfree(adapter->vport_params_recvd);
2880         adapter->vport_params_recvd = NULL;
2881         kfree(adapter->vport_params_reqd);
2882         adapter->vport_params_reqd = NULL;
2883         kfree(adapter->vport_ids);
2884         adapter->vport_ids = NULL;
2885 }
2886
2887 /**
2888  * idpf_vport_params_buf_alloc - Allocate memory for MailBox resources
2889  * @adapter: Driver specific private data structure
2890  *
2891  * Will alloc memory to hold the vport parameters received over the mailbox.
 * Returns 0 on success, -ENOMEM on allocation failure.
2892  */
2893 static int idpf_vport_params_buf_alloc(struct idpf_adapter *adapter)
2894 {
2895         u16 num_max_vports = idpf_get_max_vports(adapter);
2896
2897         adapter->vport_params_reqd = kcalloc(num_max_vports,
2898                                              sizeof(*adapter->vport_params_reqd),
2899                                              GFP_KERNEL);
2900         if (!adapter->vport_params_reqd)
2901                 return -ENOMEM;
2902
2903         adapter->vport_params_recvd = kcalloc(num_max_vports,
2904                                               sizeof(*adapter->vport_params_recvd),
2905                                               GFP_KERNEL);
2906         if (!adapter->vport_params_recvd)
2907                 goto err_mem;
2908
2909         adapter->vport_ids = kcalloc(num_max_vports, sizeof(u32), GFP_KERNEL);
2910         if (!adapter->vport_ids)
2911                 goto err_mem;
2912
2913         if (adapter->vport_config)
2914                 return 0;
2915
2916         adapter->vport_config = kcalloc(num_max_vports,
2917                                         sizeof(*adapter->vport_config),
2918                                         GFP_KERNEL);
2919         if (!adapter->vport_config)
2920                 goto err_mem;
2921
2922         return 0;
2923
2924 err_mem:
2925         idpf_vport_params_buf_rel(adapter);
2926
2927         return -ENOMEM;
2928 }
2929
2930 /**
2931  * idpf_vc_core_init - Initialize state machine and get driver specific
2932  * resources
2933  * @adapter: Driver specific private structure
2934  *
2935  * This function will initialize the state machine and request all necessary
2936  * resources required by the device driver. Once the state machine is
2937  * initialized, allocate memory to store vport specific information and also
2938  * requests required interrupts.
2939  *
2940  * Returns 0 on success, -EAGAIN if the function should be called again,
2941  * otherwise negative on failure.
2942  */
2943 int idpf_vc_core_init(struct idpf_adapter *adapter)
2944 {
2945         int task_delay = 30;
2946         u16 num_max_vports;
2947         int err = 0;
2948
2949         if (!adapter->vcxn_mngr) {
2950                 adapter->vcxn_mngr = kzalloc(sizeof(*adapter->vcxn_mngr), GFP_KERNEL);
2951                 if (!adapter->vcxn_mngr) {
2952                         err = -ENOMEM;
2953                         goto init_failed;
2954                 }
2955         }
2956         idpf_vc_xn_init(adapter->vcxn_mngr);
2957
2958         while (adapter->state != __IDPF_INIT_SW) {
2959                 switch (adapter->state) {
2960                 case __IDPF_VER_CHECK:
2961                         err = idpf_send_ver_msg(adapter);
2962                         switch (err) {
2963                         case 0:
2964                                 /* success, move state machine forward */
2965                                 adapter->state = __IDPF_GET_CAPS;
2966                                 fallthrough;
2967                         case -EAGAIN:
2968                                 goto restart;
2969                         default:
2970                                 /* Something bad happened, try again but only a
2971                                  * few times.
2972                                  */
2973                                 goto init_failed;
2974                         }
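                        /* Every branch above jumps away, so __IDPF_GET_CAPS
                         * below is only entered on a later pass through the
                         * loop.
                         */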
2975                 case __IDPF_GET_CAPS:
2976                         err = idpf_send_get_caps_msg(adapter);
2977                         if (err)
2978                                 goto init_failed;
2979                         adapter->state = __IDPF_INIT_SW;
2980                         break;
2981                 default:
2982                         dev_err(&adapter->pdev->dev, "Device is in bad state: %d\n",
2983                                 adapter->state);
2984                         err = -EINVAL;
2985                         goto init_failed;
2986                 }
2987                 break;
2988 restart:
2989                 /* Give the device enough time to respond before
2990                  * advancing the state machine
2991                  */
2992                 msleep(task_delay);
2993         }
2994
2995         pci_sriov_set_totalvfs(adapter->pdev, idpf_get_max_vfs(adapter));
2996         num_max_vports = idpf_get_max_vports(adapter);
2997         adapter->max_vports = num_max_vports;
2998         adapter->vports = kcalloc(num_max_vports, sizeof(*adapter->vports),
2999                                   GFP_KERNEL);
3000         if (!adapter->vports)
3001                 return -ENOMEM;
3002
3003         if (!adapter->netdevs) {
3004                 adapter->netdevs = kcalloc(num_max_vports,
3005                                            sizeof(struct net_device *),
3006                                            GFP_KERNEL);
3007                 if (!adapter->netdevs) {
3008                         err = -ENOMEM;
3009                         goto err_netdev_alloc;
3010                 }
3011         }
3012
3013         err = idpf_vport_params_buf_alloc(adapter);
3014         if (err) {
3015                 dev_err(&adapter->pdev->dev, "Failed to alloc vport params buffer: %d\n",
3016                         err);
3017                 goto err_netdev_alloc;
3018         }
3019
3020         /* Start the mailbox task before requesting vectors. This will ensure
3021          * the vector information response from the mailbox is handled
3022          */
3023         queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
3024
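        /* Skew the service task start per PCI function, as done for the init
         * task below, so the functions don't all hit the device at once.
         */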
3025         queue_delayed_work(adapter->serv_wq, &adapter->serv_task,
3026                            msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));
3027
3028         err = idpf_intr_req(adapter);
3029         if (err) {
3030                 dev_err(&adapter->pdev->dev, "failed to enable interrupt vectors: %d\n",
3031                         err);
3032                 goto err_intr_req;
3033         }
3034
3035         idpf_init_avail_queues(adapter);
3036
3037         /* Skew the delay for init tasks for each function based on fn number
3038          * to prevent every function from making the same call simultaneously.
3039          */
3040         queue_delayed_work(adapter->init_wq, &adapter->init_task,
3041                            msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));
3042
3043         set_bit(IDPF_VC_CORE_INIT, adapter->flags);
3044
3045         return 0;
3046
3047 err_intr_req:
3048         cancel_delayed_work_sync(&adapter->serv_task);
3049         cancel_delayed_work_sync(&adapter->mbx_task);
3050         idpf_vport_params_buf_rel(adapter);
3051 err_netdev_alloc:
3052         kfree(adapter->vports);
3053         adapter->vports = NULL;
3054         return err;
3055
3056 init_failed:
3057         /* Don't retry if we're trying to go down, just bail. */
3058         if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
3059                 return err;
3060
3061         if (++adapter->mb_wait_count > IDPF_MB_MAX_ERR) {
3062                 dev_err(&adapter->pdev->dev, "Failed to establish mailbox communications with hardware\n");
3063
3064                 return -EFAULT;
3065         }
3066         /* If we got here, the mailbox queue initialization register
3067          * writes might not have taken effect. Retry initializing
3068          * the mailbox.
3069          */
3070         adapter->state = __IDPF_VER_CHECK;
3071         if (adapter->vcxn_mngr)
3072                 idpf_vc_xn_shutdown(adapter->vcxn_mngr);
3073         idpf_deinit_dflt_mbx(adapter);
3074         set_bit(IDPF_HR_DRV_LOAD, adapter->flags);
3075         queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task,
3076                            msecs_to_jiffies(task_delay));
3077
3078         return -EAGAIN;
3079 }
3080
3081 /**
3082  * idpf_vc_core_deinit - Device deinit routine
3083  * @adapter: Driver specific private structure
3084  * Reverses idpf_vc_core_init(), releasing the resources it acquired.
3085  */
3086 void idpf_vc_core_deinit(struct idpf_adapter *adapter)
3087 {
3088         if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags))
3089                 return;
3090
3091         idpf_vc_xn_shutdown(adapter->vcxn_mngr);
3092         idpf_deinit_task(adapter);
3093         idpf_intr_rel(adapter);
3094
3095         cancel_delayed_work_sync(&adapter->serv_task);
3096         cancel_delayed_work_sync(&adapter->mbx_task);
3097
3098         idpf_vport_params_buf_rel(adapter);
3099
3100         kfree(adapter->vports);
3101         adapter->vports = NULL;
3102
3103         clear_bit(IDPF_VC_CORE_INIT, adapter->flags);
3104 }
3105
3106 /**
3107  * idpf_vport_alloc_vec_indexes - Get relative vector indexes
3108  * @vport: virtual port data struct
3109  *
3110  * This function requests the vector information required for the vport and
3111  * stores the vector indexes received from the 'global vector distribution'
3112  * in the vport's queue vectors array.
3113  *
3114  * Returns 0 on success, negative on failure.
3115  */
3116 int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport)
3117 {
3118         struct idpf_vector_info vec_info;
3119         int num_alloc_vecs;
3120
3121         vec_info.num_curr_vecs = vport->num_q_vectors;
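        /* Vectors are shared between Tx and Rx queues, so request enough to
         * cover the larger of the two counts.
         */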
3122         vec_info.num_req_vecs = max(vport->num_txq, vport->num_rxq);
3123         vec_info.default_vport = vport->default_vport;
3124         vec_info.index = vport->idx;
3125
3126         num_alloc_vecs = idpf_req_rel_vector_indexes(vport->adapter,
3127                                                      vport->q_vector_idxs,
3128                                                      &vec_info);
3129         if (num_alloc_vecs <= 0) {
3130                 dev_err(&vport->adapter->pdev->dev, "Vector distribution failed: %d\n",
3131                         num_alloc_vecs);
3132                 return -EINVAL;
3133         }
3134
3135         vport->num_q_vectors = num_alloc_vecs;
3136
3137         return 0;
3138 }
3139
3140 /**
3141  * idpf_vport_init - Initialize virtual port
3142  * @vport: virtual port to be initialized
3143  * @max_q: vport max queue info
3144  *
3145  * Will initialize vport with the info received through MB earlier
3146  */
3147 void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
3148 {
3149         struct idpf_adapter *adapter = vport->adapter;
3150         struct virtchnl2_create_vport *vport_msg;
3151         struct idpf_vport_config *vport_config;
3152         u16 tx_itr[] = {2, 8, 64, 128, 256};
3153         u16 rx_itr[] = {2, 8, 32, 96, 128};
3154         struct idpf_rss_data *rss_data;
3155         u16 idx = vport->idx;
3156
3157         vport_config = adapter->vport_config[idx];
3158         rss_data = &vport_config->user_config.rss_data;
3159         vport_msg = adapter->vport_params_recvd[idx];
3160
3161         vport_config->max_q.max_txq = max_q->max_txq;
3162         vport_config->max_q.max_rxq = max_q->max_rxq;
3163         vport_config->max_q.max_complq = max_q->max_complq;
3164         vport_config->max_q.max_bufq = max_q->max_bufq;
3165
3166         vport->txq_model = le16_to_cpu(vport_msg->txq_model);
3167         vport->rxq_model = le16_to_cpu(vport_msg->rxq_model);
3168         vport->vport_type = le16_to_cpu(vport_msg->vport_type);
3169         vport->vport_id = le32_to_cpu(vport_msg->vport_id);
3170
3171         rss_data->rss_key_size = min_t(u16, NETDEV_RSS_KEY_LEN,
3172                                        le16_to_cpu(vport_msg->rss_key_size));
3173         rss_data->rss_lut_size = le16_to_cpu(vport_msg->rss_lut_size);
3174
3175         ether_addr_copy(vport->default_mac_addr, vport_msg->default_mac_addr);
3176         vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - LIBETH_RX_LL_LEN;
3177
3178         /* Initialize Tx and Rx profiles for Dynamic Interrupt Moderation */
3179         memcpy(vport->rx_itr_profile, rx_itr, sizeof(rx_itr));
3180         memcpy(vport->tx_itr_profile, tx_itr, sizeof(tx_itr));
3181
3182         idpf_vport_set_hsplit(vport, ETHTOOL_TCP_DATA_SPLIT_ENABLED);
3183
3184         idpf_vport_init_num_qs(vport, vport_msg);
3185         idpf_vport_calc_num_q_desc(vport);
3186         idpf_vport_calc_num_q_groups(vport);
3187         idpf_vport_alloc_vec_indexes(vport);
3188
3189         vport->crc_enable = adapter->crc_enable;
3190 }
3191
3192 /**
3193  * idpf_get_vec_ids - Initialize vector id from Mailbox parameters
3194  * @adapter: adapter structure to get the mailbox vector id
3195  * @vecids: Array of vector ids
3196  * @num_vecids: number of vector ids
3197  * @chunks: vector ids received over mailbox
3198  *
3199  * Will initialize the first entry with the mailbox vector id received
3200  * from the get capabilities response, and the data queue vector ids
3201  * with the ids received as mailbox parameters.
3202  * Returns number of ids filled
3203  */
3204 int idpf_get_vec_ids(struct idpf_adapter *adapter,
3205                      u16 *vecids, int num_vecids,
3206                      struct virtchnl2_vector_chunks *chunks)
3207 {
3208         u16 num_chunks = le16_to_cpu(chunks->num_vchunks);
3209         int num_vecid_filled = 0;
3210         int i, j;
3211
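        /* Entry 0 is always reserved for the mailbox vector id */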
3212         vecids[num_vecid_filled] = adapter->mb_vector.v_idx;
3213         num_vecid_filled++;
3214
3215         for (j = 0; j < num_chunks; j++) {
3216                 struct virtchnl2_vector_chunk *chunk;
3217                 u16 start_vecid, num_vec;
3218
3219                 chunk = &chunks->vchunks[j];
3220                 num_vec = le16_to_cpu(chunk->num_vectors);
3221                 start_vecid = le16_to_cpu(chunk->start_vector_id);
3222
3223                 for (i = 0; i < num_vec; i++) {
3224                         if ((num_vecid_filled + i) < num_vecids) {
3225                                 vecids[num_vecid_filled + i] = start_vecid;
3226                                 start_vecid++;
3227                         } else {
3228                                 break;
3229                         }
3230                 }
3231                 num_vecid_filled += i;
3232         }
3233
3234         return num_vecid_filled;
3235 }
3236
3237 /**
3238  * idpf_vport_get_queue_ids - Initialize queue id from Mailbox parameters
3239  * @qids: Array of queue ids
3240  * @num_qids: number of queue ids
3241  * @q_type: type of queue to extract ids for
3242  * @chunks: queue ids received over mailbox
3243  *
3244  * Will initialize all queue ids with ids received as mailbox parameters
3245  * Returns number of ids filled
3246  */
3247 static int idpf_vport_get_queue_ids(u32 *qids, int num_qids, u16 q_type,
3248                                     struct virtchnl2_queue_reg_chunks *chunks)
3249 {
3250         u16 num_chunks = le16_to_cpu(chunks->num_chunks);
3251         u32 num_q_id_filled = 0, i;
3252         u32 start_q_id, num_q;
3253
3254         while (num_chunks--) {
3255                 struct virtchnl2_queue_reg_chunk *chunk;
3256
3257                 chunk = &chunks->chunks[num_chunks];
3258                 if (le32_to_cpu(chunk->type) != q_type)
3259                         continue;
3260
3261                 num_q = le32_to_cpu(chunk->num_queues);
3262                 start_q_id = le32_to_cpu(chunk->start_queue_id);
3263
3264                 for (i = 0; i < num_q; i++) {
3265                         if ((num_q_id_filled + i) < num_qids) {
3266                                 qids[num_q_id_filled + i] = start_q_id;
3267                                 start_q_id++;
3268                         } else {
3269                                 break;
3270                         }
3271                 }
3272                 num_q_id_filled += i;
3273         }
3274
3275         return num_q_id_filled;
3276 }
3277
3278 /**
3279  * __idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
3280  * @vport: virtual port for which the queue ids are initialized
3281  * @qids: queue ids
3282  * @num_qids: number of queue ids
3283  * @q_type: type of queue
3284  *
3285  * Will initialize all queue ids with ids received as mailbox
3286  * parameters. Returns number of queue ids initialized.
3287  */
3288 static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
3289                                        const u32 *qids,
3290                                        int num_qids,
3291                                        u32 q_type)
3292 {
3293         int i, j, k = 0;
3294
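        /* k counts how many ids from @qids have been assigned so far */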
3295         switch (q_type) {
3296         case VIRTCHNL2_QUEUE_TYPE_TX:
3297                 for (i = 0; i < vport->num_txq_grp; i++) {
3298                         struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
3299
3300                         for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++)
3301                                 tx_qgrp->txqs[j]->q_id = qids[k];
3302                 }
3303                 break;
3304         case VIRTCHNL2_QUEUE_TYPE_RX:
3305                 for (i = 0; i < vport->num_rxq_grp; i++) {
3306                         struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
3307                         u16 num_rxq;
3308
3309                         if (idpf_is_queue_model_split(vport->rxq_model))
3310                                 num_rxq = rx_qgrp->splitq.num_rxq_sets;
3311                         else
3312                                 num_rxq = rx_qgrp->singleq.num_rxq;
3313
3314                         for (j = 0; j < num_rxq && k < num_qids; j++, k++) {
3315                                 struct idpf_rx_queue *q;
3316
3317                                 if (idpf_is_queue_model_split(vport->rxq_model))
3318                                         q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
3319                                 else
3320                                         q = rx_qgrp->singleq.rxqs[j];
3321                                 q->q_id = qids[k];
3322                         }
3323                 }
3324                 break;
3325         case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
3326                 for (i = 0; i < vport->num_txq_grp && k < num_qids; i++, k++) {
3327                         struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
3328
3329                         tx_qgrp->complq->q_id = qids[k];
3330                 }
3331                 break;
3332         case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
3333                 for (i = 0; i < vport->num_rxq_grp; i++) {
3334                         struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
3335                         u8 num_bufqs = vport->num_bufqs_per_qgrp;
3336
3337                         for (j = 0; j < num_bufqs && k < num_qids; j++, k++) {
3338                                 struct idpf_buf_queue *q;
3339
3340                                 q = &rx_qgrp->splitq.bufq_sets[j].bufq;
3341                                 q->q_id = qids[k];
3342                         }
3343                 }
3344                 break;
3345         default:
3346                 break;
3347         }
3348
3349         return k;
3350 }
3351
3352 /**
3353  * idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
3354  * @vport: virtual port for which the queue ids are initialized
3355  *
3356  * Will initialize all queue ids with ids received as mailbox parameters.
3357  * Returns 0 on success, negative if not all the queues were initialized.
3358  */
3359 int idpf_vport_queue_ids_init(struct idpf_vport *vport)
3360 {
3361         struct virtchnl2_create_vport *vport_params;
3362         struct virtchnl2_queue_reg_chunks *chunks;
3363         struct idpf_vport_config *vport_config;
3364         u16 vport_idx = vport->idx;
3365         int num_ids, err = 0;
3366         u16 q_type;
3367         u32 *qids;
3368
3369         vport_config = vport->adapter->vport_config[vport_idx];
3370         if (vport_config->req_qs_chunks) {
3371                 struct virtchnl2_add_queues *vc_aq =
3372                         (struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
3373                 chunks = &vc_aq->chunks;
3374         } else {
3375                 vport_params = vport->adapter->vport_params_recvd[vport_idx];
3376                 chunks = &vport_params->chunks;
3377         }
3378
3379         qids = kcalloc(IDPF_MAX_QIDS, sizeof(u32), GFP_KERNEL);
3380         if (!qids)
3381                 return -ENOMEM;
3382
3383         num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS,
3384                                            VIRTCHNL2_QUEUE_TYPE_TX,
3385                                            chunks);
3386         if (num_ids < vport->num_txq) {
3387                 err = -EINVAL;
3388                 goto mem_rel;
3389         }
3390         num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids,
3391                                               VIRTCHNL2_QUEUE_TYPE_TX);
3392         if (num_ids < vport->num_txq) {
3393                 err = -EINVAL;
3394                 goto mem_rel;
3395         }
3396
3397         num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS,
3398                                            VIRTCHNL2_QUEUE_TYPE_RX,
3399                                            chunks);
3400         if (num_ids < vport->num_rxq) {
3401                 err = -EINVAL;
3402                 goto mem_rel;
3403         }
3404         num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids,
3405                                               VIRTCHNL2_QUEUE_TYPE_RX);
3406         if (num_ids < vport->num_rxq) {
3407                 err = -EINVAL;
3408                 goto mem_rel;
3409         }
3410
3411         if (!idpf_is_queue_model_split(vport->txq_model))
3412                 goto check_rxq;
3413
3414         q_type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
3415         num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks);
3416         if (num_ids < vport->num_complq) {
3417                 err = -EINVAL;
3418                 goto mem_rel;
3419         }
3420         num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type);
3421         if (num_ids < vport->num_complq) {
3422                 err = -EINVAL;
3423                 goto mem_rel;
3424         }
3425
3426 check_rxq:
3427         if (!idpf_is_queue_model_split(vport->rxq_model))
3428                 goto mem_rel;
3429
3430         q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
3431         num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks);
3432         if (num_ids < vport->num_bufq) {
3433                 err = -EINVAL;
3434                 goto mem_rel;
3435         }
3436         num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type);
3437         if (num_ids < vport->num_bufq)
3438                 err = -EINVAL;
3439
3440 mem_rel:
3441         kfree(qids);
3442
3443         return err;
3444 }
3445
3446 /**
3447  * idpf_vport_adjust_qs - Adjust to new requested queues
3448  * @vport: virtual port data struct
3449  *
3450  * Renegotiate queues.  Returns 0 on success, negative on failure.
3451  */
3452 int idpf_vport_adjust_qs(struct idpf_vport *vport)
3453 {
3454         struct virtchnl2_create_vport vport_msg;
3455         int err;
3456
3457         vport_msg.txq_model = cpu_to_le16(vport->txq_model);
3458         vport_msg.rxq_model = cpu_to_le16(vport->rxq_model);
3459         err = idpf_vport_calc_total_qs(vport->adapter, vport->idx, &vport_msg,
3460                                        NULL);
3461         if (err)
3462                 return err;
3463
3464         idpf_vport_init_num_qs(vport, &vport_msg);
3465         idpf_vport_calc_num_q_groups(vport);
3466
3467         return 0;
3468 }
3469
3470 /**
3471  * idpf_is_capability_ena - Default implementation of capability checking
3472  * @adapter: Private data struct
3473  * @all: if true, all bits in @flag must be set; otherwise any one suffices
3474  * @field: caps field to check for flags
3475  * @flag: flag(s) to check
3476  *
3477  * Returns true if the requested capabilities are supported, false otherwise
3478  */
3479 bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
3480                             enum idpf_cap_field field, u64 flag)
3481 {
3482         u8 *caps = (u8 *)&adapter->caps;
3483         u32 *cap_field;
3484
3488         if (field == IDPF_BASE_CAPS)
3489                 return false;
3490
3491         cap_field = (u32 *)(caps + field);
3492
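        /* Example: with bits A and B set in the field,
         *   all == true,  flag == A | B  -> true  (both present)
         *   all == true,  flag == A | C  -> false (C missing)
         *   all == false, flag == A | C  -> true  (A present)
         */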
3493         if (all)
3494                 return (*cap_field & flag) == flag;
3495         else
3496                 return !!(*cap_field & flag);
3497 }
3498
3499 /**
3500  * idpf_get_vport_id - Get vport id
3501  * @vport: virtual port structure
3502  *
3503  * Return vport id from the adapter persistent data
3504  */
3505 u32 idpf_get_vport_id(struct idpf_vport *vport)
3506 {
3507         struct virtchnl2_create_vport *vport_msg;
3508
3509         vport_msg = vport->adapter->vport_params_recvd[vport->idx];
3510
3511         return le32_to_cpu(vport_msg->vport_id);
3512 }
3513
3514 /**
3515  * idpf_mac_filter_async_handler - Async callback for mac filters
3516  * @adapter: private data struct
3517  * @xn: transaction for message
3518  * @ctlq_msg: received message
3519  *
3520  * In some scenarios the driver can't sleep and wait for a reply (e.g. the
3521  * stack is holding rtnl_lock) when adding a new mac filter. That makes it
3522  * difficult to deal with errors returned in the reply. The best we can
3523  * ultimately do is remove it from our list of mac filters and report the
3524  * error.
3525  */
3526 static int idpf_mac_filter_async_handler(struct idpf_adapter *adapter,
3527                                          struct idpf_vc_xn *xn,
3528                                          const struct idpf_ctlq_msg *ctlq_msg)
3529 {
3530         struct virtchnl2_mac_addr_list *ma_list;
3531         struct idpf_vport_config *vport_config;
3532         struct virtchnl2_mac_addr *mac_addr;
3533         struct idpf_mac_filter *f, *tmp;
3534         struct list_head *ma_list_head;
3535         struct idpf_vport *vport;
3536         u16 num_entries;
3537         int i;
3538
3539         /* if successful we're done; past this point something bad happened */
3540         if (!ctlq_msg->cookie.mbx.chnl_retval)
3541                 return 0;
3542
3543         /* make sure at least struct is there */
3544         if (xn->reply_sz < sizeof(*ma_list))
3545                 goto invalid_payload;
3546
3547         ma_list = ctlq_msg->ctx.indirect.payload->va;
3548         mac_addr = ma_list->mac_addr_list;
3549         num_entries = le16_to_cpu(ma_list->num_mac_addr);
3550         /* we should have received a buffer at least this big */
3551         if (xn->reply_sz < struct_size(ma_list, mac_addr_list, num_entries))
3552                 goto invalid_payload;
3553
3554         vport = idpf_vid_to_vport(adapter, le32_to_cpu(ma_list->vport_id));
3555         if (!vport)
3556                 goto invalid_payload;
3557
3558         vport_config = adapter->vport_config[le32_to_cpu(ma_list->vport_id)];
3559         ma_list_head = &vport_config->user_config.mac_filter_list;
3560
3561         /* We can't do much to reconcile bad filters at this point, however we
3562          * should at least remove them from our list one way or the other so we
3563          * have some idea what good filters we have.
3564          */
3565         spin_lock_bh(&vport_config->mac_filter_list_lock);
3566         list_for_each_entry_safe(f, tmp, ma_list_head, list)
3567                 for (i = 0; i < num_entries; i++)
3568                         if (ether_addr_equal(mac_addr[i].addr, f->macaddr))
3569                                 list_del(&f->list);
3570         spin_unlock_bh(&vport_config->mac_filter_list_lock);
3571         dev_err_ratelimited(&adapter->pdev->dev, "Received error sending MAC filter request (op %d)\n",
3572                             xn->vc_op);
3573
3574         return 0;
3575
3576 invalid_payload:
3577         dev_err_ratelimited(&adapter->pdev->dev, "Received invalid MAC filter payload (op %d) (len %zd)\n",
3578                             xn->vc_op, xn->reply_sz);
3579
3580         return -EINVAL;
3581 }
3582
3583 /**
3584  * idpf_add_del_mac_filters - Add/del mac filters
3585  * @vport: Virtual port data structure
3586  * @np: Netdev private structure
3587  * @add: Add or delete flag
3588  * @async: Don't wait for return message
3589  *
3590  * Returns 0 on success, error on failure.
3591  */
3592 int idpf_add_del_mac_filters(struct idpf_vport *vport,
3593                              struct idpf_netdev_priv *np,
3594                              bool add, bool async)
3595 {
3596         struct virtchnl2_mac_addr_list *ma_list __free(kfree) = NULL;
3597         struct virtchnl2_mac_addr *mac_addr __free(kfree) = NULL;
3598         struct idpf_adapter *adapter = np->adapter;
3599         struct idpf_vc_xn_params xn_params = {};
3600         struct idpf_vport_config *vport_config;
3601         u32 num_msgs, total_filters = 0;
3602         struct idpf_mac_filter *f;
3603         ssize_t reply_sz;
3604         int i = 0, k;
3605
3606         xn_params.vc_op = add ? VIRTCHNL2_OP_ADD_MAC_ADDR :
3607                                 VIRTCHNL2_OP_DEL_MAC_ADDR;
3608         xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
3609         xn_params.async = async;
3610         xn_params.async_handler = idpf_mac_filter_async_handler;
3611
3612         vport_config = adapter->vport_config[np->vport_idx];
3613         spin_lock_bh(&vport_config->mac_filter_list_lock);
3614
3615         /* Find the number of newly added filters */
3616         list_for_each_entry(f, &vport_config->user_config.mac_filter_list,
3617                             list) {
3618                 if (add && f->add)
3619                         total_filters++;
3620                 else if (!add && f->remove)
3621                         total_filters++;
3622         }
3623
3624         if (!total_filters) {
3625                 spin_unlock_bh(&vport_config->mac_filter_list_lock);
3626
3627                 return 0;
3628         }
3629
3630         /* Fill all the new filters into virtchannel message */
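        /* GFP_ATOMIC because the mac_filter_list_lock spinlock is held */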
3631         mac_addr = kcalloc(total_filters, sizeof(struct virtchnl2_mac_addr),
3632                            GFP_ATOMIC);
3633         if (!mac_addr) {
3634                 spin_unlock_bh(&vport_config->mac_filter_list_lock);
3635
3636                 return -ENOMEM;
3637         }
3638
3639         list_for_each_entry(f, &vport_config->user_config.mac_filter_list,
3640                             list) {
3641                 if (add && f->add) {
3642                         ether_addr_copy(mac_addr[i].addr, f->macaddr);
3643                         i++;
3644                         f->add = false;
3645                         if (i == total_filters)
3646                                 break;
3647                 }
3648                 if (!add && f->remove) {
3649                         ether_addr_copy(mac_addr[i].addr, f->macaddr);
3650                         i++;
3651                         f->remove = false;
3652                         if (i == total_filters)
3653                                 break;
3654                 }
3655         }
3656
3657         spin_unlock_bh(&vport_config->mac_filter_list_lock);
3658
3659         /* Chunk up the filters into multiple messages to avoid
3660          * sending a control queue message buffer that is too large
3661          */
3662         num_msgs = DIV_ROUND_UP(total_filters, IDPF_NUM_FILTERS_PER_MSG);
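        /* For example, if IDPF_NUM_FILTERS_PER_MSG were 20, 45 filters would
         * go out as three messages of 20, 20 and 5 entries.
         */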
3663
3664         for (i = 0, k = 0; i < num_msgs; i++) {
3665                 u32 entries_size, buf_size, num_entries;
3666
3667                 num_entries = min_t(u32, total_filters,
3668                                     IDPF_NUM_FILTERS_PER_MSG);
3669                 entries_size = sizeof(struct virtchnl2_mac_addr) * num_entries;
3670                 buf_size = struct_size(ma_list, mac_addr_list, num_entries);
3671
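                /* Allocate on the first pass and again for a smaller final
                 * chunk; otherwise reuse the previous full-size buffer.
                 */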
3672                 if (!ma_list || num_entries != IDPF_NUM_FILTERS_PER_MSG) {
3673                         kfree(ma_list);
3674                         ma_list = kzalloc(buf_size, GFP_ATOMIC);
3675                         if (!ma_list)
3676                                 return -ENOMEM;
3677                 } else {
3678                         memset(ma_list, 0, buf_size);
3679                 }
3680
3681                 ma_list->vport_id = cpu_to_le32(np->vport_id);
3682                 ma_list->num_mac_addr = cpu_to_le16(num_entries);
3683                 memcpy(ma_list->mac_addr_list, &mac_addr[k], entries_size);
3684
3685                 xn_params.send_buf.iov_base = ma_list;
3686                 xn_params.send_buf.iov_len = buf_size;
3687                 reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
3688                 if (reply_sz < 0)
3689                         return reply_sz;
3690
3691                 k += num_entries;
3692                 total_filters -= num_entries;
3693         }
3694
3695         return 0;
3696 }
3697
3698 /**
3699  * idpf_set_promiscuous - set promiscuous and send message to mailbox
3700  * @adapter: Driver specific private structure
3701  * @config_data: Vport specific config data
3702  * @vport_id: Vport identifier
3703  *
3704  * Request to configure promiscuous mode for the vport. The message is sent
3705  * asynchronously and does not wait for a response. Returns 0 on success,
3706  * negative on failure.
3707  */
3708 int idpf_set_promiscuous(struct idpf_adapter *adapter,
3709                          struct idpf_vport_user_config_data *config_data,
3710                          u32 vport_id)
3711 {
3712         struct idpf_vc_xn_params xn_params = {};
3713         struct virtchnl2_promisc_info vpi;
3714         ssize_t reply_sz;
3715         u16 flags = 0;
3716
3717         if (test_bit(__IDPF_PROMISC_UC, config_data->user_flags))
3718                 flags |= VIRTCHNL2_UNICAST_PROMISC;
3719         if (test_bit(__IDPF_PROMISC_MC, config_data->user_flags))
3720                 flags |= VIRTCHNL2_MULTICAST_PROMISC;
3721
3722         vpi.vport_id = cpu_to_le32(vport_id);
3723         vpi.flags = cpu_to_le16(flags);
3724
3725         xn_params.vc_op = VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE;
3726         xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
3727         xn_params.send_buf.iov_base = &vpi;
3728         xn_params.send_buf.iov_len = sizeof(vpi);
3729         /* setting promiscuous is only ever done asynchronously */
3730         xn_params.async = true;
3731         reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
3732
3733         return reply_sz < 0 ? reply_sz : 0;
3734 }