/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"

#ifdef CONFIG_BNXT_SRIOV
static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
{
	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
		netdev_err(bp->dev, "vf ndo called even though PF is down\n");
		return -EINVAL;
	}
	if (!bp->pf.active_vfs) {
		netdev_err(bp->dev, "vf ndo called even though sriov is disabled\n");
		return -EINVAL;
	}
	if (vf_id >= bp->pf.max_vfs) {
		netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
		return -EINVAL;
	}
	return 0;
}

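/* The bnxt_set_vf_*() / bnxt_get_vf_config() handlers below implement the
 * standard ndo_set_vf_* / ndo_get_vf_config netdev ops and are typically
 * reached via "ip link set <pf> vf <id> ..." or "ip link show <pf>" on the
 * PF.
 */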
int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	bool old_setting = false;
	u32 func_flags;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	if (vf->flags & BNXT_VF_SPOOFCHK)
		old_setting = true;
	if (old_setting == setting)
		return 0;

	func_flags = vf->func_flags;
	if (setting)
		func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
	else
		func_flags &= ~FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
	/* TODO: if the driver supports VLAN filter on guest VLAN,
	 * the spoof check should also include vlan anti-spoofing
	 */
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(func_flags);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		vf->func_flags = func_flags;
		if (setting)
			vf->flags |= BNXT_VF_SPOOFCHK;
		else
			vf->flags &= ~BNXT_VF_SPOOFCHK;
	}
	return rc;
}

int bnxt_get_vf_config(struct net_device *dev, int vf_id,
		       struct ifla_vf_info *ivi)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	ivi->vf = vf_id;
	vf = &bp->pf.vf[vf_id];

	memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
	ivi->max_tx_rate = vf->max_tx_rate;
	ivi->min_tx_rate = vf->min_tx_rate;
	ivi->vlan = vf->vlan;
	ivi->qos = vf->flags & BNXT_VF_QOS;
	ivi->spoofchk = vf->flags & BNXT_VF_SPOOFCHK;
	if (!(vf->flags & BNXT_VF_LINK_FORCED))
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->flags & BNXT_VF_LINK_UP)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;

	return 0;
}

int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;
	/* Reject broadcast/multicast MAC addresses; a zero MAC address
	 * means the VF is allowed to use its own MAC address.
	 */
	if (is_multicast_ether_addr(mac)) {
		netdev_err(dev, "Invalid VF ethernet address\n");
		return -EINVAL;
	}
	vf = &bp->pf.vf[vf_id];

	memcpy(vf->mac_addr, mac, ETH_ALEN);
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	u16 vlan_tag;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	/* TODO: proper handling of user priority is still needed; for now,
	 * fail the command if a nonzero priority is given.
	 */
	if (vlan_id > 4095 || qos)
		return -EINVAL;

	vf = &bp->pf.vf[vf_id];
	vlan_tag = vlan_id;
	if (vlan_tag == vf->vlan)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.dflt_vlan = cpu_to_le16(vlan_tag);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		vf->vlan = vlan_tag;
	return rc;
}

int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
		   int max_tx_rate)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	u32 pf_link_speed;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
	if (max_tx_rate > pf_link_speed) {
		netdev_info(bp->dev, "max tx rate %d exceeds PF link speed for VF %d\n",
			    max_tx_rate, vf_id);
		return -EINVAL;
	}

	if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
		netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
			    min_tx_rate, vf_id);
		return -EINVAL;
	}
	if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
		return 0;
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
	req.max_bw = cpu_to_le32(max_tx_rate);
	req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
	req.min_bw = cpu_to_le32(min_tx_rate);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		vf->min_tx_rate = min_tx_rate;
		vf->max_tx_rate = max_tx_rate;
	}
	return rc;
}

int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];

	vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->flags |= BNXT_VF_LINK_UP;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->flags |= BNXT_VF_LINK_FORCED;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
		break;
	default:
		netdev_err(bp->dev, "Invalid link option\n");
		rc = -EINVAL;
		break;
	}
	/* CHIMP TODO: send msg to VF to update new link state */

	return rc;
}

static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
{
	int i;
	struct bnxt_vf_info *vf;

	for (i = 0; i < num_vfs; i++) {
		vf = &bp->pf.vf[i];
		memset(vf, 0, sizeof(*vf));
		vf->flags = BNXT_VF_QOS | BNXT_VF_LINK_UP;
	}
	return 0;
}

static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
{
	int i, rc = 0;
	struct bnxt_pf_info *pf = &bp->pf;
	struct hwrm_func_vf_resc_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
		req.vf_id = cpu_to_le16(i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static void bnxt_free_vf_resources(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;
	int i;

	kfree(bp->pf.vf_event_bmap);
	bp->pf.vf_event_bmap = NULL;

	for (i = 0; i < 4; i++) {
		if (bp->pf.hwrm_cmd_req_addr[i]) {
			dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					  bp->pf.hwrm_cmd_req_addr[i],
					  bp->pf.hwrm_cmd_req_dma_addr[i]);
			bp->pf.hwrm_cmd_req_addr[i] = NULL;
		}
	}

	kfree(bp->pf.vf);
	bp->pf.vf = NULL;
}

static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
{
	struct pci_dev *pdev = bp->pdev;
	u32 nr_pages, size, i, j, k = 0;

	bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
	if (!bp->pf.vf)
		return -ENOMEM;

	bnxt_set_vf_attr(bp, num_vfs);

	size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
	nr_pages = size / BNXT_PAGE_SIZE;
	if (size & (BNXT_PAGE_SIZE - 1))
		nr_pages++;

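	/* Each DMA-coherent page is carved into BNXT_HWRM_REQS_PER_PAGE
	 * slots of BNXT_HWRM_REQ_MAX_SIZE bytes, and each VF is handed
	 * exactly one slot for forwarding its HWRM commands through the PF.
	 */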
	for (i = 0; i < nr_pages; i++) {
		bp->pf.hwrm_cmd_req_addr[i] =
			dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					   &bp->pf.hwrm_cmd_req_dma_addr[i],
					   GFP_KERNEL);

		if (!bp->pf.hwrm_cmd_req_addr[i])
			return -ENOMEM;

		for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
			struct bnxt_vf_info *vf = &bp->pf.vf[k];

			vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
						j * BNXT_HWRM_REQ_MAX_SIZE;
			vf->hwrm_cmd_req_dma_addr =
				bp->pf.hwrm_cmd_req_dma_addr[i] + j *
				BNXT_HWRM_REQ_MAX_SIZE;
			k++;
		}
	}

	/* Max 128 VFs: the 16-byte bitmap provides 128 bits, one per VF */
	bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
	if (!bp->pf.vf_event_bmap)
		return -ENOMEM;

	bp->pf.hwrm_cmd_req_pages = nr_pages;
	return 0;
}

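/* Register the VF request buffer pages with the firmware. The request
 * format carries four page-address fields, matching the four-entry
 * hwrm_cmd_req_addr[] array allocated above.
 */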
static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
	struct hwrm_func_buf_rgtr_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);

	req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
	req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
	req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
	req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
	req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
	req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
	req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

/* Only called by the PF to reserve resources for VFs */
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
	u32 rc = 0, mtu, i;
	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
	u16 vf_ring_grps;
	struct hwrm_func_cfg_input req = {0};
	struct bnxt_pf_info *pf = &bp->pf;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);

	/* Remaining rings are distributed equally among VFs for now */
	/* TODO: the following workaround is needed to restrict the total
	 * number of vf_cp_rings so that it does not exceed the number of HW
	 * ring groups. This WA should be removed once the new HWRM provides
	 * HW ring group capability in hwrm_func_qcap.
	 */
	vf_cp_rings = min_t(u16, pf->max_cp_rings, pf->max_stat_ctxs);
	vf_cp_rings = (vf_cp_rings - bp->cp_nr_rings) / num_vfs;
	/* TODO: restore this logic below once the WA above is removed */
	/* vf_cp_rings = (pf->max_cp_rings - bp->cp_nr_rings) / num_vfs; */
	vf_stat_ctx = (pf->max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings * 2) /
			      num_vfs;
	else
		vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings) / num_vfs;
	vf_ring_grps = (bp->pf.max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
	vf_tx_rings = (pf->max_tx_rings - bp->tx_nr_rings) / num_vfs;

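	/* Worked example (hypothetical numbers): with 128 max RX rings, 8
	 * in use by the PF and aggregation rings enabled, 4 VFs would each
	 * get (128 - 8 * 2) / 4 = 28 RX rings; integer division leaves any
	 * remainder with the PF.
	 */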
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
				  FUNC_CFG_REQ_ENABLES_MRU |
				  FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_VNICS |
				  FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);

	mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	req.mru = cpu_to_le16(mtu);
	req.mtu = cpu_to_le16(mtu);

	req.num_rsscos_ctxs = cpu_to_le16(1);
	req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
	req.num_tx_rings = cpu_to_le16(vf_tx_rings);
	req.num_rx_rings = cpu_to_le16(vf_rx_rings);
	req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
	req.num_l2_ctxs = cpu_to_le16(4);
	vf_vnics = 1;

	req.num_vnics = cpu_to_le16(vf_vnics);
	/* FIXME: spec currently uses 1 bit for stats ctx */
	req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_vfs; i++) {
		req.fid = cpu_to_le16(pf->first_vf_id + i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
		pf->active_vfs = i + 1;
		pf->vf[i].fw_fid = le16_to_cpu(req.fid);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (!rc) {
		pf->max_tx_rings -= vf_tx_rings * num_vfs;
		pf->max_rx_rings -= vf_rx_rings * num_vfs;
		pf->max_hw_ring_grps -= vf_ring_grps * num_vfs;
		pf->max_cp_rings -= vf_cp_rings * num_vfs;
		pf->max_rsscos_ctxs -= num_vfs;
		pf->max_stat_ctxs -= vf_stat_ctx * num_vfs;
		pf->max_vnics -= vf_vnics * num_vfs;
	}
	return rc;
}

static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
	int rc = 0, vfs_supported;
	int min_rx_rings, min_tx_rings, min_rss_ctxs;
	int tx_ok = 0, rx_ok = 0, rss_ok = 0;

	/* Check if we can enable the requested number of VFs. At a minimum
	 * we require 1 RX ring and 1 TX ring for each VF. In this minimal
	 * configuration, features like TPA will not be available.
	 */
	vfs_supported = *num_vfs;

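	/* Walk down from the requested count until the PF's leftover RX/TX
	 * rings and RSS contexts can cover one of each per VF.
	 */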
	while (vfs_supported) {
		min_rx_rings = vfs_supported;
		min_tx_rings = vfs_supported;
		min_rss_ctxs = vfs_supported;

		if (bp->flags & BNXT_FLAG_AGG_RINGS) {
			if (bp->pf.max_rx_rings - bp->rx_nr_rings * 2 >=
			    min_rx_rings)
				rx_ok = 1;
		} else {
			if (bp->pf.max_rx_rings - bp->rx_nr_rings >=
			    min_rx_rings)
				rx_ok = 1;
		}

		if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings)
			tx_ok = 1;

		if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs)
			rss_ok = 1;

		if (tx_ok && rx_ok && rss_ok)
			break;

		vfs_supported--;
	}

	if (!vfs_supported) {
		netdev_err(bp->dev, "Cannot enable VFs as all resources are used by PF\n");
		return -EINVAL;
	}

	if (vfs_supported != *num_vfs) {
		netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
			    *num_vfs, vfs_supported);
		*num_vfs = vfs_supported;
	}

	rc = bnxt_alloc_vf_resources(bp, *num_vfs);
	if (rc)
		goto err_out1;

	/* Reserve resources for VFs */
	rc = bnxt_hwrm_func_cfg(bp, *num_vfs);
	if (rc)
		goto err_out2;

	/* Register buffers for VFs */
	rc = bnxt_hwrm_func_buf_rgtr(bp);
	if (rc)
		goto err_out2;

	rc = pci_enable_sriov(bp->pdev, *num_vfs);
	if (rc)
		goto err_out2;

	return 0;

err_out2:
	/* Free the resources reserved for the VFs */
	bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);

err_out1:
	bnxt_free_vf_resources(bp);

	return rc;
}

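/* Forward an async event completion to one VF, or to all VFs when vf is
 * NULL; used below to tell assigned VFs that the PF driver is unloading.
 */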
static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
					  struct bnxt_vf_info *vf,
					  u16 event_id)
{
	int rc = 0;
	struct hwrm_fwd_async_event_cmpl_input req = {0};
	struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_async_event_cmpl *async_cmpl;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
	if (vf)
		req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
	else
		/* broadcast this async event to all VFs */
		req.encap_async_event_target_id = cpu_to_le16(0xffff);
	async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
	async_cmpl->type =
		cpu_to_le16(HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
	async_cmpl->event_id = cpu_to_le16(event_id);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
			   rc);
		goto fwd_async_event_cmpl_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
			   resp->error_code);
		rc = -1;
	}

fwd_async_event_cmpl_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

void bnxt_sriov_disable(struct bnxt *bp)
{
	u16 num_vfs = pci_num_vf(bp->pdev);

	if (!num_vfs)
		return;

	if (pci_vfs_assigned(bp->pdev)) {
		bnxt_hwrm_fwd_async_event_cmpl(
			bp, NULL,
			HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
		netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
			    num_vfs);
	} else {
		pci_disable_sriov(bp->pdev);
		/* Free the HW resources reserved for the VFs */
		bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
	}

	bnxt_free_vf_resources(bp);

	bp->pf.active_vfs = 0;
	/* Reclaim all resources for the PF. */
	bnxt_hwrm_func_qcaps(bp);
}

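/* .sriov_configure hook; reached when the admin writes a VF count to the
 * PF's sysfs node, e.g. "echo 4 > /sys/bus/pci/devices/<bdf>/sriov_numvfs".
 */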
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
		netdev_warn(dev, "SRIOV is not allowed when the IRQ mode is not MSIX\n");
		return 0;
	}

	rtnl_lock();
	if (!netif_running(dev)) {
		netdev_warn(dev, "Rejecting SRIOV config request since the interface is down!\n");
		rtnl_unlock();
		return 0;
	}
	bp->sriov_cfg = true;
	rtnl_unlock();

	if (pci_vfs_assigned(bp->pdev)) {
		netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
		num_vfs = 0;
		goto sriov_cfg_exit;
	}

	/* Check if the enabled VF count is the same as requested */
	if (num_vfs && num_vfs == bp->pf.active_vfs)
		goto sriov_cfg_exit;

	/* if there are previously existing VFs, clean them up */
	bnxt_sriov_disable(bp);
	if (!num_vfs)
		goto sriov_cfg_exit;

	bnxt_sriov_enable(bp, &num_vfs);

sriov_cfg_exit:
	bp->sriov_cfg = false;
	wake_up(&bp->sriov_cfg_wait);

	return num_vfs;
}

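/* The three helpers below service HWRM commands that a VF has placed in
 * its forwarding buffer: bnxt_hwrm_fwd_resp() returns a PF-crafted
 * response, bnxt_hwrm_fwd_err_resp() rejects the request, and
 * bnxt_hwrm_exec_fwd_resp() asks the firmware to execute it on the VF's
 * behalf.
 */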
static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
			      void *encap_resp, __le64 encap_resp_addr,
			      __le16 encap_resp_cpr, u32 msg_size)
{
	int rc = 0;
	struct hwrm_fwd_resp_input req = {0};
	struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);

	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_len = cpu_to_le16(msg_size);
	req.encap_resp_addr = encap_resp_addr;
	req.encap_resp_cmpl_ring = encap_resp_cpr;
	memcpy(req.encap_resp, encap_resp, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
		goto fwd_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_fwd_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}

fwd_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				  u32 msg_size)
{
	int rc = 0;
	struct hwrm_reject_fwd_resp_input req = {0};
	struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
		goto fwd_err_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_fwd_err_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}

fwd_err_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				   u32 msg_size)
{
	int rc = 0;
	struct hwrm_exec_fwd_resp_input req = {0};
	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
		goto exec_fwd_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_exec_fw_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}

exec_fwd_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

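/* A VF's CFA_L2_FILTER_ALLOC request is executed only when the PF has not
 * assigned the VF a MAC address, or when the requested l2_addr matches the
 * assigned one; anything else is rejected.
 */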
static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
	struct hwrm_cfa_l2_filter_alloc_input *req =
		(struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;

	if (!is_valid_ether_addr(vf->mac_addr) ||
	    ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
		return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
	else
		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
}

static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;

	if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
		/* real link */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
	} else {
		struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
		struct hwrm_port_phy_qcfg_input *phy_qcfg_req;

		phy_qcfg_req =
			(struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
		mutex_lock(&bp->hwrm_cmd_lock);
		memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
		       sizeof(phy_qcfg_resp));
		mutex_unlock(&bp->hwrm_cmd_lock);
		phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;

		if (vf->flags & BNXT_VF_LINK_UP) {
			/* if physical link is down, force link up on VF */
			if (phy_qcfg_resp.link ==
			    PORT_PHY_QCFG_RESP_LINK_NO_LINK) {
				phy_qcfg_resp.link =
					PORT_PHY_QCFG_RESP_LINK_LINK;
				phy_qcfg_resp.link_speed = cpu_to_le16(
					PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
				phy_qcfg_resp.duplex =
					PORT_PHY_QCFG_RESP_DUPLEX_FULL;
				phy_qcfg_resp.pause =
					(PORT_PHY_QCFG_RESP_PAUSE_TX |
					 PORT_PHY_QCFG_RESP_PAUSE_RX);
			}
		} else {
			/* force link down */
			phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
			phy_qcfg_resp.link_speed = 0;
			phy_qcfg_resp.duplex = PORT_PHY_QCFG_RESP_DUPLEX_HALF;
			phy_qcfg_resp.pause = 0;
		}
		rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
					phy_qcfg_req->resp_addr,
					phy_qcfg_req->cmpl_ring,
					sizeof(phy_qcfg_resp));
	}
	return rc;
}

static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;
	struct input *encap_req = vf->hwrm_cmd_req_addr;
	u32 req_type = le16_to_cpu(encap_req->req_type);

	switch (req_type) {
	case HWRM_CFA_L2_FILTER_ALLOC:
		rc = bnxt_vf_validate_set_mac(bp, vf);
		break;
	case HWRM_FUNC_CFG:
		/* TODO: validate whether the VF is allowed to change its
		 * MAC address, MTU, number of rings, etc.
		 */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_func_cfg_input));
		break;
	case HWRM_PORT_PHY_QCFG:
		rc = bnxt_vf_set_link(bp, vf);
		break;
	default:
		break;
	}
	return rc;
}

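/* Service all pending forwarded VF requests. Bits in vf_event_bmap are
 * presumably set by the PF's completion handling when the firmware signals
 * a forwarded VF command; each set bit is cleared and dispatched here.
 */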
void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;

	/* Scan through VFs and process commands */
	while (1) {
		vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
		if (vf_id >= active_vfs)
			break;

		clear_bit(vf_id, bp->pf.vf_event_bmap);
		bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
		i = vf_id + 1;
	}
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
	req.fid = cpu_to_le16(0xffff);

	mutex_lock(&bp->hwrm_cmd_lock);
	if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
		goto update_vf_mac_exit;

	/* Store the MAC address from the firmware. There are 2 cases:
	 * 1. The MAC address is valid. It is assigned from the PF and we
	 *    need to override the current VF MAC address with it.
	 * 2. The MAC address is zero. The VF will use a random MAC address
	 *    by default, but the stored zero MAC still allows the VF user
	 *    to change the random MAC address using ndo_set_mac_address()
	 *    if desired.
	 */
	if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr))
		memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);

	/* overwrite netdev dev_addr with admin VF MAC */
	if (is_valid_ether_addr(bp->vf.mac_addr))
		memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
update_vf_mac_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
}

#else

void bnxt_sriov_disable(struct bnxt *bp)
{
}

void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enabled\n");
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
}
#endif