1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
9 #include "ice_virtchnl_allowlist.h"
11 #define FIELD_SELECTOR(proto_hdr_field) \
12 BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)
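/* FIELD_SELECTOR() assumes each VIRTCHNL_PROTO_HDR_*_* field enum carries its
* per-header field index in the bits covered by PROTO_HDR_FIELD_MASK: masking
* keeps that index and BIT() turns it into a single selector bit. Selectors
* for several fields of the same header can therefore be OR'ed together, as
* the hash field tables below do. For example, a field with per-header index
* 1 would yield BIT(1) == 0x2.
*/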
14 struct ice_vc_hdr_match_type {
15 u32 vc_hdr; /* virtchnl headers (VIRTCHNL_PROTO_HDR_XXX) */
16 u32 ice_hdr; /* ice headers (ICE_FLOW_SEG_HDR_XXX) */
};
19 static const struct ice_vc_hdr_match_type ice_vc_hdr_list_os[] = {
20 {VIRTCHNL_PROTO_HDR_NONE, ICE_FLOW_SEG_HDR_NONE},
21 {VIRTCHNL_PROTO_HDR_IPV4, ICE_FLOW_SEG_HDR_IPV4 |
22 ICE_FLOW_SEG_HDR_IPV_OTHER},
23 {VIRTCHNL_PROTO_HDR_IPV6, ICE_FLOW_SEG_HDR_IPV6 |
24 ICE_FLOW_SEG_HDR_IPV_OTHER},
25 {VIRTCHNL_PROTO_HDR_TCP, ICE_FLOW_SEG_HDR_TCP},
26 {VIRTCHNL_PROTO_HDR_UDP, ICE_FLOW_SEG_HDR_UDP},
27 {VIRTCHNL_PROTO_HDR_SCTP, ICE_FLOW_SEG_HDR_SCTP},
};
30 static const struct ice_vc_hdr_match_type ice_vc_hdr_list_comms[] = {
31 {VIRTCHNL_PROTO_HDR_NONE, ICE_FLOW_SEG_HDR_NONE},
32 {VIRTCHNL_PROTO_HDR_ETH, ICE_FLOW_SEG_HDR_ETH},
33 {VIRTCHNL_PROTO_HDR_S_VLAN, ICE_FLOW_SEG_HDR_VLAN},
34 {VIRTCHNL_PROTO_HDR_C_VLAN, ICE_FLOW_SEG_HDR_VLAN},
35 {VIRTCHNL_PROTO_HDR_IPV4, ICE_FLOW_SEG_HDR_IPV4 |
36 ICE_FLOW_SEG_HDR_IPV_OTHER},
37 {VIRTCHNL_PROTO_HDR_IPV6, ICE_FLOW_SEG_HDR_IPV6 |
38 ICE_FLOW_SEG_HDR_IPV_OTHER},
39 {VIRTCHNL_PROTO_HDR_TCP, ICE_FLOW_SEG_HDR_TCP},
40 {VIRTCHNL_PROTO_HDR_UDP, ICE_FLOW_SEG_HDR_UDP},
41 {VIRTCHNL_PROTO_HDR_SCTP, ICE_FLOW_SEG_HDR_SCTP},
42 {VIRTCHNL_PROTO_HDR_PPPOE, ICE_FLOW_SEG_HDR_PPPOE},
43 {VIRTCHNL_PROTO_HDR_GTPU_IP, ICE_FLOW_SEG_HDR_GTPU_IP},
44 {VIRTCHNL_PROTO_HDR_GTPU_EH, ICE_FLOW_SEG_HDR_GTPU_EH},
45 {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
46 ICE_FLOW_SEG_HDR_GTPU_DWN},
47 {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
48 ICE_FLOW_SEG_HDR_GTPU_UP},
49 {VIRTCHNL_PROTO_HDR_L2TPV3, ICE_FLOW_SEG_HDR_L2TPV3},
50 {VIRTCHNL_PROTO_HDR_ESP, ICE_FLOW_SEG_HDR_ESP},
51 {VIRTCHNL_PROTO_HDR_AH, ICE_FLOW_SEG_HDR_AH},
52 {VIRTCHNL_PROTO_HDR_PFCP, ICE_FLOW_SEG_HDR_PFCP_SESSION},
};
55 struct ice_vc_hash_field_match_type {
56 u32 vc_hdr; /* virtchnl headers
57 * (VIRTCHNL_PROTO_HDR_XXX)
*/
59 u32 vc_hash_field; /* virtchnl hash fields selector
60 * FIELD_SELECTOR((VIRTCHNL_PROTO_HDR_ETH_XXX))
*/
62 u64 ice_hash_field; /* ice hash fields
63 * (BIT_ULL(ICE_FLOW_FIELD_IDX_XXX))
*/
};
static const struct
68 ice_vc_hash_field_match_type ice_vc_hash_field_list_os[] = {
69 {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
70 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
71 {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
72 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
73 {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
74 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
ICE_FLOW_HASH_IPV4},
76 {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
77 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
78 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
79 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
80 {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
81 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
82 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
83 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
84 {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
85 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
86 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
87 ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
88 {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
89 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
90 {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
91 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
92 {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
93 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
94 {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
95 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
ICE_FLOW_HASH_IPV6},
97 {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
98 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
99 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
100 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
101 {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
102 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
103 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
104 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
105 {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
106 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
107 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
108 ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
109 {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
110 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
111 {VIRTCHNL_PROTO_HDR_TCP,
112 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
113 BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
114 {VIRTCHNL_PROTO_HDR_TCP,
115 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
116 BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
117 {VIRTCHNL_PROTO_HDR_TCP,
118 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
119 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
120 ICE_FLOW_HASH_TCP_PORT},
121 {VIRTCHNL_PROTO_HDR_UDP,
122 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
123 BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
124 {VIRTCHNL_PROTO_HDR_UDP,
125 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
126 BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
127 {VIRTCHNL_PROTO_HDR_UDP,
128 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
129 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
130 ICE_FLOW_HASH_UDP_PORT},
131 {VIRTCHNL_PROTO_HDR_SCTP,
132 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
133 BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
134 {VIRTCHNL_PROTO_HDR_SCTP,
135 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
136 BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
137 {VIRTCHNL_PROTO_HDR_SCTP,
138 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
139 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
140 ICE_FLOW_HASH_SCTP_PORT},
};
static const struct
144 ice_vc_hash_field_match_type ice_vc_hash_field_list_comms[] = {
145 {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC),
146 BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)},
147 {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
148 BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA)},
149 {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC) |
150 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA) | BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA)},
152 {VIRTCHNL_PROTO_HDR_ETH,
153 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE),
154 BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE)},
155 {VIRTCHNL_PROTO_HDR_S_VLAN,
156 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_S_VLAN_ID),
157 BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN)},
158 {VIRTCHNL_PROTO_HDR_C_VLAN,
159 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_C_VLAN_ID),
160 BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN)},
161 {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
162 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
163 {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
164 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
165 {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
166 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
ICE_FLOW_HASH_IPV4},
168 {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
169 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
170 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
171 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
172 {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
173 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
174 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
175 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
176 {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
177 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
178 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
179 ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
180 {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
181 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
182 {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
183 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
184 {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
185 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
186 {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
187 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
ICE_FLOW_HASH_IPV6},
189 {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
190 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
191 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
192 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
193 {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
194 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
195 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
196 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
197 {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
198 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
199 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
200 ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
201 {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
202 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
203 {VIRTCHNL_PROTO_HDR_TCP,
204 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
205 BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
206 {VIRTCHNL_PROTO_HDR_TCP,
207 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
208 BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
209 {VIRTCHNL_PROTO_HDR_TCP,
210 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
211 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
212 ICE_FLOW_HASH_TCP_PORT},
213 {VIRTCHNL_PROTO_HDR_UDP,
214 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
215 BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
216 {VIRTCHNL_PROTO_HDR_UDP,
217 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
218 BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
219 {VIRTCHNL_PROTO_HDR_UDP,
220 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
221 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
222 ICE_FLOW_HASH_UDP_PORT},
223 {VIRTCHNL_PROTO_HDR_SCTP,
224 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
225 BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
226 {VIRTCHNL_PROTO_HDR_SCTP,
227 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
228 BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
229 {VIRTCHNL_PROTO_HDR_SCTP,
230 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
231 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
232 ICE_FLOW_HASH_SCTP_PORT},
233 {VIRTCHNL_PROTO_HDR_PPPOE,
234 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID),
235 BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)},
236 {VIRTCHNL_PROTO_HDR_GTPU_IP,
237 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID),
238 BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)},
239 {VIRTCHNL_PROTO_HDR_L2TPV3,
240 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID),
241 BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)},
242 {VIRTCHNL_PROTO_HDR_ESP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ESP_SPI),
243 BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)},
244 {VIRTCHNL_PROTO_HDR_AH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_AH_SPI),
245 BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)},
246 {VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID),
247 BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)},
};
251 * ice_get_vf_vsi - get VF's VSI based on the stored index
252 * @vf: VF used to get VSI
254 static struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
256 return vf->pf->vsi[vf->lan_vsi_idx];
260 * ice_validate_vf_id - helper to check if VF ID is valid
261 * @pf: pointer to the PF structure
262 * @vf_id: the ID of the VF to check
264 static int ice_validate_vf_id(struct ice_pf *pf, u16 vf_id)
266 /* vf_id is unsigned, so only the upper bound needs checking; valid IDs are 0-255 */
267 if (vf_id >= pf->num_alloc_vfs) {
268 dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %u\n", vf_id);
275 * ice_check_vf_init - helper to check if VF init complete
276 * @pf: pointer to the PF structure
277 * @vf: the pointer to the VF to check
279 static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
281 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
282 dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
290 * ice_err_to_virt_err - translate errors for VF return code
291 * @ice_err: error return code
293 static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
297 return VIRTCHNL_STATUS_SUCCESS;
298 case ICE_ERR_BAD_PTR:
299 case ICE_ERR_INVAL_SIZE:
300 case ICE_ERR_DEVICE_NOT_SUPPORTED:
303 return VIRTCHNL_STATUS_ERR_PARAM;
304 case ICE_ERR_NO_MEMORY:
305 return VIRTCHNL_STATUS_ERR_NO_MEMORY;
306 case ICE_ERR_NOT_READY:
307 case ICE_ERR_RESET_FAILED:
308 case ICE_ERR_FW_API_VER:
309 case ICE_ERR_AQ_ERROR:
310 case ICE_ERR_AQ_TIMEOUT:
311 case ICE_ERR_AQ_FULL:
312 case ICE_ERR_AQ_NO_WORK:
313 case ICE_ERR_AQ_EMPTY:
314 return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
316 return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
321 * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
322 * @pf: pointer to the PF structure
323 * @v_opcode: operation code
324 * @v_retval: return value
325 * @msg: pointer to the msg buffer
326 * @msglen: msg length
329 ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
330 enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
332 struct ice_hw *hw = &pf->hw;
335 ice_for_each_vf(pf, i) {
336 struct ice_vf *vf = &pf->vf[i];
338 /* Not all VFs are enabled, so skip the ones that are not */
339 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
340 !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
343 /* Ignore return value on purpose - a given VF may fail, but
344 * we need to keep going and send to all of them
346 ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
352 * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
353 * @vf: pointer to the VF structure
354 * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
355 * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
356 * @link_up: whether or not to set the link up/down
359 ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
360 int ice_link_speed, bool link_up)
362 if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
363 pfe->event_data.link_event_adv.link_status = link_up;
365 pfe->event_data.link_event_adv.link_speed =
366 ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
368 pfe->event_data.link_event.link_status = link_up;
369 /* Legacy method for virtchnl link speeds */
370 pfe->event_data.link_event.link_speed =
371 (enum virtchnl_link_speed)
372 ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
377 * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
378 * @vf: the VF to check
380 * Returns true if the VF has no Rx and no Tx queues enabled, false otherwise.
383 static bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
385 return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
386 !bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
390 * ice_is_vf_link_up - check if the VF's link is up
391 * @vf: VF to check if link is up
393 static bool ice_is_vf_link_up(struct ice_vf *vf)
395 struct ice_pf *pf = vf->pf;
397 if (ice_check_vf_init(pf, vf))
400 if (ice_vf_has_no_qs_ena(vf))
402 else if (vf->link_forced)
405 return pf->hw.port_info->phy.link_info.link_info &
410 * ice_vc_notify_vf_link_state - Inform a VF of link status
411 * @vf: pointer to the VF structure
413 * Send a link status message to a single VF.
415 static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
417 struct virtchnl_pf_event pfe = { 0 };
418 struct ice_hw *hw = &vf->pf->hw;
420 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
421 pfe.severity = PF_EVENT_SEVERITY_INFO;
423 if (ice_is_vf_link_up(vf))
424 ice_set_pfe_link(vf, &pfe,
425 hw->port_info->phy.link_info.link_speed, true);
427 ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);
429 ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
430 VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
435 * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
436 * @vf: VF to remove access to VSI for
438 static void ice_vf_invalidate_vsi(struct ice_vf *vf)
440 vf->lan_vsi_idx = ICE_NO_VSI;
441 vf->lan_vsi_num = ICE_NO_VSI;
445 * ice_vf_vsi_release - invalidate the VF's VSI after freeing it
446 * @vf: invalidate this VF's VSI after freeing it
448 static void ice_vf_vsi_release(struct ice_vf *vf)
450 ice_vsi_release(ice_get_vf_vsi(vf));
451 ice_vf_invalidate_vsi(vf);
455 * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
456 * @vf: VF that control VSI is being invalidated on
458 static void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
460 vf->ctrl_vsi_idx = ICE_NO_VSI;
464 * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it
465 * @vf: VF that control VSI is being released on
467 static void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
469 ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
470 ice_vf_ctrl_invalidate_vsi(vf);
474 * ice_free_vf_res - Free a VF's resources
475 * @vf: pointer to the VF info
477 static void ice_free_vf_res(struct ice_vf *vf)
479 struct ice_pf *pf = vf->pf;
480 int i, last_vector_idx;
482 /* First, disable VF's configuration API to prevent OS from
483 * accessing the VF's VSI after it's freed or invalidated.
485 clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
486 ice_vf_fdir_exit(vf);
487 /* free VF control VSI */
488 if (vf->ctrl_vsi_idx != ICE_NO_VSI)
489 ice_vf_ctrl_vsi_release(vf);
491 /* free VSI and disconnect it from the parent uplink */
492 if (vf->lan_vsi_idx != ICE_NO_VSI) {
493 ice_vf_vsi_release(vf);
497 last_vector_idx = vf->first_vector_idx + pf->num_msix_per_vf - 1;
499 /* clear VF MDD event information */
500 memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
501 memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
503 /* Disable interrupts so that VF starts in a known state */
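/* Writing GLINT_DYN_CTL with only the CLEARPBA bit set (INTENA left clear)
* clears any pending interrupt for the vector while leaving it disabled, so
* the VF will not see stale interrupts when it is brought back up.
*/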
504 for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
505 wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
508 /* reset some of the state variables keeping track of the resources */
509 clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
510 clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
514 * ice_dis_vf_mappings
515 * @vf: pointer to the VF structure
517 static void ice_dis_vf_mappings(struct ice_vf *vf)
519 struct ice_pf *pf = vf->pf;
526 vsi = ice_get_vf_vsi(vf);
528 dev = ice_pf_to_dev(pf);
529 wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
530 wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
532 first = vf->first_vector_idx;
533 last = first + pf->num_msix_per_vf - 1;
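/* Point every vector that was mapped to this VF back at the PF by setting
* the IS_PF bit and the PF number in GLINT_VECT2FUNC, undoing the per-VF
* mapping programmed in ice_ena_vf_msix_mappings().
*/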
534 for (v = first; v <= last; v++) {
537 reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
538 GLINT_VECT2FUNC_IS_PF_M) |
539 ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
540 GLINT_VECT2FUNC_PF_NUM_M));
541 wr32(hw, GLINT_VECT2FUNC(v), reg);
544 if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
545 wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
547 dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
549 if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
550 wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
552 dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
556 * ice_sriov_free_msix_res - Reset/free any used MSIX resources
557 * @pf: pointer to the PF structure
559 * Since no MSIX entries are taken from the pf->irq_tracker, just clear
560 * the pf->sriov_base_vector.
562 * Returns 0 on success, and -EINVAL on error.
564 static int ice_sriov_free_msix_res(struct ice_pf *pf)
566 struct ice_res_tracker *res;
571 res = pf->irq_tracker;
575 /* give back irq_tracker resources used */
576 WARN_ON(pf->sriov_base_vector < res->num_entries);
578 pf->sriov_base_vector = 0;
584 * ice_set_vf_state_qs_dis - Set VF queues state to disabled
585 * @vf: pointer to the VF structure
587 void ice_set_vf_state_qs_dis(struct ice_vf *vf)
589 /* Clear Rx/Tx enabled queues flag */
590 bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
591 bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
592 clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
596 * ice_dis_vf_qs - Disable the VF queues
597 * @vf: pointer to the VF structure
599 static void ice_dis_vf_qs(struct ice_vf *vf)
601 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
603 ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
604 ice_vsi_stop_all_rx_rings(vsi);
605 ice_set_vf_state_qs_dis(vf);
609 * ice_free_vfs - Free all VFs
610 * @pf: pointer to the PF structure
612 void ice_free_vfs(struct ice_pf *pf)
614 struct device *dev = ice_pf_to_dev(pf);
615 struct ice_hw *hw = &pf->hw;
621 while (test_and_set_bit(ICE_VF_DIS, pf->state))
622 usleep_range(1000, 2000);
624 /* Disable IOV before freeing resources. This lets any VF drivers
625 * running in the host get themselves cleaned up before we yank
626 * the carpet out from underneath their feet.
628 if (!pci_vfs_assigned(pf->pdev))
629 pci_disable_sriov(pf->pdev);
631 dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
633 /* Avoid wait time by stopping all VFs at the same time */
634 ice_for_each_vf(pf, i)
635 if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
636 ice_dis_vf_qs(&pf->vf[i]);
638 tmp = pf->num_alloc_vfs;
639 pf->num_qps_per_vf = 0;
640 pf->num_alloc_vfs = 0;
641 for (i = 0; i < tmp; i++) {
642 if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
643 /* disable VF qp mappings and set VF disable state */
644 ice_dis_vf_mappings(&pf->vf[i]);
645 set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
646 ice_free_vf_res(&pf->vf[i]);
650 if (ice_sriov_free_msix_res(pf))
651 dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
653 devm_kfree(dev, pf->vf);
656 /* This check is for when the driver is unloaded while VFs are
657 * assigned. Setting the number of VFs to 0 through sysfs is caught
658 * before this function ever gets called.
660 if (!pci_vfs_assigned(pf->pdev)) {
663 /* Acknowledge VFLR for all VFs. Without this, VFs will fail to
664 * work correctly when SR-IOV gets re-enabled.
666 for (vf_id = 0; vf_id < tmp; vf_id++) {
667 u32 reg_idx, bit_idx;
669 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
670 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
671 wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
675 /* clear malicious info if the VFs are getting released */
676 for (i = 0; i < tmp; i++)
677 if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs,
678 ICE_MAX_VF_COUNT, i))
679 dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
682 clear_bit(ICE_VF_DIS, pf->state);
683 clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
687 * ice_trigger_vf_reset - Reset a VF on HW
688 * @vf: pointer to the VF structure
689 * @is_vflr: true if VFLR was issued, false if not
690 * @is_pfr: true if the reset was triggered due to a previous PFR
692 * Trigger hardware to start a reset for a particular VF. Expects the caller
693 * to wait the proper amount of time to allow hardware to reset the VF before
694 * it cleans up and restores VF functionality.
696 static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
698 struct ice_pf *pf = vf->pf;
699 u32 reg, reg_idx, bit_idx;
700 unsigned int vf_abs_id, i;
704 dev = ice_pf_to_dev(pf);
706 vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
708 /* Inform VF that it is no longer active, as a warning */
709 clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
711 /* Disable VF's configuration API during reset. The flag is re-enabled
712 * when it's safe again to access VF's VSI.
714 clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
716 /* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
717 * needs to clear them in the case of VFR/VFLR. If this is done for
718 * PFR, it can mess up VF resets because the VF driver may already
719 * have started cleanup by the time we get here.
722 wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);
723 wr32(hw, VF_MBX_ATQLEN(vf->vf_id), 0);
726 /* In the case of a VFLR, the HW has already reset the VF and we
727 * just need to clean up, so don't hit the VFRTRIG register.
730 /* reset VF using VPGEN_VFRTRIG reg */
731 reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
732 reg |= VPGEN_VFRTRIG_VFSWR_M;
733 wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
735 /* clear the VFLR bit in GLGEN_VFLRSTAT */
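/* GLGEN_VFLRSTAT is an array of 32-bit registers with one bit per VF, so
* the absolute VF ID selects the register (/ 32) and the bit within it (% 32).
*/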
736 reg_idx = (vf_abs_id) / 32;
737 bit_idx = (vf_abs_id) % 32;
738 wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
741 wr32(hw, PF_PCI_CIAA,
742 VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
743 for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
744 reg = rd32(hw, PF_PCI_CIAD);
745 /* no transactions pending so stop polling */
746 if ((reg & VF_TRANS_PENDING_M) == 0)
749 dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
750 udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
755 * ice_vsi_manage_pvid - Enable or disable port VLAN for VSI
756 * @vsi: the VSI to update
757 * @pvid_info: VLAN ID and QoS used to set the PVID VSI context field
758 * @enable: true to enable the PVID, false to disable it
760 static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)
762 struct ice_hw *hw = &vsi->back->hw;
763 struct ice_aqc_vsi_props *info;
764 struct ice_vsi_ctx *ctxt;
765 enum ice_status status;
768 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
772 ctxt->info = vsi->info;
775 info->vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
776 ICE_AQ_VSI_PVLAN_INSERT_PVID |
777 ICE_AQ_VSI_VLAN_EMOD_STR;
778 info->sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
780 info->vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING |
781 ICE_AQ_VSI_VLAN_MODE_ALL;
782 info->sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
785 info->pvid = cpu_to_le16(pvid_info);
786 info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
787 ICE_AQ_VSI_PROP_SW_VALID);
789 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
791 dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %s aq_err %s\n",
792 ice_stat_str(status),
793 ice_aq_str(hw->adminq.sq_last_status));
798 vsi->info.vlan_flags = info->vlan_flags;
799 vsi->info.sw_flags2 = info->sw_flags2;
800 vsi->info.pvid = info->pvid;
807 * ice_vf_get_port_info - Get the VF's port info structure
808 * @vf: VF used to get the port info structure for
810 static struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
812 return vf->pf->hw.port_info;
816 * ice_vf_vsi_setup - Set up a VF VSI
817 * @vf: VF to setup VSI for
819 * Returns a pointer to the allocated VSI struct on success,
820 * or NULL on failure.
822 static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
824 struct ice_port_info *pi = ice_vf_get_port_info(vf);
825 struct ice_pf *pf = vf->pf;
828 vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id);
831 dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
832 ice_vf_invalidate_vsi(vf);
836 vf->lan_vsi_idx = vsi->idx;
837 vf->lan_vsi_num = vsi->vsi_num;
843 * ice_vf_ctrl_vsi_setup - Set up a VF control VSI
844 * @vf: VF to setup control VSI for
846 * Returns a pointer to the allocated VSI struct on success,
847 * or NULL on failure.
849 struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
851 struct ice_port_info *pi = ice_vf_get_port_info(vf);
852 struct ice_pf *pf = vf->pf;
855 vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf->vf_id);
857 dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
858 ice_vf_ctrl_invalidate_vsi(vf);
865 * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
866 * @pf: pointer to PF structure
867 * @vf: pointer to VF that the first MSIX vector index is being calculated for
869 * This returns the first MSIX vector index in PF space that is used by this VF.
870 * This index is used when accessing PF relative registers such as
871 * GLINT_VECT2FUNC and GLINT_DYN_CTL.
872 * This will always be the OICR index in the AVF driver so any functionality
873 * using vf->first_vector_idx for queue configuration will have to increment by
874 * 1 to avoid meddling with the OICR index.
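*
* For example, with illustrative values of pf->sriov_base_vector = 960 and
* pf->num_msix_per_vf = 17, VF 2 would get first_vector_idx = 960 + 2 * 17 = 994.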
876 static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
878 return pf->sriov_base_vector + vf->vf_id * pf->num_msix_per_vf;
882 * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
883 * @vf: VF to add VLAN filters for
885 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
886 * always re-adds either a VLAN 0 or port VLAN based filter after reset.
888 static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf)
890 struct device *dev = ice_pf_to_dev(vf->pf);
891 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
895 if (vf->port_vlan_info) {
896 err = ice_vsi_manage_pvid(vsi, vf->port_vlan_info, true);
898 dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
903 vlan_id = vf->port_vlan_info & VLAN_VID_MASK;
906 /* vlan_id will either be 0 or the port VLAN number */
907 err = ice_vsi_add_vlan(vsi, vlan_id, ICE_FWD_TO_VSI);
909 dev_err(dev, "failed to add %s VLAN %u filter for VF %u, error %d\n",
910 vf->port_vlan_info ? "port" : "", vlan_id, vf->vf_id,
919 * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
920 * @vf: VF to add MAC filters for
922 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
923 * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
925 static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
927 struct device *dev = ice_pf_to_dev(vf->pf);
928 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
929 enum ice_status status;
930 u8 broadcast[ETH_ALEN];
932 eth_broadcast_addr(broadcast);
933 status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
935 dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %s\n",
936 vf->vf_id, ice_stat_str(status));
937 return ice_status_to_errno(status);
942 if (is_valid_ether_addr(vf->hw_lan_addr.addr)) {
943 status = ice_fltr_add_mac(vsi, vf->hw_lan_addr.addr,
946 dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %s\n",
947 &vf->hw_lan_addr.addr[0], vf->vf_id,
948 ice_stat_str(status));
949 return ice_status_to_errno(status);
953 ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr);
960 * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
961 * @vf: VF to configure trust setting for
963 static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
966 set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
968 clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
972 * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
973 * @vf: VF to enable MSIX mappings for
975 * Some of the registers need to be indexed/configured using hardware global
976 * device values, and other registers need 0-based values, which represent PF-based values.
979 static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
981 int device_based_first_msix, device_based_last_msix;
982 int pf_based_first_msix, pf_based_last_msix, v;
983 struct ice_pf *pf = vf->pf;
984 int device_based_vf_id;
989 pf_based_first_msix = vf->first_vector_idx;
990 pf_based_last_msix = (pf_based_first_msix + pf->num_msix_per_vf) - 1;
992 device_based_first_msix = pf_based_first_msix +
993 pf->hw.func_caps.common_cap.msix_vector_first_id;
994 device_based_last_msix =
995 (device_based_first_msix + pf->num_msix_per_vf) - 1;
996 device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
998 reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
999 VPINT_ALLOC_FIRST_M) |
1000 ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
1001 VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
1002 wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
1004 reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
1005 & VPINT_ALLOC_PCI_FIRST_M) |
1006 ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
1007 VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
1008 wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
1010 /* map the interrupts to its functions */
1011 for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
1012 reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
1013 GLINT_VECT2FUNC_VF_NUM_M) |
1014 ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
1015 GLINT_VECT2FUNC_PF_NUM_M));
1016 wr32(hw, GLINT_VECT2FUNC(v), reg);
1019 /* Map mailbox interrupt to VF MSI-X vector 0 */
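/* Note that VPINT_ALLOC/VPINT_ALLOC_PCI above are indexed by the per-PF
* vf_id, while the VF number written into GLINT_VECT2FUNC and the index of
* VPINT_MBX_CTL use the device-based (global) VF ID, which is why both forms
* of the VF ID are computed in this function.
*/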
1020 wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
1024 * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
1025 * @vf: VF to enable the mappings for
1026 * @max_txq: max Tx queues allowed on the VF's VSI
1027 * @max_rxq: max Rx queues allowed on the VF's VSI
1029 static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
1031 struct device *dev = ice_pf_to_dev(vf->pf);
1032 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1033 struct ice_hw *hw = &vf->pf->hw;
1036 /* set regardless of mapping mode */
1037 wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
1039 /* VF Tx queues allocation */
1040 if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
1041 /* set the VF PF Tx queue range
1042 * VFNUMQ value should be set to (number of queues - 1). A value
1043 * of 0 means 1 queue and a value of 255 means 256 queues
1045 reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
1046 VPLAN_TX_QBASE_VFFIRSTQ_M) |
1047 (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
1048 VPLAN_TX_QBASE_VFNUMQ_M));
1049 wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
1051 dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
1054 /* set regardless of mapping mode */
1055 wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);
1057 /* VF Rx queues allocation */
1058 if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
1059 /* set the VF PF Rx queue range
1060 * VFNUMQ value should be set to (number of queues - 1). A value
1061 * of 0 means 1 queue and a value of 255 means 256 queues
1063 reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
1064 VPLAN_RX_QBASE_VFFIRSTQ_M) |
1065 (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
1066 VPLAN_RX_QBASE_VFNUMQ_M));
1067 wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
1069 dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
1074 * ice_ena_vf_mappings - enable VF MSIX and queue mapping
1075 * @vf: pointer to the VF structure
1077 static void ice_ena_vf_mappings(struct ice_vf *vf)
1079 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1081 ice_ena_vf_msix_mappings(vf);
1082 ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
1087 * @pf: pointer to the PF structure
1088 * @avail_res: available resources in the PF structure
1089 * @max_res: maximum resources that can be given per VF
1090 * @min_res: minimum resources that can be given per VF
1092 * Returns a non-zero value if resources (queues/vectors) are available, or
1093 * zero if the PF cannot accommodate all num_alloc_vfs.
1096 ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
1098 bool checked_min_res = false;
1101 /* Start by checking if the PF can assign the max number of resources to
1102 * all num_alloc_vfs.
1103 * If yes, return that number per VF.
1104 * If no, divide by 2, round up, and check again.
1105 * Repeat until even the minimum resources
1106 * are not available; in that case return 0.
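*
* For example, with illustrative values of 8 allocated VFs, 100 available
* queues, max_res = 16 and min_res = 1: 8 * 16 = 128 > 100, so halve to 8;
* 8 * 8 = 64 <= 100, so 8 is returned as the per-VF count.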
1109 while ((res >= min_res) && !checked_min_res) {
1112 num_all_res = pf->num_alloc_vfs * res;
1113 if (num_all_res <= avail_res)
1117 checked_min_res = true;
1119 res = DIV_ROUND_UP(res, 2);
1125 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
1126 * @vf: VF to calculate the register index for
1127 * @q_vector: a q_vector associated to the VF
1129 int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
1133 if (!vf || !q_vector)
1138 /* always add one to account for the OICR being the first MSIX */
1139 return pf->sriov_base_vector + pf->num_msix_per_vf * vf->vf_id +
1140 q_vector->v_idx + 1;
1144 * ice_get_max_valid_res_idx - Get the max valid resource index
1145 * @res: pointer to the resource to find the max valid index for
1147 * Start from the end of the ice_res_tracker and return right when we find the
1148 * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
1149 * valid for SR-IOV because it is the only consumer that manipulates the
1150 * res->end and this is always called when res->end is set to res->num_entries.
1152 static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
1159 for (i = res->num_entries - 1; i >= 0; i--)
1160 if (res->list[i] & ICE_RES_VALID_BIT)
1167 * ice_sriov_set_msix_res - Set any used MSIX resources
1168 * @pf: pointer to PF structure
1169 * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
1171 * This function allows SR-IOV resources to be taken from the end of the PF's
1172 * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
1173 * just set the pf->sriov_base_vector and return success.
1175 * If there are not enough resources available, return an error. This should
1176 * always be caught by ice_set_per_vf_res().
1178 * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
1179 * in the PF's space available for SR-IOV.
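*
* For example, with illustrative values of 1024 total MSI-X vectors, 120
* entries already used by the irq_tracker, and 8 VFs needing 17 vectors each
* (136 total), sriov_base_vector becomes 1024 - 136 = 888, which is accepted
* because 888 >= 120.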
1181 static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
1183 u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
1184 int vectors_used = pf->irq_tracker->num_entries;
1185 int sriov_base_vector;
1187 sriov_base_vector = total_vectors - num_msix_needed;
1189 /* make sure we only grab irq_tracker entries from the list end and
1190 * that we have enough available MSIX vectors
1192 if (sriov_base_vector < vectors_used)
1195 pf->sriov_base_vector = sriov_base_vector;
1201 * ice_set_per_vf_res - check if vectors and queues are available
1202 * @pf: pointer to the PF structure
1204 * First, determine HW interrupts from common pool. If we allocate fewer VFs, we
1205 * get more vectors and can enable more queues per VF. Note that this does not
1206 * grab any vectors from the SW pool already allocated. Also note, that all
1207 * vector counts include one for each VF's miscellaneous interrupt vector
1210 * Minimum VFs - 2 vectors, 1 queue pair
1211 * Small VFs - 5 vectors, 4 queue pairs
1212 * Medium VFs - 17 vectors, 16 queue pairs
1214 * Second, determine number of queue pairs per VF by starting with a pre-defined
1215 * maximum each VF supports. If this is not possible, then we adjust based on
1216 * queue pairs available on the device.
1218 * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used
1219 * by each VF during VF initialization and reset.
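*
* For example, with an illustrative 170 MSI-X vectors left for SR-IOV and 8
* VFs requested, each VF could get 21 vectors, so the medium tier above (17
* vectors, 16 queue pairs) is chosen, subject to enough Tx/Rx queues being
* available on the device.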
1221 static int ice_set_per_vf_res(struct ice_pf *pf)
1223 int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
1224 int msix_avail_per_vf, msix_avail_for_sriov;
1225 struct device *dev = ice_pf_to_dev(pf);
1226 u16 num_msix_per_vf, num_txq, num_rxq;
1228 if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
1231 /* determine MSI-X resources per VF */
1232 msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
1233 pf->irq_tracker->num_entries;
1234 msix_avail_per_vf = msix_avail_for_sriov / pf->num_alloc_vfs;
1235 if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
1236 num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
1237 } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
1238 num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
1239 } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
1240 num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
1241 } else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
1242 num_msix_per_vf = ICE_MIN_INTR_PER_VF;
1244 dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
1245 msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
1250 /* determine queue resources per VF */
1251 num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf),
1253 num_msix_per_vf - ICE_NONQ_VECS_VF,
1254 ICE_MAX_RSS_QS_PER_VF),
1257 num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf),
1259 num_msix_per_vf - ICE_NONQ_VECS_VF,
1260 ICE_MAX_RSS_QS_PER_VF),
1263 if (!num_txq || !num_rxq) {
1264 dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
1265 ICE_MIN_QS_PER_VF, pf->num_alloc_vfs);
1269 if (ice_sriov_set_msix_res(pf, num_msix_per_vf * pf->num_alloc_vfs)) {
1270 dev_err(dev, "Unable to set MSI-X resources for %d VFs\n",
1275 /* only allow equal Tx/Rx queue count (i.e. queue pairs) */
1276 pf->num_qps_per_vf = min_t(int, num_txq, num_rxq);
1277 pf->num_msix_per_vf = num_msix_per_vf;
1278 dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
1279 pf->num_alloc_vfs, pf->num_msix_per_vf, pf->num_qps_per_vf);
1285 * ice_clear_vf_reset_trigger - enable VF to access hardware
1286 * @vf: VF to enable hardware access for
1288 static void ice_clear_vf_reset_trigger(struct ice_vf *vf)
1290 struct ice_hw *hw = &vf->pf->hw;
1293 reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
1294 reg &= ~VPGEN_VFRTRIG_VFSWR_M;
1295 wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
1300 * ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s)
1301 * @vf: pointer to the VF info
1302 * @vsi: the VSI being configured
1303 * @promisc_m: mask of promiscuous config bits
1304 * @rm_promisc: promisc flag request from the VF to remove or add filter
1306 * This function configures VF VSI promiscuous mode, based on the VF requests,
1307 * for Unicast, Multicast and VLAN
1309 static enum ice_status
1310 ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
1313 struct ice_pf *pf = vf->pf;
1314 enum ice_status status = 0;
1318 if (vsi->num_vlan) {
1319 status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
1321 } else if (vf->port_vlan_info) {
1323 status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
1324 vf->port_vlan_info);
1326 status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
1327 vf->port_vlan_info);
1330 status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
1333 status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
1340 static void ice_vf_clear_counters(struct ice_vf *vf)
1342 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1346 memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
1347 memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
1351 * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
1352 * @vf: VF to perform pre VSI rebuild tasks
1354 * These tasks are items that don't need to be amortized since they are most
1355 * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
1357 static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
1359 ice_vf_clear_counters(vf);
1360 ice_clear_vf_reset_trigger(vf);
1364 * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
1365 * @vsi: Pointer to VSI
1367 * This function moves VSI into corresponding scheduler aggregator node
1368 * based on cached value of "aggregator node info" per VSI
1370 static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
1372 struct ice_pf *pf = vsi->back;
1373 enum ice_status status;
1379 dev = ice_pf_to_dev(pf);
1380 if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
1382 "agg_id %u already has reached max_num_vsis %u\n",
1383 vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
1387 status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
1388 vsi->idx, vsi->tc_cfg.ena_tc);
1390 dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
1391 vsi->idx, vsi->agg_node->agg_id);
1393 vsi->agg_node->num_vsis++;
1397 * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
1398 * @vf: VF to rebuild host configuration on
1400 static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
1402 struct device *dev = ice_pf_to_dev(vf->pf);
1403 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1405 ice_vf_set_host_trust_cfg(vf);
1407 if (ice_vf_rebuild_host_mac_cfg(vf))
1408 dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
1411 if (ice_vf_rebuild_host_vlan_cfg(vf))
1412 dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
1414 /* rebuild aggregator node config for main VF VSI */
1415 ice_vf_rebuild_aggregator_node_cfg(vsi);
1419 * ice_vf_rebuild_vsi_with_release - release and setup the VF's VSI
1420 * @vf: VF to release and setup the VSI for
1422 * This is only called when a single VF is being reset (i.e. VFR, VFLR, host VF
1423 * configuration change, etc.).
1425 static int ice_vf_rebuild_vsi_with_release(struct ice_vf *vf)
1427 ice_vf_vsi_release(vf);
1428 if (!ice_vf_vsi_setup(vf))
1435 * ice_vf_rebuild_vsi - rebuild the VF's VSI
1436 * @vf: VF to rebuild the VSI for
1438 * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
1439 * host, PFR, CORER, etc.).
1441 static int ice_vf_rebuild_vsi(struct ice_vf *vf)
1443 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1444 struct ice_pf *pf = vf->pf;
1446 if (ice_vsi_rebuild(vsi, true)) {
1447 dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
1451 /* vsi->idx will remain the same in this case so don't update
1454 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
1455 vf->lan_vsi_num = vsi->vsi_num;
1461 * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
1462 * @vf: VF to set in initialized state
1464 * After this function the VF will be ready to receive/handle the
1465 * VIRTCHNL_OP_GET_VF_RESOURCES message
1467 static void ice_vf_set_initialized(struct ice_vf *vf)
1469 ice_set_vf_state_qs_dis(vf);
1470 clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
1471 clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
1472 clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
1473 set_bit(ICE_VF_STATE_INIT, vf->vf_states);
1477 * ice_vf_post_vsi_rebuild - tasks to do after the VF's VSI have been rebuilt
1478 * @vf: VF to perform tasks on
1480 static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
1482 struct ice_pf *pf = vf->pf;
1487 ice_vf_rebuild_host_cfg(vf);
1489 ice_vf_set_initialized(vf);
1490 ice_ena_vf_mappings(vf);
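/* Writing VIRTCHNL_VFR_VFACTIVE to VFGEN_RSTAT signals the VF driver that
* its reset has completed and the VF is active again.
*/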
1491 wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
1495 * ice_reset_all_vfs - reset all allocated VFs in one go
1496 * @pf: pointer to the PF structure
1497 * @is_vflr: true if VFLR was issued, false if not
1499 * First, tell the hardware to reset each VF, then do all the waiting in one
1500 * chunk, and finally finish restoring each VF after the wait. This is useful
1501 * during PF routines which need to reset all VFs, as otherwise it must perform
1502 * these resets in a serialized fashion.
1504 * Returns true if any VFs were reset, and false otherwise.
1506 bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
1508 struct device *dev = ice_pf_to_dev(pf);
1509 struct ice_hw *hw = &pf->hw;
1513 /* If we don't have any VFs, then there is nothing to reset */
1514 if (!pf->num_alloc_vfs)
1517 /* clear all malicious info if the VFs are getting reset */
1518 ice_for_each_vf(pf, i)
1519 if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, i))
1520 dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", i);
1522 /* If VFs have been disabled, there is no need to reset */
1523 if (test_and_set_bit(ICE_VF_DIS, pf->state))
1526 /* Begin reset on all VFs at once */
1527 ice_for_each_vf(pf, v)
1528 ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);
1530 /* HW requires some time to make sure it can flush the FIFO for a VF
1531 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
1532 * sequence to make sure that it has completed. We'll keep track of
1533 * the VFs using a simple iterator that increments once that VF has
1534 * finished resetting.
1536 for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
1537 /* Check each VF in sequence */
1538 while (v < pf->num_alloc_vfs) {
1542 reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
1543 if (!(reg & VPGEN_VFRSTAT_VFRD_M)) {
1544 /* only delay if the check failed */
1545 usleep_range(10, 20);
1549 /* If the current VF has finished resetting, move on
1550 * to the next VF in sequence.
1556 /* Display a warning if at least one VF didn't manage to reset in
1557 * time, but continue on with the operation.
1559 if (v < pf->num_alloc_vfs)
1560 dev_warn(dev, "VF reset check timeout\n");
1562 /* free VF resources to begin resetting the VSI state */
1563 ice_for_each_vf(pf, v) {
1566 vf->driver_caps = 0;
1567 ice_vc_set_default_allowlist(vf);
1569 ice_vf_fdir_exit(vf);
1570 /* clean VF control VSI when resetting VFs since it should be
1571 * setup only when VF creates its first FDIR rule.
1573 if (vf->ctrl_vsi_idx != ICE_NO_VSI)
1574 ice_vf_ctrl_invalidate_vsi(vf);
1576 ice_vf_pre_vsi_rebuild(vf);
1577 ice_vf_rebuild_vsi(vf);
1578 ice_vf_post_vsi_rebuild(vf);
1582 clear_bit(ICE_VF_DIS, pf->state);
1588 * ice_is_vf_disabled
1589 * @vf: pointer to the VF info
1591 * Returns true if the PF or VF is disabled, false otherwise.
1593 static bool ice_is_vf_disabled(struct ice_vf *vf)
1595 struct ice_pf *pf = vf->pf;
1597 /* If the PF has been disabled, there is no need to reset the VF until
1598 * the PF is active again. Similarly, if the VF has been disabled, this
1599 * means something else is resetting the VF, so we shouldn't continue.
1600 * Otherwise, set disable VF state bit for actual reset, and continue.
1602 return (test_bit(ICE_VF_DIS, pf->state) ||
1603 test_bit(ICE_VF_STATE_DIS, vf->vf_states));
1607 * ice_reset_vf - Reset a particular VF
1608 * @vf: pointer to the VF structure
1609 * @is_vflr: true if VFLR was issued, false if not
1611 * Returns true if the VF is currently in reset, resets successfully, or resets
1612 * are disabled; returns false otherwise.
1614 bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
1616 struct ice_pf *pf = vf->pf;
1617 struct ice_vsi *vsi;
1625 dev = ice_pf_to_dev(pf);
1627 if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
1628 dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
1633 if (ice_is_vf_disabled(vf)) {
1634 dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
1639 /* Set VF disable bit state here, before triggering reset */
1640 set_bit(ICE_VF_STATE_DIS, vf->vf_states);
1641 ice_trigger_vf_reset(vf, is_vflr, false);
1643 vsi = ice_get_vf_vsi(vf);
1645 if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
1648 /* Call Disable LAN Tx queue AQ whether or not queues are
1649 * enabled. This is needed for successful completion of VFR.
1651 ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
1652 NULL, ICE_VF_RESET, vf->vf_id, NULL);
1655 /* poll VPGEN_VFRSTAT reg to make sure
1656 * that reset is complete
1658 for (i = 0; i < 10; i++) {
1659 /* VF reset requires driver to first reset the VF and then
1660 * poll the status register to make sure that the reset
1661 * completed successfully.
1663 reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
1664 if (reg & VPGEN_VFRSTAT_VFRD_M) {
1669 /* only sleep if the reset is not done */
1670 usleep_range(10, 20);
1673 vf->driver_caps = 0;
1674 ice_vc_set_default_allowlist(vf);
1676 /* Display a warning if the VF didn't manage to reset in time, but continue
1677 * on with the operation anyway.
1680 dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);
1682 /* disable promiscuous modes in case they were enabled;
1683 * ignore any errors if the disable process fails
1685 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
1686 test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
1687 if (vf->port_vlan_info || vsi->num_vlan)
1688 promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
1690 promisc_m = ICE_UCAST_PROMISC_BITS;
1692 if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
1693 dev_err(dev, "disabling promiscuous mode failed\n");
1696 ice_vf_fdir_exit(vf);
1697 /* clean VF control VSI when resetting VF since it should be setup
1698 * only when VF creates its first FDIR rule.
1700 if (vf->ctrl_vsi_idx != ICE_NO_VSI)
1701 ice_vf_ctrl_vsi_release(vf);
1703 ice_vf_pre_vsi_rebuild(vf);
1705 if (ice_vf_rebuild_vsi_with_release(vf)) {
1706 dev_err(dev, "Failed to release and setup the VF%u's VSI\n", vf->vf_id);
1710 ice_vf_post_vsi_rebuild(vf);
1712 /* if the VF has been reset allow it to come up again */
1713 if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, vf->vf_id))
1714 dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", vf->vf_id);
1720 * ice_vc_notify_link_state - Inform all VFs on a PF of link status
1721 * @pf: pointer to the PF structure
1723 void ice_vc_notify_link_state(struct ice_pf *pf)
1727 ice_for_each_vf(pf, i)
1728 ice_vc_notify_vf_link_state(&pf->vf[i]);
1732 * ice_vc_notify_reset - Send pending reset message to all VFs
1733 * @pf: pointer to the PF structure
1735 * indicate a pending reset to all VFs on a given PF
1737 void ice_vc_notify_reset(struct ice_pf *pf)
1739 struct virtchnl_pf_event pfe;
1741 if (!pf->num_alloc_vfs)
1744 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
1745 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
1746 ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
1747 (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
1751 * ice_vc_notify_vf_reset - Notify VF of a reset event
1752 * @vf: pointer to the VF structure
1754 static void ice_vc_notify_vf_reset(struct ice_vf *vf)
1756 struct virtchnl_pf_event pfe;
1763 if (ice_validate_vf_id(pf, vf->vf_id))
1766 /* Bail out if the VF is disabled, or is neither initialized nor active;
1767 * otherwise proceed with notifications
1769 if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
1770 !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
1771 test_bit(ICE_VF_STATE_DIS, vf->vf_states))
1774 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
1775 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
1776 ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
1777 VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
1782 * ice_init_vf_vsi_res - initialize/setup VF VSI resources
1783 * @vf: VF to initialize/setup the VSI for
1785 * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up the
1786 * VF VSI's broadcast filter. It is only used during initial VF creation.
1788 static int ice_init_vf_vsi_res(struct ice_vf *vf)
1790 struct ice_pf *pf = vf->pf;
1791 u8 broadcast[ETH_ALEN];
1792 enum ice_status status;
1793 struct ice_vsi *vsi;
1797 vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
1799 dev = ice_pf_to_dev(pf);
1800 vsi = ice_vf_vsi_setup(vf);
1804 err = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
1806 dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
1811 eth_broadcast_addr(broadcast);
1812 status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
1814 dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %s\n",
1815 vf->vf_id, ice_stat_str(status));
1816 err = ice_status_to_errno(status);
1825 ice_vf_vsi_release(vf);
1830 * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
1831 * @pf: PF the VFs are associated with
1833 static int ice_start_vfs(struct ice_pf *pf)
1835 struct ice_hw *hw = &pf->hw;
1838 ice_for_each_vf(pf, i) {
1839 struct ice_vf *vf = &pf->vf[i];
1841 ice_clear_vf_reset_trigger(vf);
1843 retval = ice_init_vf_vsi_res(vf);
1845 dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
1850 set_bit(ICE_VF_STATE_INIT, vf->vf_states);
1851 ice_ena_vf_mappings(vf);
1852 wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
1859 for (i = i - 1; i >= 0; i--) {
1860 struct ice_vf *vf = &pf->vf[i];
1862 ice_dis_vf_mappings(vf);
1863 ice_vf_vsi_release(vf);
1870 * ice_set_dflt_settings_vfs - set VF defaults during initialization/creation
1871 * @pf: PF holding reference to all VFs for default configuration
1873 static void ice_set_dflt_settings_vfs(struct ice_pf *pf)
1877 ice_for_each_vf(pf, i) {
1878 struct ice_vf *vf = &pf->vf[i];
1882 vf->vf_sw_id = pf->first_sw;
1883 /* assign default capabilities */
1884 set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vf->vf_caps);
1885 vf->spoofchk = true;
1886 vf->num_vf_qs = pf->num_qps_per_vf;
1887 ice_vc_set_default_allowlist(vf);
1889 /* ctrl_vsi_idx will be set to a valid value only when VF
1890 * creates its first fdir rule.
1892 ice_vf_ctrl_invalidate_vsi(vf);
1893 ice_vf_fdir_init(vf);
1898 * ice_alloc_vfs - allocate num_vfs in the PF structure
1899 * @pf: PF to store the allocated VFs in
1900 * @num_vfs: number of VFs to allocate
1902 static int ice_alloc_vfs(struct ice_pf *pf, int num_vfs)
1906 vfs = devm_kcalloc(ice_pf_to_dev(pf), num_vfs, sizeof(*vfs),
1912 pf->num_alloc_vfs = num_vfs;
1918 * ice_ena_vfs - enable VFs so they are ready to be used
1919 * @pf: pointer to the PF structure
1920 * @num_vfs: number of VFs to enable
1922 static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
1924 struct device *dev = ice_pf_to_dev(pf);
1925 struct ice_hw *hw = &pf->hw;
1928 /* Disable global interrupt 0 so we don't try to handle the VFLR. */
1929 wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
1930 ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
1931 set_bit(ICE_OICR_INTR_DIS, pf->state);
1934 ret = pci_enable_sriov(pf->pdev, num_vfs);
1936 pf->num_alloc_vfs = 0;
1937 goto err_unroll_intr;
1940 ret = ice_alloc_vfs(pf, num_vfs);
1942 goto err_pci_disable_sriov;
1944 if (ice_set_per_vf_res(pf)) {
1945 dev_err(dev, "Not enough resources for %d VFs, try with fewer number of VFs\n",
1948 goto err_unroll_sriov;
1951 ice_set_dflt_settings_vfs(pf);
1953 if (ice_start_vfs(pf)) {
1954 dev_err(dev, "Failed to start VF(s)\n");
1956 goto err_unroll_sriov;
1959 clear_bit(ICE_VF_DIS, pf->state);
1963 devm_kfree(dev, pf->vf);
1965 pf->num_alloc_vfs = 0;
1966 err_pci_disable_sriov:
1967 pci_disable_sriov(pf->pdev);
1969 /* rearm interrupts here */
1970 ice_irq_dynamic_ena(hw, NULL, NULL);
1971 clear_bit(ICE_OICR_INTR_DIS, pf->state);
1976 * ice_pci_sriov_ena - Enable or change number of VFs
1977 * @pf: pointer to the PF structure
1978 * @num_vfs: number of VFs to allocate
1980 * Returns 0 on success and negative on failure
1982 static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
1984 int pre_existing_vfs = pci_num_vf(pf->pdev);
1985 struct device *dev = ice_pf_to_dev(pf);
1988 if (pre_existing_vfs && pre_existing_vfs != num_vfs)
1990 else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
1993 if (num_vfs > pf->num_vfs_supported) {
1994 dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
1995 num_vfs, pf->num_vfs_supported);
1999 dev_info(dev, "Enabling %d VFs\n", num_vfs);
2000 err = ice_ena_vfs(pf, num_vfs);
2002 dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
2006 set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
2011 * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
2012 * @pf: PF to enable SR-IOV on
2014 static int ice_check_sriov_allowed(struct ice_pf *pf)
2016 struct device *dev = ice_pf_to_dev(pf);
2018 if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
2019 dev_err(dev, "This device is not capable of SR-IOV\n");
2023 if (ice_is_safe_mode(pf)) {
2024 dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
2028 if (!ice_pf_state_is_nominal(pf)) {
2029 dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
2037 * ice_sriov_configure - Enable or change number of VFs via sysfs
2038 * @pdev: pointer to a pci_dev structure
2039 * @num_vfs: number of VFs to allocate or 0 to free VFs
2041 * This function is called when the user updates the number of VFs in sysfs. On
2042 * success return whatever num_vfs was set to by the caller. Return negative on
2045 int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
2047 struct ice_pf *pf = pci_get_drvdata(pdev);
2048 struct device *dev = ice_pf_to_dev(pf);
2049 enum ice_status status;
2052 err = ice_check_sriov_allowed(pf);
2057 if (!pci_vfs_assigned(pdev)) {
2058 ice_mbx_deinit_snapshot(&pf->hw);
2061 ice_enable_lag(pf->lag);
2065 dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
2069 status = ice_mbx_init_snapshot(&pf->hw, num_vfs);
2071 return ice_status_to_errno(status);
2073 err = ice_pci_sriov_ena(pf, num_vfs);
2075 ice_mbx_deinit_snapshot(&pf->hw);
2080 ice_disable_lag(pf->lag);
2085 * ice_process_vflr_event - Free VF resources via IRQ calls
2086 * @pf: pointer to the PF structure
2088 * called from the VFLR IRQ handler to
2089 * free up VF resources and state variables
2091 void ice_process_vflr_event(struct ice_pf *pf)
2093 struct ice_hw *hw = &pf->hw;
2097 if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2101 ice_for_each_vf(pf, vf_id) {
2102 struct ice_vf *vf = &pf->vf[vf_id];
2103 u32 reg_idx, bit_idx;
2105 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
2106 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
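/* Worked example (illustrative values only): with a vf_base_id of 128 and
 * vf_id 5, the absolute VF index is 133, so reg_idx = 133 / 32 = 4 and
 * bit_idx = 133 % 32 = 5; this VF's VFLR status is bit 5 of GLGEN_VFLRSTAT(4).
 */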
2107 /* read the GLGEN_VFLRSTAT register to find out which VFs triggered VFLR */
2108 reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
2109 if (reg & BIT(bit_idx))
2110 /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
2111 ice_reset_vf(vf, true);
2116 * ice_vc_reset_vf - Perform software reset on the VF after informing the AVF
2117 * @vf: pointer to the VF info
2119 static void ice_vc_reset_vf(struct ice_vf *vf)
2121 ice_vc_notify_vf_reset(vf);
2122 ice_reset_vf(vf, false);
2126 * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
2127 * @pf: PF used to index all VFs
2128 * @pfq: queue index relative to the PF's function space
2130 * If no VF is found who owns the pfq then return NULL, otherwise return a
2131 * pointer to the VF who owns the pfq
2133 static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
2137 ice_for_each_vf(pf, vf_id) {
2138 struct ice_vf *vf = &pf->vf[vf_id];
2139 struct ice_vsi *vsi;
2142 vsi = ice_get_vf_vsi(vf);
2144 ice_for_each_rxq(vsi, rxq_idx)
2145 if (vsi->rxq_map[rxq_idx] == pfq)
2153 * ice_globalq_to_pfq - convert from global queue index to PF space queue index
2154 * @pf: PF used for conversion
2155 * @globalq: global queue index used to convert to PF space queue index
2157 static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
2159 return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
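/* Example (illustrative values): with rxq_first_id = 0 the mapping is the
 * identity; with a non-zero base such as 64, global Rx queue 70 maps to
 * PF-space queue 70 - 64 = 6.
 */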
2163 * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
2164 * @pf: PF that the LAN overflow event happened on
2165 * @event: structure holding the event information for the LAN overflow event
2167 * Determine if the LAN overflow event was caused by a VF queue. If it was not
2168 * caused by a VF, do nothing. If a VF caused this LAN overflow event, trigger a
2169 * reset on the offending VF.
2172 ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
2174 u32 gldcb_rtctq, queue;
2177 gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
2178 dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
2180 /* event returns device global Rx queue number */
2181 queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
2182 GLDCB_RTCTQ_RXQNUM_S;
2184 vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
2188 ice_vc_reset_vf(vf);
2192 * ice_vc_send_msg_to_vf - Send message to VF
2193 * @vf: pointer to the VF info
2194 * @v_opcode: virtual channel opcode
2195 * @v_retval: virtual channel return value
2196 * @msg: pointer to the msg buffer
2197 * @msglen: msg length
2202 ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
2203 enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
2205 enum ice_status aq_ret;
2213 if (ice_validate_vf_id(pf, vf->vf_id))
2216 dev = ice_pf_to_dev(pf);
2218 /* single place to detect unsuccessful return values */
2220 vf->num_inval_msgs++;
2221 dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id,
2222 v_opcode, v_retval);
2223 if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
2224 dev_err(dev, "Number of invalid messages exceeded for VF %d\n",
2226 dev_err(dev, "Use PF Control I/F to enable the VF\n");
2227 set_bit(ICE_VF_STATE_DIS, vf->vf_states);
2231 vf->num_valid_msgs++;
2232 /* reset the invalid counter, if a valid message is received. */
2233 vf->num_inval_msgs = 0;
2236 aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
2238 if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
2239 dev_info(dev, "Unable to send the message to VF %d ret %s aq_err %s\n",
2240 vf->vf_id, ice_stat_str(aq_ret),
2241 ice_aq_str(pf->hw.mailboxq.sq_last_status));
2249 * ice_vc_get_ver_msg
2250 * @vf: pointer to the VF info
2251 * @msg: pointer to the msg buffer
2253 * called from the VF to request the API version used by the PF
2255 static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
2257 struct virtchnl_version_info info = {
2258 VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
2261 vf->vf_ver = *(struct virtchnl_version_info *)msg;
2262 /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
2263 if (VF_IS_V10(&vf->vf_ver))
2264 info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
2266 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
2267 VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
2268 sizeof(struct virtchnl_version_info));
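/* Example exchange (sketch): a VF negotiating API 1.0 sends
 * { major = 1, minor = 0 } and gets minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS
 * back so it never sees capability bits it cannot parse; a 1.1 VF gets the
 * PF's full VIRTCHNL_VERSION_MAJOR/MINOR pair back unchanged.
 */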
2272 * ice_vc_get_max_frame_size - get max frame size allowed for VF
2273 * @vf: VF used to determine max frame size
2275 * Max frame size is determined based on the current port's max frame size and
2276 * whether a port VLAN is configured on this VF. The VF is not aware whether
2277 * it's in a port VLAN, so the PF needs to account for this in max frame size
2278 * checks and when sending the max frame size to the VF.
2280 static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
2282 struct ice_port_info *pi = ice_vf_get_port_info(vf);
2285 max_frame_size = pi->phy.link_info.max_frame_size;
2287 if (vf->port_vlan_info)
2288 max_frame_size -= VLAN_HLEN;
2290 return max_frame_size;
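/* Example (illustrative numbers): if the port reports a max frame size of
 * 9728 bytes and a port VLAN is configured on the VF, the VF is told
 * 9728 - VLAN_HLEN = 9724 so there is room for the 4-byte tag the PF
 * inserts on its behalf.
 */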
2294 * ice_vc_get_vf_res_msg
2295 * @vf: pointer to the VF info
2296 * @msg: pointer to the msg buffer
2298 * called from the VF to request its resources
2300 static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
2302 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2303 struct virtchnl_vf_resource *vfres = NULL;
2304 struct ice_pf *pf = vf->pf;
2305 struct ice_vsi *vsi;
2309 if (ice_check_vf_init(pf, vf)) {
2310 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2314 len = sizeof(struct virtchnl_vf_resource);
2316 vfres = kzalloc(len, GFP_KERNEL);
2318 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
2322 if (VF_IS_V11(&vf->vf_ver))
2323 vf->driver_caps = *(u32 *)msg;
2325 vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
2326 VIRTCHNL_VF_OFFLOAD_RSS_REG |
2327 VIRTCHNL_VF_OFFLOAD_VLAN;
2329 vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
2330 vsi = ice_get_vf_vsi(vf);
2332 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2336 if (!vsi->info.pvid)
2337 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
2339 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2340 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
2342 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
2343 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
2345 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
2348 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
2349 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF;
2351 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
2352 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
2354 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
2355 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
2357 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
2358 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
2360 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
2361 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
2363 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
2364 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
2366 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
2367 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
2369 if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
2370 vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
2372 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF)
2373 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF;
2375 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO)
2376 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_USO;
2378 vfres->num_vsis = 1;
2379 /* Tx and Rx queue counts are equal for the VF */
2380 vfres->num_queue_pairs = vsi->num_txq;
2381 vfres->max_vectors = pf->num_msix_per_vf;
2382 vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
2383 vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
2384 vfres->max_mtu = ice_vc_get_max_frame_size(vf);
2386 vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
2387 vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
2388 vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
2389 ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
2390 vf->hw_lan_addr.addr);
2392 /* match guest capabilities */
2393 vf->driver_caps = vfres->vf_cap_flags;
2395 ice_vc_set_caps_allowlist(vf);
2396 ice_vc_set_working_allowlist(vf);
2398 set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
2401 /* send the response back to the VF */
2402 ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
2410 * ice_vc_reset_vf_msg
2411 * @vf: pointer to the VF info
2413 * called from the VF to reset itself;
2414 * unlike other virtchnl messages, the PF driver
2415 * doesn't send a response back to the VF
2417 static void ice_vc_reset_vf_msg(struct ice_vf *vf)
2419 if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
2420 ice_reset_vf(vf, false);
2424 * ice_find_vsi_from_id
2425 * @pf: the PF structure to search for the VSI
2426 * @id: ID of the VSI it is searching for
2428 * searches for the VSI with the given ID
2430 static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
2434 ice_for_each_vsi(pf, i)
2435 if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
2442 * ice_vc_isvalid_vsi_id
2443 * @vf: pointer to the VF info
2444 * @vsi_id: VF relative VSI ID
2446 * check for the valid VSI ID
2448 bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
2450 struct ice_pf *pf = vf->pf;
2451 struct ice_vsi *vsi;
2453 vsi = ice_find_vsi_from_id(pf, vsi_id);
2455 return (vsi && (vsi->vf_id == vf->vf_id));
2459 * ice_vc_isvalid_q_id
2460 * @vf: pointer to the VF info
2462 * @qid: VSI relative queue ID
2464 * check for the valid queue ID
2466 static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
2468 struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
2469 /* allocated Tx and Rx queues should always be equal for a VF VSI */
2470 return (vsi && (qid < vsi->alloc_txq));
2474 * ice_vc_isvalid_ring_len
2475 * @ring_len: length of ring
2477 * check for a valid ring count; it should be a multiple of ICE_REQ_DESC_MULTIPLE
2480 static bool ice_vc_isvalid_ring_len(u16 ring_len)
2482 return ring_len == 0 ||
2483 (ring_len >= ICE_MIN_NUM_DESC &&
2484 ring_len <= ICE_MAX_NUM_DESC &&
2485 !(ring_len % ICE_REQ_DESC_MULTIPLE));
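/* Examples (illustrative; assuming ICE_MIN_NUM_DESC = 64, ICE_MAX_NUM_DESC =
 * 8160 and ICE_REQ_DESC_MULTIPLE = 32):
 *
 *	ice_vc_isvalid_ring_len(0)	-> true  (0 means "leave unchanged")
 *	ice_vc_isvalid_ring_len(512)	-> true  (in range, multiple of 32)
 *	ice_vc_isvalid_ring_len(100)	-> false (not a multiple of 32)
 *	ice_vc_isvalid_ring_len(16384)	-> false (above the maximum)
 */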
2489 * ice_vc_parse_rss_cfg - parses hash fields and headers from
2490 * a specific virtchnl RSS cfg
2491 * @hw: pointer to the hardware
2492 * @rss_cfg: pointer to the virtchnl RSS cfg
2493 * @addl_hdrs: pointer to the protocol header fields (ICE_FLOW_SEG_HDR_*)
2495 * @hash_flds: pointer to the hash bit fields (ICE_FLOW_HASH_*) to configure
2497 * Return true if all the protocol header and hash fields in the RSS cfg could
2498 * be parsed, else return false
2500 * This function parses the virtchnl RSS cfg into the intended
2501 * hash fields and the intended headers for the RSS configuration
2504 ice_vc_parse_rss_cfg(struct ice_hw *hw, struct virtchnl_rss_cfg *rss_cfg,
2505 u32 *addl_hdrs, u64 *hash_flds)
2507 const struct ice_vc_hash_field_match_type *hf_list;
2508 const struct ice_vc_hdr_match_type *hdr_list;
2509 int i, hf_list_len, hdr_list_len;
2511 if (!strncmp(hw->active_pkg_name, "ICE COMMS Package",
2512 sizeof(hw->active_pkg_name))) {
2513 hf_list = ice_vc_hash_field_list_comms;
2514 hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list_comms);
2515 hdr_list = ice_vc_hdr_list_comms;
2516 hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list_comms);
2518 hf_list = ice_vc_hash_field_list_os;
2519 hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list_os);
2520 hdr_list = ice_vc_hdr_list_os;
2521 hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list_os);
2524 for (i = 0; i < rss_cfg->proto_hdrs.count; i++) {
2525 struct virtchnl_proto_hdr *proto_hdr =
2526 &rss_cfg->proto_hdrs.proto_hdr[i];
2527 bool hdr_found = false;
2530 /* Find the matching ice headers for the given virtchnl headers. */
2531 for (j = 0; j < hdr_list_len; j++) {
2532 struct ice_vc_hdr_match_type hdr_map = hdr_list[j];
2534 if (proto_hdr->type == hdr_map.vc_hdr) {
2535 *addl_hdrs |= hdr_map.ice_hdr;
2543 /* Find the matching ice hash fields for the given
2544 * virtchnl hash fields.
2546 for (j = 0; j < hf_list_len; j++) {
2547 struct ice_vc_hash_field_match_type hf_map = hf_list[j];
2549 if (proto_hdr->type == hf_map.vc_hdr &&
2550 proto_hdr->field_selector == hf_map.vc_hash_field) {
2551 *hash_flds |= hf_map.ice_hash_field;
2561 * ice_vf_adv_rss_offload_ena - determine if capabilities support advanced
2563 * @caps: VF driver negotiated capabilities
2565 * Return true if VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF capability is set,
2568 static bool ice_vf_adv_rss_offload_ena(u32 caps)
2570 return !!(caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF);
2574 * ice_vc_handle_rss_cfg
2575 * @vf: pointer to the VF info
2576 * @msg: pointer to the message buffer
2577 * @add: add an RSS config if true, otherwise delete an RSS config
2579 * This function adds/deletes an RSS config
2581 static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
2583 u32 v_opcode = add ? VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG;
2584 struct virtchnl_rss_cfg *rss_cfg = (struct virtchnl_rss_cfg *)msg;
2585 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2586 struct device *dev = ice_pf_to_dev(vf->pf);
2587 struct ice_hw *hw = &vf->pf->hw;
2588 struct ice_vsi *vsi;
2590 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
2591 dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n",
2593 v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
2597 if (!ice_vf_adv_rss_offload_ena(vf->driver_caps)) {
2598 dev_dbg(dev, "VF %d attempting to configure RSS, but Advanced RSS offload is not supported\n",
2600 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2604 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2605 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2609 if (rss_cfg->proto_hdrs.count > VIRTCHNL_MAX_NUM_PROTO_HDRS ||
2610 rss_cfg->rss_algorithm < VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC ||
2611 rss_cfg->rss_algorithm > VIRTCHNL_RSS_ALG_XOR_SYMMETRIC) {
2612 dev_dbg(dev, "VF %d attempting to configure RSS, but RSS configuration is not valid\n",
2614 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2618 vsi = ice_get_vf_vsi(vf);
2620 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2624 if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) {
2625 struct ice_vsi_ctx *ctx;
2626 enum ice_status status;
2627 u8 lut_type, hash_type;
2629 lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
2630 hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_XOR :
2631 ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
2633 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2635 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
2639 ctx->info.q_opt_rss = ((lut_type <<
2640 ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
2641 ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
2643 ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
2645 /* Preserve existing queueing option setting */
2646 ctx->info.q_opt_rss |= (vsi->info.q_opt_rss &
2647 ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M);
2648 ctx->info.q_opt_tc = vsi->info.q_opt_tc;
2649 ctx->info.q_opt_flags = vsi->info.q_opt_rss;
2651 ctx->info.valid_sections =
2652 cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
2654 status = ice_update_vsi(hw, vsi->idx, ctx, NULL);
2656 dev_err(dev, "update VSI for RSS failed, err %s aq_err %s\n",
2657 ice_stat_str(status),
2658 ice_aq_str(hw->adminq.sq_last_status));
2659 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2661 vsi->info.q_opt_rss = ctx->info.q_opt_rss;
2666 u32 addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
2667 u64 hash_flds = ICE_HASH_INVALID;
2669 if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &addl_hdrs,
2671 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2676 if (ice_add_rss_cfg(hw, vsi->idx, hash_flds,
2678 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2679 dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n",
2680 vsi->vsi_num, v_ret);
2683 enum ice_status status;
2685 status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds,
2687 /* We just ignore ICE_ERR_DOES_NOT_EXIST, because
2688 * if two configurations share the same profile, removing
2689 * one of them actually removes both, since the
2690 * profile is deleted.
2692 if (status && status != ICE_ERR_DOES_NOT_EXIST) {
2693 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2694 dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%s\n",
2695 vf->vf_id, ice_stat_str(status));
2701 return ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0);
2705 * ice_vc_config_rss_key
2706 * @vf: pointer to the VF info
2707 * @msg: pointer to the msg buffer
2709 * Configure the VF's RSS key
2711 static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
2713 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2714 struct virtchnl_rss_key *vrk =
2715 (struct virtchnl_rss_key *)msg;
2716 struct ice_vsi *vsi;
2718 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2719 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2723 if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
2724 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2728 if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
2729 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2733 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
2734 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2738 vsi = ice_get_vf_vsi(vf);
2740 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2744 if (ice_set_rss_key(vsi, vrk->key))
2745 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2747 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
2752 * ice_vc_config_rss_lut
2753 * @vf: pointer to the VF info
2754 * @msg: pointer to the msg buffer
2756 * Configure the VF's RSS LUT
2758 static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
2760 struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
2761 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2762 struct ice_vsi *vsi;
2764 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2765 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2769 if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
2770 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2774 if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
2775 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2779 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
2780 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2784 vsi = ice_get_vf_vsi(vf);
2786 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2790 if (ice_set_rss_lut(vsi, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
2791 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2793 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
2798 * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
2799 * @vf: The VF being reset
2801 * The max poll time is ~800ms, which is about the maximum time it takes
2802 * for a VF to be reset and/or a VF driver to be removed.
2804 static void ice_wait_on_vf_reset(struct ice_vf *vf)
2808 for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
2809 if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
2811 msleep(ICE_MAX_VF_RESET_SLEEP_MS);
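/* Rough timing (sketch, assuming ICE_MAX_VF_RESET_TRIES = 40 and
 * ICE_MAX_VF_RESET_SLEEP_MS = 20, which is what the ~800ms figure above
 * implies): the loop gives up after roughly 40 * 20 ms = 800 ms if the VF
 * never reaches ICE_VF_STATE_INIT.
 */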
2816 * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
2817 * @vf: VF to check if it's ready to be configured/queried
2819 * The purpose of this function is to make sure the VF is not in reset, not
2820 * disabled, and initialized so it can be configured and/or queried by a host
2823 static int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
2827 ice_wait_on_vf_reset(vf);
2829 if (ice_is_vf_disabled(vf))
2833 if (ice_check_vf_init(pf, vf))
2840 * ice_set_vf_spoofchk
2841 * @netdev: network interface device structure
2842 * @vf_id: VF identifier
2843 * @ena: flag to enable or disable feature
2845 * Enable or disable VF spoof checking
2847 int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
2849 struct ice_netdev_priv *np = netdev_priv(netdev);
2850 struct ice_pf *pf = np->vsi->back;
2851 struct ice_vsi_ctx *ctx;
2852 struct ice_vsi *vf_vsi;
2853 enum ice_status status;
2858 dev = ice_pf_to_dev(pf);
2859 if (ice_validate_vf_id(pf, vf_id))
2862 vf = &pf->vf[vf_id];
2863 ret = ice_check_vf_ready_for_cfg(vf);
2867 vf_vsi = ice_get_vf_vsi(vf);
2869 netdev_err(netdev, "VSI %d for VF %d is null\n",
2870 vf->lan_vsi_idx, vf->vf_id);
2874 if (vf_vsi->type != ICE_VSI_VF) {
2875 netdev_err(netdev, "Type %d of VSI %d for VF %d is no ICE_VSI_VF\n",
2876 vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
2880 if (ena == vf->spoofchk) {
2881 dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
2885 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2889 ctx->info.sec_flags = vf_vsi->info.sec_flags;
2890 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
2892 ctx->info.sec_flags |=
2893 ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
2894 (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2895 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
2897 ctx->info.sec_flags &=
2898 ~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
2899 (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2900 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
2903 status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
2905 dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %s\n",
2906 ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num,
2907 ice_stat_str(status));
2912 /* only update spoofchk state and VSI context on success */
2913 vf_vsi->info.sec_flags = ctx->info.sec_flags;
2922 * ice_is_any_vf_in_promisc - check if any VF(s) are in promiscuous mode
2923 * @pf: PF structure for accessing VF(s)
2925 * Return false if no VF(s) are in unicast and/or multicast promiscuous mode,
2928 bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
2932 ice_for_each_vf(pf, vf_idx) {
2933 struct ice_vf *vf = &pf->vf[vf_idx];
2935 /* found a VF that has promiscuous mode configured */
2936 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
2937 test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
2945 * ice_vc_cfg_promiscuous_mode_msg
2946 * @vf: pointer to the VF info
2947 * @msg: pointer to the msg buffer
2949 * called from the VF to configure the VF VSI's promiscuous mode
2951 static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
2953 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2954 bool rm_promisc, alluni = false, allmulti = false;
2955 struct virtchnl_promisc_info *info =
2956 (struct virtchnl_promisc_info *)msg;
2957 struct ice_pf *pf = vf->pf;
2958 struct ice_vsi *vsi;
2962 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2963 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2967 if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) {
2968 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2972 vsi = ice_get_vf_vsi(vf);
2974 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2978 dev = ice_pf_to_dev(pf);
2979 if (!test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2980 dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
2982 /* Leave v_ret alone, lie to the VF on purpose. */
2986 if (info->flags & FLAG_VF_UNICAST_PROMISC)
2989 if (info->flags & FLAG_VF_MULTICAST_PROMISC)
2992 rm_promisc = !allmulti && !alluni;
2994 if (vsi->num_vlan || vf->port_vlan_info) {
2995 struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
2996 struct net_device *pf_netdev;
2999 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3003 pf_netdev = pf_vsi->netdev;
3005 ret = ice_set_vf_spoofchk(pf_netdev, vf->vf_id, rm_promisc);
3007 dev_err(dev, "Failed to update spoofchk to %s for VF %d VSI %d when setting promiscuous mode\n",
3008 rm_promisc ? "ON" : "OFF", vf->vf_id,
3010 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3013 ret = ice_cfg_vlan_pruning(vsi, true, !rm_promisc);
3015 dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
3016 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3021 if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
3022 bool set_dflt_vsi = alluni || allmulti;
3024 if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw))
3025 /* only attempt to set the default forwarding VSI if
3026 * it's not currently set
3028 ret = ice_set_dflt_vsi(pf->first_sw, vsi);
3029 else if (!set_dflt_vsi &&
3030 ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
3031 /* only attempt to free the default forwarding VSI if we
3034 ret = ice_clear_dflt_vsi(pf->first_sw);
3037 dev_err(dev, "%sable VF %d as the default VSI failed, error %d\n",
3038 set_dflt_vsi ? "en" : "dis", vf->vf_id, ret);
3039 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
3043 enum ice_status status;
3047 if (vf->port_vlan_info || vsi->num_vlan)
3048 promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
3050 promisc_m = ICE_UCAST_PROMISC_BITS;
3051 } else if (allmulti) {
3052 if (vf->port_vlan_info || vsi->num_vlan)
3053 promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
3055 promisc_m = ICE_MCAST_PROMISC_BITS;
3057 if (vf->port_vlan_info || vsi->num_vlan)
3058 promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
3060 promisc_m = ICE_UCAST_PROMISC_BITS;
3063 /* Configure multicast/unicast with or without VLAN promiscuous
3066 status = ice_vf_set_vsi_promisc(vf, vsi, promisc_m, rm_promisc);
3068 dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed, error: %s\n",
3069 rm_promisc ? "dis" : "en", vf->vf_id,
3070 ice_stat_str(status));
3071 v_ret = ice_err_to_virt_err(status);
3074 dev_dbg(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d succeeded\n",
3075 rm_promisc ? "dis" : "en", vf->vf_id);
3080 !test_and_set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
3081 dev_info(dev, "VF %u successfully set multicast promiscuous mode\n", vf->vf_id);
3082 else if (!allmulti && test_and_clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
3083 dev_info(dev, "VF %u successfully unset multicast promiscuous mode\n", vf->vf_id);
3085 if (alluni && !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
3086 dev_info(dev, "VF %u successfully set unicast promiscuous mode\n", vf->vf_id);
3087 else if (!alluni && test_and_clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
3088 dev_info(dev, "VF %u successfully unset unicast promiscuous mode\n", vf->vf_id);
3091 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
3096 * ice_vc_get_stats_msg
3097 * @vf: pointer to the VF info
3098 * @msg: pointer to the msg buffer
3100 * called from the VF to get VSI stats
3102 static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
3104 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3105 struct virtchnl_queue_select *vqs =
3106 (struct virtchnl_queue_select *)msg;
3107 struct ice_eth_stats stats = { 0 };
3108 struct ice_vsi *vsi;
3110 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3111 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3115 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
3116 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3120 vsi = ice_get_vf_vsi(vf);
3122 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3126 ice_update_eth_stats(vsi);
3128 stats = vsi->eth_stats;
3131 /* send the response to the VF */
3132 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
3133 (u8 *)&stats, sizeof(stats));
3137 * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
3138 * @vqs: virtchnl_queue_select structure containing bitmaps to validate
3140 * Return true on successful validation, else false
3142 static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
3144 if ((!vqs->rx_queues && !vqs->tx_queues) ||
3145 vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
3146 vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
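/* Examples (illustrative, assuming ICE_MAX_RSS_QS_PER_VF = 16): rx_queues =
 * 0x3 with tx_queues = 0x3 (queues 0 and 1) is valid; both bitmaps being 0,
 * or any bitmap with bit 16 or above set, is rejected.
 */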
3153 * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
3154 * @vsi: VSI of the VF to configure
3155 * @q_idx: VF queue index used to determine the queue in the PF's space
3157 static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
3159 struct ice_hw *hw = &vsi->back->hw;
3160 u32 pfq = vsi->txq_map[q_idx];
3163 reg = rd32(hw, QINT_TQCTL(pfq));
3165 /* MSI-X index 0 in the VF's space is always for the OICR, which means
3166 * this is most likely a poll mode VF driver, so don't enable an
3167 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
3169 if (!(reg & QINT_TQCTL_MSIX_INDX_M))
3172 wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
3176 * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
3177 * @vsi: VSI of the VF to configure
3178 * @q_idx: VF queue index used to determine the queue in the PF's space
3180 static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
3182 struct ice_hw *hw = &vsi->back->hw;
3183 u32 pfq = vsi->rxq_map[q_idx];
3186 reg = rd32(hw, QINT_RQCTL(pfq));
3188 /* MSI-X index 0 in the VF's space is always for the OICR, which means
3189 * this is most likely a poll mode VF driver, so don't enable an
3190 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
3192 if (!(reg & QINT_RQCTL_MSIX_INDX_M))
3195 wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
3200 * @vf: pointer to the VF info
3201 * @msg: pointer to the msg buffer
3203 * called from the VF to enable all or specific queue(s)
3205 static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
3207 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3208 struct virtchnl_queue_select *vqs =
3209 (struct virtchnl_queue_select *)msg;
3210 struct ice_vsi *vsi;
3211 unsigned long q_map;
3214 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3215 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3219 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
3220 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3224 if (!ice_vc_validate_vqs_bitmaps(vqs)) {
3225 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3229 vsi = ice_get_vf_vsi(vf);
3231 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3235 /* Enable only Rx rings; Tx rings were enabled by the FW when the
3236 * Tx queue group list was configured and the context bits were
3237 * programmed using ice_vsi_cfg_txqs
3239 q_map = vqs->rx_queues;
3240 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
3241 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
3242 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3246 /* Skip queue if enabled */
3247 if (test_bit(vf_q_id, vf->rxq_ena))
3250 if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
3251 dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
3252 vf_q_id, vsi->vsi_num);
3253 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3257 ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
3258 set_bit(vf_q_id, vf->rxq_ena);
3261 q_map = vqs->tx_queues;
3262 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
3263 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
3264 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3268 /* Skip queue if enabled */
3269 if (test_bit(vf_q_id, vf->txq_ena))
3272 ice_vf_ena_txq_interrupt(vsi, vf_q_id);
3273 set_bit(vf_q_id, vf->txq_ena);
3276 /* Set flag to indicate that queues are enabled */
3277 if (v_ret == VIRTCHNL_STATUS_SUCCESS)
3278 set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
3281 /* send the response to the VF */
3282 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
3288 * @vf: pointer to the VF info
3289 * @msg: pointer to the msg buffer
3291 * called from the VF to disable all or specific
3294 static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
3296 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3297 struct virtchnl_queue_select *vqs =
3298 (struct virtchnl_queue_select *)msg;
3299 struct ice_vsi *vsi;
3300 unsigned long q_map;
3303 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
3304 !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
3305 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3309 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
3310 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3314 if (!ice_vc_validate_vqs_bitmaps(vqs)) {
3315 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3319 vsi = ice_get_vf_vsi(vf);
3321 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3325 if (vqs->tx_queues) {
3326 q_map = vqs->tx_queues;
3328 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
3329 struct ice_ring *ring = vsi->tx_rings[vf_q_id];
3330 struct ice_txq_meta txq_meta = { 0 };
3332 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
3333 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3337 /* Skip queue if not enabled */
3338 if (!test_bit(vf_q_id, vf->txq_ena))
3341 ice_fill_txq_meta(vsi, ring, &txq_meta);
3343 if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
3345 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
3346 vf_q_id, vsi->vsi_num);
3347 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3351 /* Clear enabled queues flag */
3352 clear_bit(vf_q_id, vf->txq_ena);
3356 q_map = vqs->rx_queues;
3357 /* speed up Rx queue disable by batching them if possible */
3359 bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
3360 if (ice_vsi_stop_all_rx_rings(vsi)) {
3361 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
3363 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3367 bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
3369 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
3370 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
3371 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3375 /* Skip queue if not enabled */
3376 if (!test_bit(vf_q_id, vf->rxq_ena))
3379 if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
3381 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
3382 vf_q_id, vsi->vsi_num);
3383 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3387 /* Clear enabled queues flag */
3388 clear_bit(vf_q_id, vf->rxq_ena);
3392 /* Clear enabled queues flag */
3393 if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
3394 clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
3397 /* send the response to the VF */
3398 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
3404 * @vf: pointer to the VF info
3405 * @vsi: the VSI being configured
3406 * @vector_id: vector ID
3407 * @map: vector map for mapping vectors to queues
3408 * @q_vector: structure for interrupt vector
3409 * configure the IRQ to queue map
3412 ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
3413 struct virtchnl_vector_map *map,
3414 struct ice_q_vector *q_vector)
3416 u16 vsi_q_id, vsi_q_id_idx;
3419 q_vector->num_ring_rx = 0;
3420 q_vector->num_ring_tx = 0;
3422 qmap = map->rxq_map;
3423 for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
3424 vsi_q_id = vsi_q_id_idx;
3426 if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
3427 return VIRTCHNL_STATUS_ERR_PARAM;
3429 q_vector->num_ring_rx++;
3430 q_vector->rx.itr_idx = map->rxitr_idx;
3431 vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
3432 ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
3433 q_vector->rx.itr_idx);
3436 qmap = map->txq_map;
3437 for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
3438 vsi_q_id = vsi_q_id_idx;
3440 if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
3441 return VIRTCHNL_STATUS_ERR_PARAM;
3443 q_vector->num_ring_tx++;
3444 q_vector->tx.itr_idx = map->txitr_idx;
3445 vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
3446 ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
3447 q_vector->tx.itr_idx);
3450 return VIRTCHNL_STATUS_SUCCESS;
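/* Example (sketch): a vector map with rxq_map = 0x3 and txq_map = 0x3
 * attaches Rx queues 0-1 and Tx queues 0-1 to this vector, so num_ring_rx
 * and num_ring_tx both end up as 2 and each ring's ITR index is taken from
 * rxitr_idx/txitr_idx respectively.
 */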
3454 * ice_vc_cfg_irq_map_msg
3455 * @vf: pointer to the VF info
3456 * @msg: pointer to the msg buffer
3458 * called from the VF to configure the IRQ to queue map
3460 static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
3462 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3463 u16 num_q_vectors_mapped, vsi_id, vector_id;
3464 struct virtchnl_irq_map_info *irqmap_info;
3465 struct virtchnl_vector_map *map;
3466 struct ice_pf *pf = vf->pf;
3467 struct ice_vsi *vsi;
3470 irqmap_info = (struct virtchnl_irq_map_info *)msg;
3471 num_q_vectors_mapped = irqmap_info->num_vectors;
3473 /* Check to make sure number of VF vectors mapped is not greater than
3474 * number of VF vectors originally allocated, and check that
3475 * there is actually at least a single VF queue vector mapped
3477 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
3478 pf->num_msix_per_vf < num_q_vectors_mapped ||
3479 !num_q_vectors_mapped) {
3480 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3484 vsi = ice_get_vf_vsi(vf);
3486 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3490 for (i = 0; i < num_q_vectors_mapped; i++) {
3491 struct ice_q_vector *q_vector;
3493 map = &irqmap_info->vecmap[i];
3495 vector_id = map->vector_id;
3496 vsi_id = map->vsi_id;
3497 /* vector_id is always 0-based for each VF, and can never be
3498 * larger than or equal to the max allowed interrupts per VF
3500 if (!(vector_id < pf->num_msix_per_vf) ||
3501 !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
3502 (!vector_id && (map->rxq_map || map->txq_map))) {
3503 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3507 /* No need to map VF miscellaneous or rogue vector */
3511 /* Subtract the non-queue vector from the vector_id passed by the VF
3512 * to get the actual VSI queue vector array index
3514 q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
3516 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3520 /* look out for an invalid queue index */
3521 v_ret = (enum virtchnl_status_code)
3522 ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
3528 /* send the response to the VF */
3529 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
3535 * @vf: pointer to the VF info
3536 * @msg: pointer to the msg buffer
3538 * called from the VF to configure the Rx/Tx queues
3540 static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
3542 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3543 struct virtchnl_vsi_queue_config_info *qci =
3544 (struct virtchnl_vsi_queue_config_info *)msg;
3545 struct virtchnl_queue_pair_info *qpi;
3546 struct ice_pf *pf = vf->pf;
3547 struct ice_vsi *vsi;
3550 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3551 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3555 if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
3556 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3560 vsi = ice_get_vf_vsi(vf);
3562 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3566 if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
3567 qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
3568 dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
3569 vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
3570 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3574 for (i = 0; i < qci->num_queue_pairs; i++) {
3575 qpi = &qci->qpair[i];
3576 if (qpi->txq.vsi_id != qci->vsi_id ||
3577 qpi->rxq.vsi_id != qci->vsi_id ||
3578 qpi->rxq.queue_id != qpi->txq.queue_id ||
3579 qpi->txq.headwb_enabled ||
3580 !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
3581 !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
3582 !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
3583 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3587 q_idx = qpi->rxq.queue_id;
3589 /* make sure selected "q_idx" is in valid range of queues
3590 * for selected "vsi"
3592 if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {
3593 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3597 /* copy Tx queue info from VF into VSI */
3598 if (qpi->txq.ring_len > 0) {
3599 vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
3600 vsi->tx_rings[i]->count = qpi->txq.ring_len;
3601 if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
3602 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3607 /* copy Rx queue info from VF into VSI */
3608 if (qpi->rxq.ring_len > 0) {
3609 u16 max_frame_size = ice_vc_get_max_frame_size(vf);
3611 vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
3612 vsi->rx_rings[i]->count = qpi->rxq.ring_len;
3614 if (qpi->rxq.databuffer_size != 0 &&
3615 (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
3616 qpi->rxq.databuffer_size < 1024)) {
3617 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3620 vsi->rx_buf_len = qpi->rxq.databuffer_size;
3621 vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
3622 if (qpi->rxq.max_pkt_size > max_frame_size ||
3623 qpi->rxq.max_pkt_size < 64) {
3624 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3628 vsi->max_frame = qpi->rxq.max_pkt_size;
3629 /* add space for the port VLAN since the VF driver is not
3630 * expected to account for it in the MTU calculation
3632 if (vf->port_vlan_info)
3633 vsi->max_frame += VLAN_HLEN;
3635 if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
3636 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3643 /* send the response to the VF */
3644 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
3650 * @vf: pointer to the VF info
3652 static bool ice_is_vf_trusted(struct ice_vf *vf)
3654 return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
3658 * ice_can_vf_change_mac
3659 * @vf: pointer to the VF info
3661 * Return true if the VF is allowed to change its MAC filters, false otherwise
3663 static bool ice_can_vf_change_mac(struct ice_vf *vf)
3665 /* If the VF MAC address has been set administratively (via the
3666 * ndo_set_vf_mac command), then deny permission to the VF to
3667 * add/delete unicast MAC addresses, unless the VF is trusted
3669 if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
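/* Examples (sketch): an untrusted VF whose MAC was set administratively
 * ("ip link set <pf> vf <id> mac ...", i.e. pf_set_mac is true) may not
 * change its unicast filters; the same VF marked trusted, or any VF whose
 * MAC was never set by the administrator, may.
 */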
3676 * ice_vc_ether_addr_type - get type of virtchnl_ether_addr
3677 * @vc_ether_addr: used to extract the type
3680 ice_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr)
3682 return (vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK);
3686 * ice_is_vc_addr_legacy - check if the MAC address is from an older VF
3687 * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
3690 ice_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr)
3692 u8 type = ice_vc_ether_addr_type(vc_ether_addr);
3694 return (type == VIRTCHNL_ETHER_ADDR_LEGACY);
3698 * ice_is_vc_addr_primary - check if the MAC address is the VF's primary MAC
3699 * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
3701 * This function should only be called when the MAC address in
3702 * virtchnl_ether_addr is a valid unicast MAC
3705 ice_is_vc_addr_primary(struct virtchnl_ether_addr __maybe_unused *vc_ether_addr)
3707 u8 type = ice_vc_ether_addr_type(vc_ether_addr);
3709 return (type == VIRTCHNL_ETHER_ADDR_PRIMARY);
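/* Example (sketch): a new VF driver adds its primary MAC with type set to
 * VIRTCHNL_ETHER_ADDR_PRIMARY, so ice_is_vc_addr_primary() returns true and
 * the PF caches it in both dev_lan_addr and hw_lan_addr; a legacy VF driver
 * leaves the type as VIRTCHNL_ETHER_ADDR_LEGACY and the PF has to infer the
 * primary MAC from the add/delete ordering instead.
 */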
3713 * ice_vfhw_mac_add - update the VF's cached hardware MAC if allowed
3715 * @vc_ether_addr: structure from VIRTCHNL with MAC to add
3718 ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
3720 u8 *mac_addr = vc_ether_addr->addr;
3722 if (!is_valid_ether_addr(mac_addr))
3725 /* only allow legacy VF drivers to set the device and hardware MAC if it
3726 * is zero and allow new VF drivers to set the hardware MAC if the type
3727 * was correctly specified over VIRTCHNL
3729 if ((ice_is_vc_addr_legacy(vc_ether_addr) &&
3730 is_zero_ether_addr(vf->hw_lan_addr.addr)) ||
3731 ice_is_vc_addr_primary(vc_ether_addr)) {
3732 ether_addr_copy(vf->dev_lan_addr.addr, mac_addr);
3733 ether_addr_copy(vf->hw_lan_addr.addr, mac_addr);
3736 /* hardware and device MACs are already set, but it's possible that the
3737 * VF driver sent the VIRTCHNL_OP_ADD_ETH_ADDR message before the
3738 * VIRTCHNL_OP_DEL_ETH_ADDR when trying to update its MAC, so save it
3739 * away for the legacy VF driver case as it will be updated in the
3740 * delete flow for this case
3742 if (ice_is_vc_addr_legacy(vc_ether_addr)) {
3743 ether_addr_copy(vf->legacy_last_added_umac.addr,
3745 vf->legacy_last_added_umac.time_modified = jiffies;
3750 * ice_vc_add_mac_addr - attempt to add the MAC address passed in
3751 * @vf: pointer to the VF info
3752 * @vsi: pointer to the VF's VSI
3753 * @vc_ether_addr: VIRTCHNL MAC address structure used to add MAC
3756 ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
3757 struct virtchnl_ether_addr *vc_ether_addr)
3759 struct device *dev = ice_pf_to_dev(vf->pf);
3760 u8 *mac_addr = vc_ether_addr->addr;
3761 enum ice_status status;
3763 /* device MAC already added */
3764 if (ether_addr_equal(mac_addr, vf->dev_lan_addr.addr))
3767 if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
3768 dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
3772 status = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
3773 if (status == ICE_ERR_ALREADY_EXISTS) {
3774 dev_err(dev, "MAC %pM already exists for VF %d\n", mac_addr,
3777 } else if (status) {
3778 dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %s\n",
3779 mac_addr, vf->vf_id, ice_stat_str(status));
3783 ice_vfhw_mac_add(vf, vc_ether_addr);
3791 * ice_is_legacy_umac_expired - check if last added legacy unicast MAC expired
3792 * @last_added_umac: structure used to check expiration
3794 static bool ice_is_legacy_umac_expired(struct ice_time_mac *last_added_umac)
3796 #define ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME msecs_to_jiffies(3000)
3797 return time_is_before_jiffies(last_added_umac->time_modified +
3798 ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME);
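/* Example (illustrative): if a legacy VF driver added a unicast MAC at time
 * T, a delete arriving more than 3 seconds (3000 ms, per the define above)
 * after T no longer treats that MAC as the "last added" one, so the cached
 * hardware MAC is not rolled back to it in the delete flow below.
 */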
3802 * ice_vfhw_mac_del - update the VF's cached hardware MAC if allowed
3804 * @vc_ether_addr: structure from VIRTCHNL with MAC to delete
3807 ice_vfhw_mac_del(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
3809 u8 *mac_addr = vc_ether_addr->addr;
3811 if (!is_valid_ether_addr(mac_addr) ||
3812 !ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
3815 /* allow the device MAC to be repopulated in the add flow and don't
3816 * clear the hardware MAC (i.e. hw_lan_addr.addr) here as that is meant
3817 * to be persistent on VM reboot and across driver unload/load, which
3818 * won't work if we clear the hardware MAC here
3820 eth_zero_addr(vf->dev_lan_addr.addr);
3822 /* only update cached hardware MAC for legacy VF drivers on delete
3823 * because we cannot guarantee order/type of MAC from the VF driver
3825 if (ice_is_vc_addr_legacy(vc_ether_addr) &&
3826 !ice_is_legacy_umac_expired(&vf->legacy_last_added_umac)) {
3827 ether_addr_copy(vf->dev_lan_addr.addr,
3828 vf->legacy_last_added_umac.addr);
3829 ether_addr_copy(vf->hw_lan_addr.addr,
3830 vf->legacy_last_added_umac.addr);
3835 * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
3836 * @vf: pointer to the VF info
3837 * @vsi: pointer to the VF's VSI
3838 * @vc_ether_addr: VIRTCHNL MAC address structure used to delete MAC
3841 ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
3842 struct virtchnl_ether_addr *vc_ether_addr)
3844 struct device *dev = ice_pf_to_dev(vf->pf);
3845 u8 *mac_addr = vc_ether_addr->addr;
3846 enum ice_status status;
3848 if (!ice_can_vf_change_mac(vf) &&
3849 ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
3852 status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
3853 if (status == ICE_ERR_DOES_NOT_EXIST) {
3854 dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
3857 } else if (status) {
3858 dev_err(dev, "Failed to delete MAC %pM for VF %d, error %s\n",
3859 mac_addr, vf->vf_id, ice_stat_str(status));
3863 ice_vfhw_mac_del(vf, vc_ether_addr);
3871 * ice_vc_handle_mac_addr_msg
3872 * @vf: pointer to the VF info
3873 * @msg: pointer to the msg buffer
3874 * @set: true if MAC filters are being set, false otherwise
3876 * add or remove guest MAC address filters
3879 ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
3881 int (*ice_vc_cfg_mac)
3882 (struct ice_vf *vf, struct ice_vsi *vsi,
3883 struct virtchnl_ether_addr *virtchnl_ether_addr);
3884 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3885 struct virtchnl_ether_addr_list *al =
3886 (struct virtchnl_ether_addr_list *)msg;
3887 struct ice_pf *pf = vf->pf;
3888 enum virtchnl_ops vc_op;
3889 struct ice_vsi *vsi;
3893 vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
3894 ice_vc_cfg_mac = ice_vc_add_mac_addr;
3896 vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
3897 ice_vc_cfg_mac = ice_vc_del_mac_addr;
3900 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
3901 !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
3902 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3903 goto handle_mac_exit;
3906 /* If this VF is not privileged, then we can't add more than a
3907 * limited number of addresses. Check to make sure that the
3908 * additions do not push us over the limit.
3910 if (set && !ice_is_vf_trusted(vf) &&
3911 (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
3912 dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
3914 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3915 goto handle_mac_exit;
3918 vsi = ice_get_vf_vsi(vf);
3920 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3921 goto handle_mac_exit;
3924 for (i = 0; i < al->num_elements; i++) {
3925 u8 *mac_addr = al->list[i].addr;
3928 if (is_broadcast_ether_addr(mac_addr) ||
3929 is_zero_ether_addr(mac_addr))
3932 result = ice_vc_cfg_mac(vf, vsi, &al->list[i]);
3933 if (result == -EEXIST || result == -ENOENT) {
3935 } else if (result) {
3936 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
3937 goto handle_mac_exit;
3942 /* send the response to the VF */
3943 return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
3947 * ice_vc_add_mac_addr_msg
3948 * @vf: pointer to the VF info
3949 * @msg: pointer to the msg buffer
3951 * add guest MAC address filter
3953 static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
3955 return ice_vc_handle_mac_addr_msg(vf, msg, true);
3959 * ice_vc_del_mac_addr_msg
3960 * @vf: pointer to the VF info
3961 * @msg: pointer to the msg buffer
3963 * remove guest MAC address filter
3965 static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
3967 return ice_vc_handle_mac_addr_msg(vf, msg, false);
3971 * ice_vc_request_qs_msg
3972 * @vf: pointer to the VF info
3973 * @msg: pointer to the msg buffer
3975 * VFs get a default number of queues but can use this message to request a
3976 * different number. If the request is successful, the PF will reset the VF and
3977 * return 0. If unsuccessful, the PF will send a message informing the VF of the
3978 * number of available queue pairs via the virtchnl message response.
3980 static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
3982 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3983 struct virtchnl_vf_res_request *vfres =
3984 (struct virtchnl_vf_res_request *)msg;
3985 u16 req_queues = vfres->num_queue_pairs;
3986 struct ice_pf *pf = vf->pf;
3987 u16 max_allowed_vf_queues;
3988 u16 tx_rx_queue_left;
3992 dev = ice_pf_to_dev(pf);
3993 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3994 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3998 cur_queues = vf->num_vf_qs;
3999 tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
4000 ice_get_avail_rxq_count(pf));
4001 max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
4003 dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
4005 } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
4006 dev_err(dev, "VF %d tried to request more than %d queues.\n",
4007 vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
4008 vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
4009 } else if (req_queues > cur_queues &&
4010 req_queues - cur_queues > tx_rx_queue_left) {
4011 dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
4012 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
4013 vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
4014 ICE_MAX_RSS_QS_PER_VF);
4016 /* request is successful, then reset VF */
4017 vf->num_req_qs = req_queues;
4018 ice_vc_reset_vf(vf);
4019 dev_info(dev, "VF %d granted request of %u queues.\n",
4020 vf->vf_id, req_queues);
4025 /* send the response to the VF */
4026 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
4027 v_ret, (u8 *)vfres, sizeof(*vfres));
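/* Example (illustrative numbers): a VF currently using 4 queue pairs that
 * requests 8 while only 2 Tx/Rx pairs remain on the PF is answered with
 * min(4 + 2, ICE_MAX_RSS_QS_PER_VF) = 6 as the achievable count and is not
 * reset; a request that fits within the remaining budget stores num_req_qs
 * and triggers ice_vc_reset_vf() so the new count takes effect.
 */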
4031 * ice_set_vf_port_vlan
4032 * @netdev: network interface device structure
4033 * @vf_id: VF identifier
4034 * @vlan_id: VLAN ID being set
4035 * @qos: priority setting
4036 * @vlan_proto: VLAN protocol
4038 * program VF Port VLAN ID and/or QoS
4041 ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
4044 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4050 dev = ice_pf_to_dev(pf);
4051 if (ice_validate_vf_id(pf, vf_id))
4054 if (vlan_id >= VLAN_N_VID || qos > 7) {
4055 dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
4056 vf_id, vlan_id, qos);
4060 if (vlan_proto != htons(ETH_P_8021Q)) {
4061 dev_err(dev, "VF VLAN protocol is not supported\n");
4062 return -EPROTONOSUPPORT;
4065 vf = &pf->vf[vf_id];
4066 ret = ice_check_vf_ready_for_cfg(vf);
4070 vlanprio = vlan_id | (qos << VLAN_PRIO_SHIFT);
4072 if (vf->port_vlan_info == vlanprio) {
4073 /* duplicate request, so just return success */
4074 dev_dbg(dev, "Duplicate pvid %d request\n", vlanprio);
4078 vf->port_vlan_info = vlanprio;
4080 if (vf->port_vlan_info)
4081 dev_info(dev, "Setting VLAN %d, QoS 0x%x on VF %d\n",
4082 vlan_id, qos, vf_id);
4084 dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);
4086 ice_vc_reset_vf(vf);
4092 * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
4093 * @caps: VF driver negotiated capabilities
4095 * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
4097 static bool ice_vf_vlan_offload_ena(u32 caps)
4099 return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
4103 * ice_vc_process_vlan_msg
4104 * @vf: pointer to the VF info
4105 * @msg: pointer to the msg buffer
4106 * @add_v: Add VLAN if true, otherwise delete VLAN
4108 * Process virtchnl op to add or remove programmed guest VLAN ID
4110 static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
4112 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
4113 struct virtchnl_vlan_filter_list *vfl =
4114 (struct virtchnl_vlan_filter_list *)msg;
4115 struct ice_pf *pf = vf->pf;
4116 bool vlan_promisc = false;
4117 struct ice_vsi *vsi;
4124 dev = ice_pf_to_dev(pf);
4125 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
4126 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4130 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
4131 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4135 if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
4136 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4140 for (i = 0; i < vfl->num_elements; i++) {
4141 if (vfl->vlan_id[i] >= VLAN_N_VID) {
4142 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4143 dev_err(dev, "invalid VF VLAN id %d\n",
4150 vsi = ice_get_vf_vsi(vf);
4152 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4156 if (add_v && !ice_is_vf_trusted(vf) &&
4157 vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
4158 dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
4160 /* There is no need to let VF know about being not trusted,
4161 * so we can just return success message here
4166 if (vsi->info.pvid) {
4167 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4171 if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
4172 test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
4173 test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags))
4174 vlan_promisc = true;
4177 for (i = 0; i < vfl->num_elements; i++) {
4178 u16 vid = vfl->vlan_id[i];
4180 if (!ice_is_vf_trusted(vf) &&
4181 vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
4182 dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
4184 /* There is no need to let the VF know that it is
4185 * not trusted, so just return a success message
4186 * here as well.
4191 /* VLAN 0 is added by default for each VF so that Tx VLAN
4192 * anti-spoof can be enabled without triggering MDD events;
4193 * there is no need to add it again here
4198 status = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
4200 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4204 /* Enable VLAN pruning when non-zero VLAN is added */
4205 if (!vlan_promisc && vid &&
4206 !ice_vsi_is_vlan_pruning_ena(vsi)) {
4207 status = ice_cfg_vlan_pruning(vsi, true, false);
4209 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4210 dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
4214 } else if (vlan_promisc) {
4215 /* Enable Ucast/Mcast VLAN promiscuous mode */
4216 promisc_m = ICE_PROMISC_VLAN_TX |
4217 ICE_PROMISC_VLAN_RX;
4219 status = ice_set_vsi_promisc(hw, vsi->idx,
4222 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4223 dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
4229 /* For a non-trusted VF, the number of VLAN elements passed
4230 * to the PF for removal may exceed the number of VLAN filters
4231 * actually programmed for that VF. Cap the loop at the number
4232 * of VLANs added earlier with the add VLAN opcode to avoid
4233 * removing a VLAN that doesn't exist, which would send an
4234 * erroneous failure message back to the VF
4238 num_vf_vlan = vsi->num_vlan;
4239 for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
4240 u16 vid = vfl->vlan_id[i];
4242 /* VLAN 0 is added by default for each VF so that Tx VLAN
4243 * anti-spoof can be enabled without triggering MDD events;
4244 * don't let a VIRTCHNL request remove it
4249 /* Make sure ice_vsi_kill_vlan is successful before
4250 * updating VLAN information
4252 status = ice_vsi_kill_vlan(vsi, vid);
4254 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4258 /* Disable VLAN pruning when only VLAN 0 is left */
4259 if (vsi->num_vlan == 1 &&
4260 ice_vsi_is_vlan_pruning_ena(vsi))
4261 ice_cfg_vlan_pruning(vsi, false, false);
4263 /* Disable Unicast/Multicast VLAN promiscuous mode */
4265 promisc_m = ICE_PROMISC_VLAN_TX |
4266 ICE_PROMISC_VLAN_RX;
4268 ice_clear_vsi_promisc(hw, vsi->idx,
4275 /* send the response to the VF */
4277 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
4280 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
4285 * ice_vc_add_vlan_msg
4286 * @vf: pointer to the VF info
4287 * @msg: pointer to the msg buffer
4289 * Add and program guest VLAN ID
4291 static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
4293 return ice_vc_process_vlan_msg(vf, msg, true);
4297 * ice_vc_remove_vlan_msg
4298 * @vf: pointer to the VF info
4299 * @msg: pointer to the msg buffer
4301 * remove programmed guest VLAN ID
4303 static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
4305 return ice_vc_process_vlan_msg(vf, msg, false);
4309 * ice_vc_ena_vlan_stripping
4310 * @vf: pointer to the VF info
4312 * Enable VLAN header stripping for a given VF
4314 static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
4316 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
4317 struct ice_vsi *vsi;
4319 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
4320 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4324 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
4325 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4329 vsi = ice_get_vf_vsi(vf);
4330 if (ice_vsi_manage_vlan_stripping(vsi, true))
4331 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4334 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
4339 * ice_vc_dis_vlan_stripping
4340 * @vf: pointer to the VF info
4342 * Disable VLAN header stripping for a given VF
4344 static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
4346 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
4347 struct ice_vsi *vsi;
4349 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
4350 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4354 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
4355 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4359 vsi = ice_get_vf_vsi(vf);
4361 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4365 if (ice_vsi_manage_vlan_stripping(vsi, false))
4366 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4369 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
4374 * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
4375 * @vf: VF to enable/disable VLAN stripping for on initialization
4377 * If the VIRTCHNL_VF_OFFLOAD_VLAN flag is set, enable VLAN stripping; if the
4378 * flag is cleared, disable stripping. For example, the flag will be cleared
4379 * when port VLANs are configured by the administrator before passing the VF
4380 * to the guest, or if the AVF driver doesn't support VLAN
4383 static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
4385 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
4390 /* don't modify stripping if port VLAN is configured */
4394 if (ice_vf_vlan_offload_ena(vf->driver_caps))
4395 return ice_vsi_manage_vlan_stripping(vsi, true);
4397 return ice_vsi_manage_vlan_stripping(vsi, false);
4401 * ice_vc_process_vf_msg - Process request from VF
4402 * @pf: pointer to the PF structure
4403 * @event: pointer to the AQ event
4405 * called from the common asq/arq handler to
4406 * process requests from the VF
4408 void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
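/* the mailbox descriptor carries the virtchnl opcode in cookie_high
 * and the originating VF ID in its retval field
 */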
4410 u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
4411 s16 vf_id = le16_to_cpu(event->desc.retval);
4412 u16 msglen = event->msg_len;
4413 u8 *msg = event->msg_buf;
4414 struct ice_vf *vf = NULL;
4418 dev = ice_pf_to_dev(pf);
4419 if (ice_validate_vf_id(pf, vf_id)) {
4424 vf = &pf->vf[vf_id];
4426 /* Check if VF is disabled. */
4427 if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
4432 /* Perform basic checks on the msg */
4433 err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
4435 if (err == VIRTCHNL_STATUS_ERR_PARAM)
4441 if (!ice_vc_is_opcode_allowed(vf, v_opcode)) {
4442 ice_vc_send_msg_to_vf(vf, v_opcode,
4443 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL,
4450 ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
4452 dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
4453 vf_id, v_opcode, msglen, err);
4458 case VIRTCHNL_OP_VERSION:
4459 err = ice_vc_get_ver_msg(vf, msg);
4461 case VIRTCHNL_OP_GET_VF_RESOURCES:
4462 err = ice_vc_get_vf_res_msg(vf, msg);
4463 if (ice_vf_init_vlan_stripping(vf))
4464 dev_err(dev, "Failed to initialize VLAN stripping for VF %d\n",
4466 ice_vc_notify_vf_link_state(vf);
4468 case VIRTCHNL_OP_RESET_VF:
4469 ice_vc_reset_vf_msg(vf);
4471 case VIRTCHNL_OP_ADD_ETH_ADDR:
4472 err = ice_vc_add_mac_addr_msg(vf, msg);
4474 case VIRTCHNL_OP_DEL_ETH_ADDR:
4475 err = ice_vc_del_mac_addr_msg(vf, msg);
4477 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
4478 err = ice_vc_cfg_qs_msg(vf, msg);
4480 case VIRTCHNL_OP_ENABLE_QUEUES:
4481 err = ice_vc_ena_qs_msg(vf, msg);
4482 ice_vc_notify_vf_link_state(vf);
4484 case VIRTCHNL_OP_DISABLE_QUEUES:
4485 err = ice_vc_dis_qs_msg(vf, msg);
4487 case VIRTCHNL_OP_REQUEST_QUEUES:
4488 err = ice_vc_request_qs_msg(vf, msg);
4490 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
4491 err = ice_vc_cfg_irq_map_msg(vf, msg);
4493 case VIRTCHNL_OP_CONFIG_RSS_KEY:
4494 err = ice_vc_config_rss_key(vf, msg);
4496 case VIRTCHNL_OP_CONFIG_RSS_LUT:
4497 err = ice_vc_config_rss_lut(vf, msg);
4499 case VIRTCHNL_OP_GET_STATS:
4500 err = ice_vc_get_stats_msg(vf, msg);
4502 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
4503 err = ice_vc_cfg_promiscuous_mode_msg(vf, msg);
4505 case VIRTCHNL_OP_ADD_VLAN:
4506 err = ice_vc_add_vlan_msg(vf, msg);
4508 case VIRTCHNL_OP_DEL_VLAN:
4509 err = ice_vc_remove_vlan_msg(vf, msg);
4511 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
4512 err = ice_vc_ena_vlan_stripping(vf);
4514 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
4515 err = ice_vc_dis_vlan_stripping(vf);
4517 case VIRTCHNL_OP_ADD_FDIR_FILTER:
4518 err = ice_vc_add_fdir_fltr(vf, msg);
4520 case VIRTCHNL_OP_DEL_FDIR_FILTER:
4521 err = ice_vc_del_fdir_fltr(vf, msg);
4523 case VIRTCHNL_OP_ADD_RSS_CFG:
4524 err = ice_vc_handle_rss_cfg(vf, msg, true);
4526 case VIRTCHNL_OP_DEL_RSS_CFG:
4527 err = ice_vc_handle_rss_cfg(vf, msg, false);
4529 case VIRTCHNL_OP_UNKNOWN:
4531 dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
4533 err = ice_vc_send_msg_to_vf(vf, v_opcode,
4534 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
4539 /* Errors from the opcode handlers are only logged here; the
4540 * PF is busy with pending work and does not act on them.
4542 dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
4543 vf_id, v_opcode, err);
4549 * @netdev: network interface device structure
4550 * @vf_id: VF identifier
4551 * @ivi: VF configuration structure
4553 * return VF configuration
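* Usually reached via the ndo_get_vf_config callback, e.g. "ip link show <pf>"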
4556 ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
4558 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4561 if (ice_validate_vf_id(pf, vf_id))
4564 vf = &pf->vf[vf_id];
4566 if (ice_check_vf_init(pf, vf))
4570 ether_addr_copy(ivi->mac, vf->hw_lan_addr.addr);
4572 /* VF configuration for VLAN and applicable QoS */
4573 ivi->vlan = vf->port_vlan_info & VLAN_VID_MASK;
4574 ivi->qos = (vf->port_vlan_info & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
4576 ivi->trusted = vf->trusted;
4577 ivi->spoofchk = vf->spoofchk;
4578 if (!vf->link_forced)
4579 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
4580 else if (vf->link_up)
4581 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
4583 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
4584 ivi->max_tx_rate = vf->tx_rate;
4585 ivi->min_tx_rate = 0;
4590 * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch
4591 * @pf: PF used to reference the switch's rules
4592 * @umac: unicast MAC to compare against existing switch rules
4594 * Return true on the first/any match, else return false
4596 static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac)
4598 struct ice_sw_recipe *mac_recipe_list =
4599 &pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC];
4600 struct ice_fltr_mgmt_list_entry *list_itr;
4601 struct list_head *rule_head;
4602 struct mutex *rule_lock; /* protect MAC filter list access */
4604 rule_head = &mac_recipe_list->filt_rules;
4605 rule_lock = &mac_recipe_list->filt_rule_lock;
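/* scan the MAC lookup recipe's filter list for an exact match on the unicast address */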
4607 mutex_lock(rule_lock);
4608 list_for_each_entry(list_itr, rule_head, list_entry) {
4609 u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0];
4611 if (ether_addr_equal(existing_mac, umac)) {
4612 mutex_unlock(rule_lock);
4617 mutex_unlock(rule_lock);
4624 * @netdev: network interface device structure
4625 * @vf_id: VF identifier
4628 * program VF MAC address
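* Usually reached via the ndo_set_vf_mac callback, e.g.
* "ip link set <pf> vf <vf_id> mac <addr>"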
4630 int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
4632 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4636 if (ice_validate_vf_id(pf, vf_id))
4639 if (is_multicast_ether_addr(mac)) {
4640 netdev_err(netdev, "%pM not a valid unicast address\n", mac);
4644 vf = &pf->vf[vf_id];
4645 /* nothing left to do, unicast MAC already set */
4646 if (ether_addr_equal(vf->dev_lan_addr.addr, mac) &&
4647 ether_addr_equal(vf->hw_lan_addr.addr, mac))
4650 ret = ice_check_vf_ready_for_cfg(vf);
4654 if (ice_unicast_mac_exists(pf, mac)) {
4655 netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n",
4660 /* VF is notified of its new MAC via the PF's response to the
4661 * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
4663 ether_addr_copy(vf->dev_lan_addr.addr, mac);
4664 ether_addr_copy(vf->hw_lan_addr.addr, mac);
4665 if (is_zero_ether_addr(mac)) {
4666 /* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
4667 vf->pf_set_mac = false;
4668 netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
4671 /* PF will add MAC rule for the VF */
4672 vf->pf_set_mac = true;
4673 netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
4677 ice_vc_reset_vf(vf);
4683 * @netdev: network interface device structure
4684 * @vf_id: VF identifier
4685 * @trusted: Boolean value to enable/disable trusted VF
4687 * Enable or disable a given VF as trusted
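* Usually reached via the ndo_set_vf_trust callback, e.g.
* "ip link set <pf> vf <vf_id> trust on|off"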
4689 int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
4691 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4695 if (ice_validate_vf_id(pf, vf_id))
4698 vf = &pf->vf[vf_id];
4699 ret = ice_check_vf_ready_for_cfg(vf);
4703 /* Check if already trusted */
4704 if (trusted == vf->trusted)
4707 vf->trusted = trusted;
4708 ice_vc_reset_vf(vf);
4709 dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
4710 vf_id, trusted ? "" : "un");
4716 * ice_set_vf_link_state
4717 * @netdev: network interface device structure
4718 * @vf_id: VF identifier
4719 * @link_state: required link state
4721 * Set the VF's link state, irrespective of the physical link status
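* Usually reached via the ndo_set_vf_link_state callback, e.g.
* "ip link set <pf> vf <vf_id> state auto|enable|disable"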
4723 int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
4725 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4729 if (ice_validate_vf_id(pf, vf_id))
4732 vf = &pf->vf[vf_id];
4733 ret = ice_check_vf_ready_for_cfg(vf);
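/* a forced link state overrides the physical link reported to the VF;
 * IFLA_VF_LINK_STATE_AUTO returns the VF to following the PF's link
 */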
4737 switch (link_state) {
4738 case IFLA_VF_LINK_STATE_AUTO:
4739 vf->link_forced = false;
4741 case IFLA_VF_LINK_STATE_ENABLE:
4742 vf->link_forced = true;
4745 case IFLA_VF_LINK_STATE_DISABLE:
4746 vf->link_forced = true;
4747 vf->link_up = false;
4753 ice_vc_notify_vf_link_state(vf);
4759 * ice_get_vf_stats - populate some stats for the VF
4760 * @netdev: the netdev of the PF
4761 * @vf_id: the host OS identifier (0-255)
4762 * @vf_stats: pointer to the OS memory to be initialized
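*
* Usually reached via the ndo_get_vf_stats callback; the counters appear
* in, e.g., "ip -s link show <pf>" output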
4764 int ice_get_vf_stats(struct net_device *netdev, int vf_id,
4765 struct ifla_vf_stats *vf_stats)
4767 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4768 struct ice_eth_stats *stats;
4769 struct ice_vsi *vsi;
4773 if (ice_validate_vf_id(pf, vf_id))
4776 vf = &pf->vf[vf_id];
4777 ret = ice_check_vf_ready_for_cfg(vf);
4781 vsi = ice_get_vf_vsi(vf);
4785 ice_update_eth_stats(vsi);
4786 stats = &vsi->eth_stats;
4788 memset(vf_stats, 0, sizeof(*vf_stats));
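/* ifla_vf_stats has no per-cast-type packet counters, so sum unicast,
 * broadcast and multicast from the VSI's Ethernet stats
 */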
4790 vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
4791 stats->rx_multicast;
4792 vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
4793 stats->tx_multicast;
4794 vf_stats->rx_bytes = stats->rx_bytes;
4795 vf_stats->tx_bytes = stats->tx_bytes;
4796 vf_stats->broadcast = stats->rx_broadcast;
4797 vf_stats->multicast = stats->rx_multicast;
4798 vf_stats->rx_dropped = stats->rx_discards;
4799 vf_stats->tx_dropped = stats->tx_discards;
4805 * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
4806 * @vf: pointer to the VF structure
4808 void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
4810 struct ice_pf *pf = vf->pf;
4813 dev = ice_pf_to_dev(pf);
4815 dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
4816 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
4817 vf->dev_lan_addr.addr,
4818 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
4823 * ice_print_vfs_mdd_events - print VFs malicious driver detect event
4824 * @pf: pointer to the PF structure
4826 * Called from ice_handle_mdd_event to rate limit and print VF MDD events.
4828 void ice_print_vfs_mdd_events(struct ice_pf *pf)
4830 struct device *dev = ice_pf_to_dev(pf);
4831 struct ice_hw *hw = &pf->hw;
4834 /* check that there are pending MDD events to print */
4835 if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state))
4838 /* VF MDD event logs are rate limited to one second intervals */
4839 if (time_is_after_jiffies(pf->last_printed_mdd_jiffies + HZ * 1))
4842 pf->last_printed_mdd_jiffies = jiffies;
4844 ice_for_each_vf(pf, i) {
4845 struct ice_vf *vf = &pf->vf[i];
4847 /* only print Rx MDD event message if there are new events */
4848 if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
4849 vf->mdd_rx_events.last_printed =
4850 vf->mdd_rx_events.count;
4851 ice_print_vf_rx_mdd_event(vf);
4854 /* only print Tx MDD event message if there are new events */
4855 if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
4856 vf->mdd_tx_events.last_printed =
4857 vf->mdd_tx_events.count;
4859 dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
4860 vf->mdd_tx_events.count, hw->pf_id, i,
4861 vf->dev_lan_addr.addr);
4867 * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
4868 * @pdev: pointer to a pci_dev structure
4870 * Called when recovering from a PF FLR to restore interrupt capability to
4873 void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
4878 if (!pci_num_vf(pdev))
4881 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
4883 struct pci_dev *vfdev;
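/* read the VF device ID from the SR-IOV capability, then walk every
 * matching VF device and restore MSI state on those owned by this PF
 */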
4885 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
4887 vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
4889 if (vfdev->is_virtfn && vfdev->physfn == pdev)
4890 pci_restore_msi_state(vfdev);
4891 vfdev = pci_get_device(pdev->vendor, vf_id,
4898 * ice_is_malicious_vf - helper function to detect a malicious VF
4899 * @pf: ptr to struct ice_pf
4900 * @event: pointer to the AQ event
4901 * @num_msg_proc: the number of messages processed so far
4902 * @num_msg_pending: the number of messages pending in the admin queue
4905 ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
4906 u16 num_msg_proc, u16 num_msg_pending)
4908 s16 vf_id = le16_to_cpu(event->desc.retval);
4909 struct device *dev = ice_pf_to_dev(pf);
4910 struct ice_mbx_data mbxdata;
4911 enum ice_status status;
4915 if (ice_validate_vf_id(pf, vf_id))
4918 vf = &pf->vf[vf_id];
4919 /* Check if VF is disabled. */
4920 if (test_bit(ICE_VF_STATE_DIS, vf->vf_states))
4923 mbxdata.num_msg_proc = num_msg_proc;
4924 mbxdata.num_pending_arq = num_msg_pending;
4925 mbxdata.max_num_msgs_mbx = pf->hw.mailboxq.num_rq_entries;
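/* number of pending mailbox messages after which a VF is suspected of flooding the PF's mailbox */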
4926 #define ICE_MBX_OVERFLOW_WATERMARK 64
4927 mbxdata.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;
4929 /* check to see if we have a malicious VF */
4930 status = ice_mbx_vf_state_handler(&pf->hw, &mbxdata, vf_id, &malvf);
4935 bool report_vf = false;
4937 /* if the VF is malicious and we haven't let the user
4938 * know about it, then let them know now
4940 status = ice_mbx_report_malvf(&pf->hw, pf->malvfs,
4941 ICE_MAX_VF_COUNT, vf_id,
4944 dev_dbg(dev, "Error reporting malicious VF\n");
4947 struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
4950 dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n",
4951 &vf->dev_lan_addr.addr[0],
4952 pf_vsi->netdev->dev_addr);
4958 /* if there was an error in detection or the VF is not malicious then