1 /*******************************************************************************
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2014 Intel Corporation.
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *******************************************************************************/
29 #include <linux/pci.h>
30 #include <linux/delay.h>
31 #include <linux/sched.h>
32 #include <linux/netdevice.h>
35 #include "ixgbe_common.h"
36 #include "ixgbe_phy.h"
38 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
39 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
40 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
41 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
42 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
43 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
45 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
46 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
47 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
48 static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
50 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
51 static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
52 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
53 u16 words, u16 *data);
54 static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
55 u16 words, u16 *data);
56 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
58 static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
61 * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
63 * @hw: pointer to hardware structure
65 * There are several phys that do not support autoneg flow control. This
66 * function check the device id to see if the associated phy supports
67 * autoneg flow control.
69 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
71 bool supported = false;
72 ixgbe_link_speed speed;
75 switch (hw->phy.media_type) {
76 case ixgbe_media_type_fiber:
77 hw->mac.ops.check_link(hw, &speed, &link_up, false);
78 /* if link is down, assume supported */
80 supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
85 case ixgbe_media_type_backplane:
88 case ixgbe_media_type_copper:
89 /* only some copper devices support flow control autoneg */
90 switch (hw->device_id) {
91 case IXGBE_DEV_ID_82599_T3_LOM:
92 case IXGBE_DEV_ID_X540T:
93 case IXGBE_DEV_ID_X540T1:
107 * ixgbe_setup_fc - Set up flow control
108 * @hw: pointer to hardware structure
110 * Called at init time to set up flow control.
112 static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
115 u32 reg = 0, reg_bp = 0;
120 * Validate the requested mode. Strict IEEE mode does not allow
121 * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
123 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
124 hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
125 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
130 * 10gig parts do not have a word in the EEPROM to determine the
131 * default flow control setting, so we explicitly set it to full.
133 if (hw->fc.requested_mode == ixgbe_fc_default)
134 hw->fc.requested_mode = ixgbe_fc_full;
137 * Set up the 1G and 10G flow control advertisement registers so the
138 * HW will be able to do fc autoneg once the cable is plugged in. If
139 * we link at 10G, the 1G advertisement is harmless and vice versa.
141 switch (hw->phy.media_type) {
142 case ixgbe_media_type_backplane:
143 /* some MAC's need RMW protection on AUTOC */
144 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, ®_bp);
148 /* only backplane uses autoc so fall though */
149 case ixgbe_media_type_fiber:
150 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
153 case ixgbe_media_type_copper:
154 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
155 MDIO_MMD_AN, ®_cu);
162 * The possible values of fc.requested_mode are:
163 * 0: Flow control is completely disabled
164 * 1: Rx flow control is enabled (we can receive pause frames,
165 * but not send pause frames).
166 * 2: Tx flow control is enabled (we can send pause frames but
167 * we do not support receiving pause frames).
168 * 3: Both Rx and Tx flow control (symmetric) are enabled.
171 switch (hw->fc.requested_mode) {
173 /* Flow control completely disabled by software override. */
174 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
175 if (hw->phy.media_type == ixgbe_media_type_backplane)
176 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
177 IXGBE_AUTOC_ASM_PAUSE);
178 else if (hw->phy.media_type == ixgbe_media_type_copper)
179 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
181 case ixgbe_fc_tx_pause:
183 * Tx Flow control is enabled, and Rx Flow control is
184 * disabled by software override.
186 reg |= IXGBE_PCS1GANA_ASM_PAUSE;
187 reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
188 if (hw->phy.media_type == ixgbe_media_type_backplane) {
189 reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
190 reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
191 } else if (hw->phy.media_type == ixgbe_media_type_copper) {
192 reg_cu |= IXGBE_TAF_ASM_PAUSE;
193 reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
196 case ixgbe_fc_rx_pause:
198 * Rx Flow control is enabled and Tx Flow control is
199 * disabled by software override. Since there really
200 * isn't a way to advertise that we are capable of RX
201 * Pause ONLY, we will advertise that we support both
202 * symmetric and asymmetric Rx PAUSE, as such we fall
203 * through to the fc_full statement. Later, we will
204 * disable the adapter's ability to send PAUSE frames.
207 /* Flow control (both Rx and Tx) is enabled by SW override. */
208 reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
209 if (hw->phy.media_type == ixgbe_media_type_backplane)
210 reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
211 IXGBE_AUTOC_ASM_PAUSE;
212 else if (hw->phy.media_type == ixgbe_media_type_copper)
213 reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
216 hw_dbg(hw, "Flow control param set incorrectly\n");
217 ret_val = IXGBE_ERR_CONFIG;
222 if (hw->mac.type != ixgbe_mac_X540) {
224 * Enable auto-negotiation between the MAC & PHY;
225 * the MAC will advertise clause 37 flow control.
227 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
228 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
230 /* Disable AN timeout */
231 if (hw->fc.strict_ieee)
232 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
234 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
235 hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
239 * AUTOC restart handles negotiation of 1G and 10G on backplane
240 * and copper. There is no need to set the PCS1GCTL register.
243 if (hw->phy.media_type == ixgbe_media_type_backplane) {
244 /* Need the SW/FW semaphore around AUTOC writes if 82599 and
245 * LESM is on, likewise reset_pipeline requries the lock as
246 * it also writes AUTOC.
248 ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
252 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
253 ixgbe_device_supports_autoneg_fc(hw)) {
254 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
255 MDIO_MMD_AN, reg_cu);
258 hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
264 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
265 * @hw: pointer to hardware structure
267 * Starts the hardware by filling the bus info structure and media type, clears
268 * all on chip counters, initializes receive address registers, multicast
269 * table, VLAN filter table, calls routine to set up link and flow control
270 * settings, and leaves transmit and receive units disabled and uninitialized
272 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
277 /* Set the media type */
278 hw->phy.media_type = hw->mac.ops.get_media_type(hw);
280 /* Identify the PHY */
281 hw->phy.ops.identify(hw);
283 /* Clear the VLAN filter table */
284 hw->mac.ops.clear_vfta(hw);
286 /* Clear statistics registers */
287 hw->mac.ops.clear_hw_cntrs(hw);
289 /* Set No Snoop Disable */
290 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
291 ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
292 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
293 IXGBE_WRITE_FLUSH(hw);
295 /* Setup flow control */
296 ret_val = ixgbe_setup_fc(hw);
300 /* Clear adapter stopped flag */
301 hw->adapter_stopped = false;
308 * ixgbe_start_hw_gen2 - Init sequence for common device family
309 * @hw: pointer to hw structure
311 * Performs the init sequence common to the second generation
313 * Devices in the second generation:
317 s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
322 /* Clear the rate limiters */
323 for (i = 0; i < hw->mac.max_tx_queues; i++) {
324 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
325 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
327 IXGBE_WRITE_FLUSH(hw);
329 /* Disable relaxed ordering */
330 for (i = 0; i < hw->mac.max_tx_queues; i++) {
331 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
332 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
333 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
336 for (i = 0; i < hw->mac.max_rx_queues; i++) {
337 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
338 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
339 IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
340 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
347 * ixgbe_init_hw_generic - Generic hardware initialization
348 * @hw: pointer to hardware structure
350 * Initialize the hardware by resetting the hardware, filling the bus info
351 * structure and media type, clears all on chip counters, initializes receive
352 * address registers, multicast table, VLAN filter table, calls routine to set
353 * up link and flow control settings, and leaves transmit and receive units
354 * disabled and uninitialized
356 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
360 /* Reset the hardware */
361 status = hw->mac.ops.reset_hw(hw);
365 status = hw->mac.ops.start_hw(hw);
372 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
373 * @hw: pointer to hardware structure
375 * Clears all hardware statistics counters by reading them from the hardware
376 * Statistics counters are clear on read.
378 s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
382 IXGBE_READ_REG(hw, IXGBE_CRCERRS);
383 IXGBE_READ_REG(hw, IXGBE_ILLERRC);
384 IXGBE_READ_REG(hw, IXGBE_ERRBC);
385 IXGBE_READ_REG(hw, IXGBE_MSPDC);
386 for (i = 0; i < 8; i++)
387 IXGBE_READ_REG(hw, IXGBE_MPC(i));
389 IXGBE_READ_REG(hw, IXGBE_MLFC);
390 IXGBE_READ_REG(hw, IXGBE_MRFC);
391 IXGBE_READ_REG(hw, IXGBE_RLEC);
392 IXGBE_READ_REG(hw, IXGBE_LXONTXC);
393 IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
394 if (hw->mac.type >= ixgbe_mac_82599EB) {
395 IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
396 IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
398 IXGBE_READ_REG(hw, IXGBE_LXONRXC);
399 IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
402 for (i = 0; i < 8; i++) {
403 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
404 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
405 if (hw->mac.type >= ixgbe_mac_82599EB) {
406 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
407 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
409 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
410 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
413 if (hw->mac.type >= ixgbe_mac_82599EB)
414 for (i = 0; i < 8; i++)
415 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
416 IXGBE_READ_REG(hw, IXGBE_PRC64);
417 IXGBE_READ_REG(hw, IXGBE_PRC127);
418 IXGBE_READ_REG(hw, IXGBE_PRC255);
419 IXGBE_READ_REG(hw, IXGBE_PRC511);
420 IXGBE_READ_REG(hw, IXGBE_PRC1023);
421 IXGBE_READ_REG(hw, IXGBE_PRC1522);
422 IXGBE_READ_REG(hw, IXGBE_GPRC);
423 IXGBE_READ_REG(hw, IXGBE_BPRC);
424 IXGBE_READ_REG(hw, IXGBE_MPRC);
425 IXGBE_READ_REG(hw, IXGBE_GPTC);
426 IXGBE_READ_REG(hw, IXGBE_GORCL);
427 IXGBE_READ_REG(hw, IXGBE_GORCH);
428 IXGBE_READ_REG(hw, IXGBE_GOTCL);
429 IXGBE_READ_REG(hw, IXGBE_GOTCH);
430 if (hw->mac.type == ixgbe_mac_82598EB)
431 for (i = 0; i < 8; i++)
432 IXGBE_READ_REG(hw, IXGBE_RNBC(i));
433 IXGBE_READ_REG(hw, IXGBE_RUC);
434 IXGBE_READ_REG(hw, IXGBE_RFC);
435 IXGBE_READ_REG(hw, IXGBE_ROC);
436 IXGBE_READ_REG(hw, IXGBE_RJC);
437 IXGBE_READ_REG(hw, IXGBE_MNGPRC);
438 IXGBE_READ_REG(hw, IXGBE_MNGPDC);
439 IXGBE_READ_REG(hw, IXGBE_MNGPTC);
440 IXGBE_READ_REG(hw, IXGBE_TORL);
441 IXGBE_READ_REG(hw, IXGBE_TORH);
442 IXGBE_READ_REG(hw, IXGBE_TPR);
443 IXGBE_READ_REG(hw, IXGBE_TPT);
444 IXGBE_READ_REG(hw, IXGBE_PTC64);
445 IXGBE_READ_REG(hw, IXGBE_PTC127);
446 IXGBE_READ_REG(hw, IXGBE_PTC255);
447 IXGBE_READ_REG(hw, IXGBE_PTC511);
448 IXGBE_READ_REG(hw, IXGBE_PTC1023);
449 IXGBE_READ_REG(hw, IXGBE_PTC1522);
450 IXGBE_READ_REG(hw, IXGBE_MPTC);
451 IXGBE_READ_REG(hw, IXGBE_BPTC);
452 for (i = 0; i < 16; i++) {
453 IXGBE_READ_REG(hw, IXGBE_QPRC(i));
454 IXGBE_READ_REG(hw, IXGBE_QPTC(i));
455 if (hw->mac.type >= ixgbe_mac_82599EB) {
456 IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
457 IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
458 IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
459 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
460 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
462 IXGBE_READ_REG(hw, IXGBE_QBRC(i));
463 IXGBE_READ_REG(hw, IXGBE_QBTC(i));
467 if (hw->mac.type == ixgbe_mac_X540) {
469 hw->phy.ops.identify(hw);
470 hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i);
471 hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, MDIO_MMD_PCS, &i);
472 hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, MDIO_MMD_PCS, &i);
473 hw->phy.ops.read_reg(hw, IXGBE_LDPCECH, MDIO_MMD_PCS, &i);
480 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
481 * @hw: pointer to hardware structure
482 * @pba_num: stores the part number string from the EEPROM
483 * @pba_num_size: part number string buffer length
485 * Reads the part number string from the EEPROM.
487 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
496 if (pba_num == NULL) {
497 hw_dbg(hw, "PBA string buffer was null\n");
498 return IXGBE_ERR_INVALID_ARGUMENT;
501 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
503 hw_dbg(hw, "NVM Read Error\n");
507 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
509 hw_dbg(hw, "NVM Read Error\n");
514 * if data is not ptr guard the PBA must be in legacy format which
515 * means pba_ptr is actually our second data word for the PBA number
516 * and we can decode it into an ascii string
518 if (data != IXGBE_PBANUM_PTR_GUARD) {
519 hw_dbg(hw, "NVM PBA number is not stored as string\n");
521 /* we will need 11 characters to store the PBA */
522 if (pba_num_size < 11) {
523 hw_dbg(hw, "PBA string buffer too small\n");
524 return IXGBE_ERR_NO_SPACE;
527 /* extract hex string from data and pba_ptr */
528 pba_num[0] = (data >> 12) & 0xF;
529 pba_num[1] = (data >> 8) & 0xF;
530 pba_num[2] = (data >> 4) & 0xF;
531 pba_num[3] = data & 0xF;
532 pba_num[4] = (pba_ptr >> 12) & 0xF;
533 pba_num[5] = (pba_ptr >> 8) & 0xF;
536 pba_num[8] = (pba_ptr >> 4) & 0xF;
537 pba_num[9] = pba_ptr & 0xF;
539 /* put a null character on the end of our string */
542 /* switch all the data but the '-' to hex char */
543 for (offset = 0; offset < 10; offset++) {
544 if (pba_num[offset] < 0xA)
545 pba_num[offset] += '0';
546 else if (pba_num[offset] < 0x10)
547 pba_num[offset] += 'A' - 0xA;
553 ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
555 hw_dbg(hw, "NVM Read Error\n");
559 if (length == 0xFFFF || length == 0) {
560 hw_dbg(hw, "NVM PBA number section invalid length\n");
561 return IXGBE_ERR_PBA_SECTION;
564 /* check if pba_num buffer is big enough */
565 if (pba_num_size < (((u32)length * 2) - 1)) {
566 hw_dbg(hw, "PBA string buffer too small\n");
567 return IXGBE_ERR_NO_SPACE;
570 /* trim pba length from start of string */
574 for (offset = 0; offset < length; offset++) {
575 ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
577 hw_dbg(hw, "NVM Read Error\n");
580 pba_num[offset * 2] = (u8)(data >> 8);
581 pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
583 pba_num[offset * 2] = '\0';
589 * ixgbe_get_mac_addr_generic - Generic get MAC address
590 * @hw: pointer to hardware structure
591 * @mac_addr: Adapter MAC address
593 * Reads the adapter's MAC address from first Receive Address Register (RAR0)
594 * A reset of the adapter must be performed prior to calling this function
595 * in order for the MAC address to have been loaded from the EEPROM into RAR0
597 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
603 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
604 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
606 for (i = 0; i < 4; i++)
607 mac_addr[i] = (u8)(rar_low >> (i*8));
609 for (i = 0; i < 2; i++)
610 mac_addr[i+4] = (u8)(rar_high >> (i*8));
615 enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status)
617 switch (link_status & IXGBE_PCI_LINK_WIDTH) {
618 case IXGBE_PCI_LINK_WIDTH_1:
619 return ixgbe_bus_width_pcie_x1;
620 case IXGBE_PCI_LINK_WIDTH_2:
621 return ixgbe_bus_width_pcie_x2;
622 case IXGBE_PCI_LINK_WIDTH_4:
623 return ixgbe_bus_width_pcie_x4;
624 case IXGBE_PCI_LINK_WIDTH_8:
625 return ixgbe_bus_width_pcie_x8;
627 return ixgbe_bus_width_unknown;
631 enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status)
633 switch (link_status & IXGBE_PCI_LINK_SPEED) {
634 case IXGBE_PCI_LINK_SPEED_2500:
635 return ixgbe_bus_speed_2500;
636 case IXGBE_PCI_LINK_SPEED_5000:
637 return ixgbe_bus_speed_5000;
638 case IXGBE_PCI_LINK_SPEED_8000:
639 return ixgbe_bus_speed_8000;
641 return ixgbe_bus_speed_unknown;
646 * ixgbe_get_bus_info_generic - Generic set PCI bus info
647 * @hw: pointer to hardware structure
649 * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
651 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
655 hw->bus.type = ixgbe_bus_type_pci_express;
657 /* Get the negotiated link width and speed from PCI config space */
658 link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS);
660 hw->bus.width = ixgbe_convert_bus_width(link_status);
661 hw->bus.speed = ixgbe_convert_bus_speed(link_status);
663 hw->mac.ops.set_lan_id(hw);
669 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
670 * @hw: pointer to the HW structure
672 * Determines the LAN function id by reading memory-mapped registers
673 * and swaps the port value if requested.
675 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
677 struct ixgbe_bus_info *bus = &hw->bus;
680 reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
681 bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
682 bus->lan_id = bus->func;
684 /* check for a port swap */
685 reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
686 if (reg & IXGBE_FACTPS_LFS)
691 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
692 * @hw: pointer to hardware structure
694 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
695 * disables transmit and receive units. The adapter_stopped flag is used by
696 * the shared code and drivers to determine if the adapter is in a stopped
697 * state and should not touch the hardware.
699 s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
705 * Set the adapter_stopped flag so other driver functions stop touching
708 hw->adapter_stopped = true;
710 /* Disable the receive unit */
711 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0);
713 /* Clear interrupt mask to stop interrupts from being generated */
714 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
716 /* Clear any pending interrupts, flush previous writes */
717 IXGBE_READ_REG(hw, IXGBE_EICR);
719 /* Disable the transmit unit. Each queue must be disabled. */
720 for (i = 0; i < hw->mac.max_tx_queues; i++)
721 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
723 /* Disable the receive unit by stopping each queue */
724 for (i = 0; i < hw->mac.max_rx_queues; i++) {
725 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
726 reg_val &= ~IXGBE_RXDCTL_ENABLE;
727 reg_val |= IXGBE_RXDCTL_SWFLSH;
728 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
731 /* flush all queues disables */
732 IXGBE_WRITE_FLUSH(hw);
733 usleep_range(1000, 2000);
736 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
737 * access and verify no pending requests
739 return ixgbe_disable_pcie_master(hw);
743 * ixgbe_led_on_generic - Turns on the software controllable LEDs.
744 * @hw: pointer to hardware structure
745 * @index: led number to turn on
747 s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
749 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
751 /* To turn on the LED, set mode to ON. */
752 led_reg &= ~IXGBE_LED_MODE_MASK(index);
753 led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
754 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
755 IXGBE_WRITE_FLUSH(hw);
761 * ixgbe_led_off_generic - Turns off the software controllable LEDs.
762 * @hw: pointer to hardware structure
763 * @index: led number to turn off
765 s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
767 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
769 /* To turn off the LED, set mode to OFF. */
770 led_reg &= ~IXGBE_LED_MODE_MASK(index);
771 led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
772 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
773 IXGBE_WRITE_FLUSH(hw);
779 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
780 * @hw: pointer to hardware structure
782 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
783 * ixgbe_hw struct in order to set up EEPROM access.
785 s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
787 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
791 if (eeprom->type == ixgbe_eeprom_uninitialized) {
792 eeprom->type = ixgbe_eeprom_none;
793 /* Set default semaphore delay to 10ms which is a well
795 eeprom->semaphore_delay = 10;
796 /* Clear EEPROM page size, it will be initialized as needed */
797 eeprom->word_page_size = 0;
800 * Check for EEPROM present first.
801 * If not present leave as none
803 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
804 if (eec & IXGBE_EEC_PRES) {
805 eeprom->type = ixgbe_eeprom_spi;
808 * SPI EEPROM is assumed here. This code would need to
809 * change if a future EEPROM is not SPI.
811 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
812 IXGBE_EEC_SIZE_SHIFT);
813 eeprom->word_size = 1 << (eeprom_size +
814 IXGBE_EEPROM_WORD_SIZE_SHIFT);
817 if (eec & IXGBE_EEC_ADDR_SIZE)
818 eeprom->address_bits = 16;
820 eeprom->address_bits = 8;
821 hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: %d\n",
822 eeprom->type, eeprom->word_size, eeprom->address_bits);
829 * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
830 * @hw: pointer to hardware structure
831 * @offset: offset within the EEPROM to write
832 * @words: number of words
833 * @data: 16 bit word(s) to write to EEPROM
835 * Reads 16 bit word(s) from EEPROM through bit-bang method
837 s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
838 u16 words, u16 *data)
843 hw->eeprom.ops.init_params(hw);
846 status = IXGBE_ERR_INVALID_ARGUMENT;
850 if (offset + words > hw->eeprom.word_size) {
851 status = IXGBE_ERR_EEPROM;
856 * The EEPROM page size cannot be queried from the chip. We do lazy
857 * initialization. It is worth to do that when we write large buffer.
859 if ((hw->eeprom.word_page_size == 0) &&
860 (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
861 ixgbe_detect_eeprom_page_size_generic(hw, offset);
864 * We cannot hold synchronization semaphores for too long
865 * to avoid other entity starvation. However it is more efficient
866 * to read in bursts than synchronizing access for each word.
868 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
869 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
870 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
871 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
883 * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
884 * @hw: pointer to hardware structure
885 * @offset: offset within the EEPROM to be written to
886 * @words: number of word(s)
887 * @data: 16 bit word(s) to be written to the EEPROM
889 * If ixgbe_eeprom_update_checksum is not called after this function, the
890 * EEPROM will most likely contain an invalid checksum.
892 static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
893 u16 words, u16 *data)
899 u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
901 /* Prepare the EEPROM for writing */
902 status = ixgbe_acquire_eeprom(hw);
905 if (ixgbe_ready_eeprom(hw) != 0) {
906 ixgbe_release_eeprom(hw);
907 status = IXGBE_ERR_EEPROM;
912 for (i = 0; i < words; i++) {
913 ixgbe_standby_eeprom(hw);
915 /* Send the WRITE ENABLE command (8 bit opcode ) */
916 ixgbe_shift_out_eeprom_bits(hw,
917 IXGBE_EEPROM_WREN_OPCODE_SPI,
918 IXGBE_EEPROM_OPCODE_BITS);
920 ixgbe_standby_eeprom(hw);
923 * Some SPI eeproms use the 8th address bit embedded
926 if ((hw->eeprom.address_bits == 8) &&
927 ((offset + i) >= 128))
928 write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
930 /* Send the Write command (8-bit opcode + addr) */
931 ixgbe_shift_out_eeprom_bits(hw, write_opcode,
932 IXGBE_EEPROM_OPCODE_BITS);
933 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
934 hw->eeprom.address_bits);
936 page_size = hw->eeprom.word_page_size;
938 /* Send the data in burst via SPI*/
941 word = (word >> 8) | (word << 8);
942 ixgbe_shift_out_eeprom_bits(hw, word, 16);
947 /* do not wrap around page */
948 if (((offset + i) & (page_size - 1)) ==
951 } while (++i < words);
953 ixgbe_standby_eeprom(hw);
954 usleep_range(10000, 20000);
956 /* Done with writing - release the EEPROM */
957 ixgbe_release_eeprom(hw);
964 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
965 * @hw: pointer to hardware structure
966 * @offset: offset within the EEPROM to be written to
967 * @data: 16 bit word to be written to the EEPROM
969 * If ixgbe_eeprom_update_checksum is not called after this function, the
970 * EEPROM will most likely contain an invalid checksum.
972 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
976 hw->eeprom.ops.init_params(hw);
978 if (offset >= hw->eeprom.word_size) {
979 status = IXGBE_ERR_EEPROM;
983 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
990 * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
991 * @hw: pointer to hardware structure
992 * @offset: offset within the EEPROM to be read
993 * @words: number of word(s)
994 * @data: read 16 bit words(s) from EEPROM
996 * Reads 16 bit word(s) from EEPROM through bit-bang method
998 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
999 u16 words, u16 *data)
1004 hw->eeprom.ops.init_params(hw);
1007 status = IXGBE_ERR_INVALID_ARGUMENT;
1011 if (offset + words > hw->eeprom.word_size) {
1012 status = IXGBE_ERR_EEPROM;
1017 * We cannot hold synchronization semaphores for too long
1018 * to avoid other entity starvation. However it is more efficient
1019 * to read in bursts than synchronizing access for each word.
1021 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1022 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1023 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1025 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
1037 * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
1038 * @hw: pointer to hardware structure
1039 * @offset: offset within the EEPROM to be read
1040 * @words: number of word(s)
1041 * @data: read 16 bit word(s) from EEPROM
1043 * Reads 16 bit word(s) from EEPROM through bit-bang method
1045 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1046 u16 words, u16 *data)
1050 u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
1053 /* Prepare the EEPROM for reading */
1054 status = ixgbe_acquire_eeprom(hw);
1057 if (ixgbe_ready_eeprom(hw) != 0) {
1058 ixgbe_release_eeprom(hw);
1059 status = IXGBE_ERR_EEPROM;
1064 for (i = 0; i < words; i++) {
1065 ixgbe_standby_eeprom(hw);
1067 * Some SPI eeproms use the 8th address bit embedded
1070 if ((hw->eeprom.address_bits == 8) &&
1071 ((offset + i) >= 128))
1072 read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1074 /* Send the READ command (opcode + addr) */
1075 ixgbe_shift_out_eeprom_bits(hw, read_opcode,
1076 IXGBE_EEPROM_OPCODE_BITS);
1077 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1078 hw->eeprom.address_bits);
1080 /* Read the data. */
1081 word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
1082 data[i] = (word_in >> 8) | (word_in << 8);
1085 /* End this read operation */
1086 ixgbe_release_eeprom(hw);
1093 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1094 * @hw: pointer to hardware structure
1095 * @offset: offset within the EEPROM to be read
1096 * @data: read 16 bit value from EEPROM
1098 * Reads 16 bit value from EEPROM through bit-bang method
1100 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1105 hw->eeprom.ops.init_params(hw);
1107 if (offset >= hw->eeprom.word_size) {
1108 status = IXGBE_ERR_EEPROM;
1112 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1119 * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
1120 * @hw: pointer to hardware structure
1121 * @offset: offset of word in the EEPROM to read
1122 * @words: number of word(s)
1123 * @data: 16 bit word(s) from the EEPROM
1125 * Reads a 16 bit word(s) from the EEPROM using the EERD register.
1127 s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1128 u16 words, u16 *data)
1134 hw->eeprom.ops.init_params(hw);
1137 status = IXGBE_ERR_INVALID_ARGUMENT;
1141 if (offset >= hw->eeprom.word_size) {
1142 status = IXGBE_ERR_EEPROM;
1146 for (i = 0; i < words; i++) {
1147 eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1148 IXGBE_EEPROM_RW_REG_START;
1150 IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
1151 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
1154 data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1155 IXGBE_EEPROM_RW_REG_DATA);
1157 hw_dbg(hw, "Eeprom read timed out\n");
1166 * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
1167 * @hw: pointer to hardware structure
1168 * @offset: offset within the EEPROM to be used as a scratch pad
1170 * Discover EEPROM page size by writing marching data at given offset.
1171 * This function is called only when we are writing a new large buffer
1172 * at given offset so the data would be overwritten anyway.
1174 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
1177 u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
1181 for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
1184 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
1185 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
1186 IXGBE_EEPROM_PAGE_SIZE_MAX, data);
1187 hw->eeprom.word_page_size = 0;
1191 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1196 * When writing in burst more than the actual page size
1197 * EEPROM address wraps around current page.
1199 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
1201 hw_dbg(hw, "Detected EEPROM page size = %d words.\n",
1202 hw->eeprom.word_page_size);
1208 * ixgbe_read_eerd_generic - Read EEPROM word using EERD
1209 * @hw: pointer to hardware structure
1210 * @offset: offset of word in the EEPROM to read
1211 * @data: word read from the EEPROM
1213 * Reads a 16 bit word from the EEPROM using the EERD register.
1215 s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
1217 return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
1221 * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1222 * @hw: pointer to hardware structure
1223 * @offset: offset of word in the EEPROM to write
1224 * @words: number of words
1225 * @data: word(s) write to the EEPROM
1227 * Write a 16 bit word(s) to the EEPROM using the EEWR register.
1229 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1230 u16 words, u16 *data)
1236 hw->eeprom.ops.init_params(hw);
1239 status = IXGBE_ERR_INVALID_ARGUMENT;
1243 if (offset >= hw->eeprom.word_size) {
1244 status = IXGBE_ERR_EEPROM;
1248 for (i = 0; i < words; i++) {
1249 eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1250 (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1251 IXGBE_EEPROM_RW_REG_START;
1253 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1255 hw_dbg(hw, "Eeprom write EEWR timed out\n");
1259 IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1261 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1263 hw_dbg(hw, "Eeprom write EEWR timed out\n");
1273 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1274 * @hw: pointer to hardware structure
1275 * @offset: offset of word in the EEPROM to write
1276 * @data: word write to the EEPROM
1278 * Write a 16 bit word to the EEPROM using the EEWR register.
1280 s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1282 return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
1286 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1287 * @hw: pointer to hardware structure
1288 * @ee_reg: EEPROM flag for polling
1290 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1291 * read or write is done respectively.
1293 static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1297 s32 status = IXGBE_ERR_EEPROM;
1299 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1300 if (ee_reg == IXGBE_NVM_POLL_READ)
1301 reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1303 reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1305 if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1315 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1316 * @hw: pointer to hardware structure
1318 * Prepares EEPROM for access using bit-bang method. This function should
1319 * be called before issuing a command to the EEPROM.
1321 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
1327 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
1328 status = IXGBE_ERR_SWFW_SYNC;
1331 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1333 /* Request EEPROM Access */
1334 eec |= IXGBE_EEC_REQ;
1335 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1337 for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
1338 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1339 if (eec & IXGBE_EEC_GNT)
1344 /* Release if grant not acquired */
1345 if (!(eec & IXGBE_EEC_GNT)) {
1346 eec &= ~IXGBE_EEC_REQ;
1347 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1348 hw_dbg(hw, "Could not acquire EEPROM grant\n");
1350 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1351 status = IXGBE_ERR_EEPROM;
1354 /* Setup EEPROM for Read/Write */
1356 /* Clear CS and SK */
1357 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
1358 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1359 IXGBE_WRITE_FLUSH(hw);
1367 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
1368 * @hw: pointer to hardware structure
1370 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
1372 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
1374 s32 status = IXGBE_ERR_EEPROM;
1379 /* Get SMBI software semaphore between device drivers first */
1380 for (i = 0; i < timeout; i++) {
1382 * If the SMBI bit is 0 when we read it, then the bit will be
1383 * set and we have the semaphore
1385 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1386 if (!(swsm & IXGBE_SWSM_SMBI)) {
1394 hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore not granted.\n");
1396 * this release is particularly important because our attempts
1397 * above to get the semaphore may have succeeded, and if there
1398 * was a timeout, we should unconditionally clear the semaphore
1399 * bits to free the driver to make progress
1401 ixgbe_release_eeprom_semaphore(hw);
1406 * If the SMBI bit is 0 when we read it, then the bit will be
1407 * set and we have the semaphore
1409 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1410 if (!(swsm & IXGBE_SWSM_SMBI))
1414 /* Now get the semaphore between SW/FW through the SWESMBI bit */
1416 for (i = 0; i < timeout; i++) {
1417 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1419 /* Set the SW EEPROM semaphore bit to request access */
1420 swsm |= IXGBE_SWSM_SWESMBI;
1421 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1424 * If we set the bit successfully then we got the
1427 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1428 if (swsm & IXGBE_SWSM_SWESMBI)
1435 * Release semaphores and return error if SW EEPROM semaphore
1436 * was not granted because we don't have access to the EEPROM
1439 hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n");
1440 ixgbe_release_eeprom_semaphore(hw);
1441 status = IXGBE_ERR_EEPROM;
1444 hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
1451 * ixgbe_release_eeprom_semaphore - Release hardware semaphore
1452 * @hw: pointer to hardware structure
1454 * This function clears hardware semaphore bits.
1456 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1460 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1462 /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
1463 swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
1464 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1465 IXGBE_WRITE_FLUSH(hw);
1469 * ixgbe_ready_eeprom - Polls for EEPROM ready
1470 * @hw: pointer to hardware structure
1472 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1479 * Read "Status Register" repeatedly until the LSB is cleared. The
1480 * EEPROM will signal that the command has been completed by clearing
1481 * bit 0 of the internal status register. If it's not cleared within
1482 * 5 milliseconds, then error out.
1484 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1485 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
1486 IXGBE_EEPROM_OPCODE_BITS);
1487 spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
1488 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
1492 ixgbe_standby_eeprom(hw);
1496 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
1497 * devices (and only 0-5mSec on 5V devices)
1499 if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
1500 hw_dbg(hw, "SPI EEPROM Status error\n");
1501 status = IXGBE_ERR_EEPROM;
1508 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
1509 * @hw: pointer to hardware structure
1511 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
1515 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1517 /* Toggle CS to flush commands */
1518 eec |= IXGBE_EEC_CS;
1519 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1520 IXGBE_WRITE_FLUSH(hw);
1522 eec &= ~IXGBE_EEC_CS;
1523 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1524 IXGBE_WRITE_FLUSH(hw);
1529 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
1530 * @hw: pointer to hardware structure
1531 * @data: data to send to the EEPROM
1532 * @count: number of bits to shift out
1534 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
1541 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1544 * Mask is used to shift "count" bits of "data" out to the EEPROM
1545 * one bit at a time. Determine the starting bit based on count
1547 mask = 0x01 << (count - 1);
1549 for (i = 0; i < count; i++) {
1551 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
1552 * "1", and then raising and then lowering the clock (the SK
1553 * bit controls the clock input to the EEPROM). A "0" is
1554 * shifted out to the EEPROM by setting "DI" to "0" and then
1555 * raising and then lowering the clock.
1558 eec |= IXGBE_EEC_DI;
1560 eec &= ~IXGBE_EEC_DI;
1562 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1563 IXGBE_WRITE_FLUSH(hw);
1567 ixgbe_raise_eeprom_clk(hw, &eec);
1568 ixgbe_lower_eeprom_clk(hw, &eec);
1571 * Shift mask to signify next bit of data to shift in to the
1577 /* We leave the "DI" bit set to "0" when we leave this routine. */
1578 eec &= ~IXGBE_EEC_DI;
1579 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1580 IXGBE_WRITE_FLUSH(hw);
1584 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
1585 * @hw: pointer to hardware structure
1587 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
1594 * In order to read a register from the EEPROM, we need to shift
1595 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
1596 * the clock input to the EEPROM (setting the SK bit), and then reading
1597 * the value of the "DO" bit. During this "shifting in" process the
1598 * "DI" bit should always be clear.
1600 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1602 eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
1604 for (i = 0; i < count; i++) {
1606 ixgbe_raise_eeprom_clk(hw, &eec);
1608 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1610 eec &= ~(IXGBE_EEC_DI);
1611 if (eec & IXGBE_EEC_DO)
1614 ixgbe_lower_eeprom_clk(hw, &eec);
1621 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
1622 * @hw: pointer to hardware structure
1623 * @eec: EEC register's current value
1625 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
1628 * Raise the clock input to the EEPROM
1629 * (setting the SK bit), then delay
1631 *eec = *eec | IXGBE_EEC_SK;
1632 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
1633 IXGBE_WRITE_FLUSH(hw);
1638 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
1639 * @hw: pointer to hardware structure
1640 * @eecd: EECD's current value
1642 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
1645 * Lower the clock input to the EEPROM (clearing the SK bit), then
1648 *eec = *eec & ~IXGBE_EEC_SK;
1649 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
1650 IXGBE_WRITE_FLUSH(hw);
1655 * ixgbe_release_eeprom - Release EEPROM, release semaphores
1656 * @hw: pointer to hardware structure
1658 static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
1662 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1664 eec |= IXGBE_EEC_CS; /* Pull CS high */
1665 eec &= ~IXGBE_EEC_SK; /* Lower SCK */
1667 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1668 IXGBE_WRITE_FLUSH(hw);
1672 /* Stop requesting EEPROM access */
1673 eec &= ~IXGBE_EEC_REQ;
1674 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1676 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1679 * Delay before attempt to obtain semaphore again to allow FW
1680 * access. semaphore_delay is in ms we need us for usleep_range
1682 usleep_range(hw->eeprom.semaphore_delay * 1000,
1683 hw->eeprom.semaphore_delay * 2000);
1687 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
1688 * @hw: pointer to hardware structure
1690 u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
1699 /* Include 0x0-0x3F in the checksum */
1700 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
1701 if (hw->eeprom.ops.read(hw, i, &word) != 0) {
1702 hw_dbg(hw, "EEPROM read failed\n");
1708 /* Include all data from pointers except for the fw pointer */
1709 for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
1710 hw->eeprom.ops.read(hw, i, &pointer);
1712 /* Make sure the pointer seems valid */
1713 if (pointer != 0xFFFF && pointer != 0) {
1714 hw->eeprom.ops.read(hw, pointer, &length);
1716 if (length != 0xFFFF && length != 0) {
1717 for (j = pointer+1; j <= pointer+length; j++) {
1718 hw->eeprom.ops.read(hw, j, &word);
1725 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
1731 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
1732 * @hw: pointer to hardware structure
1733 * @checksum_val: calculated checksum
1735 * Performs checksum calculation and validates the EEPROM checksum. If the
1736 * caller does not need checksum_val, the value can be NULL.
1738 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
1743 u16 read_checksum = 0;
1746 * Read the first word from the EEPROM. If this times out or fails, do
1747 * not continue or we could be in for a very long wait while every
1750 status = hw->eeprom.ops.read(hw, 0, &checksum);
1753 checksum = hw->eeprom.ops.calc_checksum(hw);
1755 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
1758 * Verify read checksum from EEPROM is the same as
1759 * calculated checksum
1761 if (read_checksum != checksum)
1762 status = IXGBE_ERR_EEPROM_CHECKSUM;
1764 /* If the user cares, return the calculated checksum */
1766 *checksum_val = checksum;
1768 hw_dbg(hw, "EEPROM read failed\n");
1775 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
1776 * @hw: pointer to hardware structure
1778 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
1784 * Read the first word from the EEPROM. If this times out or fails, do
1785 * not continue or we could be in for a very long wait while every
1788 status = hw->eeprom.ops.read(hw, 0, &checksum);
1791 checksum = hw->eeprom.ops.calc_checksum(hw);
1792 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
1795 hw_dbg(hw, "EEPROM read failed\n");
1802 * ixgbe_set_rar_generic - Set Rx address register
1803 * @hw: pointer to hardware structure
1804 * @index: Receive address register to write
1805 * @addr: Address to put into receive address register
1806 * @vmdq: VMDq "set" or "pool" index
1807 * @enable_addr: set flag that address is active
1809 * Puts an ethernet address into a receive address register.
1811 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
1814 u32 rar_low, rar_high;
1815 u32 rar_entries = hw->mac.num_rar_entries;
1817 /* Make sure we are using a valid rar index range */
1818 if (index >= rar_entries) {
1819 hw_dbg(hw, "RAR index %d is out of range.\n", index);
1820 return IXGBE_ERR_INVALID_ARGUMENT;
1823 /* setup VMDq pool selection before this RAR gets enabled */
1824 hw->mac.ops.set_vmdq(hw, index, vmdq);
1827 * HW expects these in little endian so we reverse the byte
1828 * order from network order (big endian) to little endian
1830 rar_low = ((u32)addr[0] |
1831 ((u32)addr[1] << 8) |
1832 ((u32)addr[2] << 16) |
1833 ((u32)addr[3] << 24));
1835 * Some parts put the VMDq setting in the extra RAH bits,
1836 * so save everything except the lower 16 bits that hold part
1837 * of the address and the address valid bit.
1839 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1840 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1841 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
1843 if (enable_addr != 0)
1844 rar_high |= IXGBE_RAH_AV;
1846 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
1847 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1853 * ixgbe_clear_rar_generic - Remove Rx address register
1854 * @hw: pointer to hardware structure
1855 * @index: Receive address register to write
1857 * Clears an ethernet address from a receive address register.
1859 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
1862 u32 rar_entries = hw->mac.num_rar_entries;
1864 /* Make sure we are using a valid rar index range */
1865 if (index >= rar_entries) {
1866 hw_dbg(hw, "RAR index %d is out of range.\n", index);
1867 return IXGBE_ERR_INVALID_ARGUMENT;
1871 * Some parts put the VMDq setting in the extra RAH bits,
1872 * so save everything except the lower 16 bits that hold part
1873 * of the address and the address valid bit.
1875 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1876 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1878 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
1879 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1881 /* clear VMDq pool/queue selection for this RAR */
1882 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
1888 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
1889 * @hw: pointer to hardware structure
1891 * Places the MAC address in receive address register 0 and clears the rest
1892 * of the receive address registers. Clears the multicast table. Assumes
1893 * the receiver is in reset when the routine is called.
1895 s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
1898 u32 rar_entries = hw->mac.num_rar_entries;
1901 * If the current mac address is valid, assume it is a software override
1902 * to the permanent address.
1903 * Otherwise, use the permanent address from the eeprom.
1905 if (!is_valid_ether_addr(hw->mac.addr)) {
1906 /* Get the MAC address from the RAR0 for later reference */
1907 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
1909 hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr);
1911 /* Setup the receive address. */
1912 hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
1913 hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);
1915 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1917 /* clear VMDq pool/queue selection for RAR 0 */
1918 hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
1920 hw->addr_ctrl.overflow_promisc = 0;
1922 hw->addr_ctrl.rar_used_count = 1;
1924 /* Zero out the other receive addresses. */
1925 hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1);
1926 for (i = 1; i < rar_entries; i++) {
1927 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
1928 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
1932 hw->addr_ctrl.mta_in_use = 0;
1933 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
1935 hw_dbg(hw, " Clearing MTA\n");
1936 for (i = 0; i < hw->mac.mcft_size; i++)
1937 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
1939 if (hw->mac.ops.init_uta_tables)
1940 hw->mac.ops.init_uta_tables(hw);
1946 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
1947 * @hw: pointer to hardware structure
1948 * @mc_addr: the multicast address
1950 * Extracts the 12 bits, from a multicast address, to determine which
1951 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
1952 * incoming rx multicast addresses, to determine the bit-vector to check in
1953 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
1954 * by the MO field of the MCSTCTRL. The MO field is set during initialization
1955 * to mc_filter_type.
1957 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
1961 switch (hw->mac.mc_filter_type) {
1962 case 0: /* use bits [47:36] of the address */
1963 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
1965 case 1: /* use bits [46:35] of the address */
1966 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
1968 case 2: /* use bits [45:34] of the address */
1969 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
1971 case 3: /* use bits [43:32] of the address */
1972 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
1974 default: /* Invalid mc_filter_type */
1975 hw_dbg(hw, "MC filter type param set incorrectly\n");
1979 /* vector can only be 12-bits or boundary will be exceeded */
1985 * ixgbe_set_mta - Set bit-vector in multicast table
1986 * @hw: pointer to hardware structure
1987 * @hash_value: Multicast address hash value
1989 * Sets the bit-vector in the multicast table.
1991 static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
1997 hw->addr_ctrl.mta_in_use++;
1999 vector = ixgbe_mta_vector(hw, mc_addr);
2000 hw_dbg(hw, " bit-vector = 0x%03X\n", vector);
2003 * The MTA is a register array of 128 32-bit registers. It is treated
2004 * like an array of 4096 bits. We want to set bit
2005 * BitArray[vector_value]. So we figure out what register the bit is
2006 * in, read it, OR in the new bit, then write back the new value. The
2007 * register is determined by the upper 7 bits of the vector value and
2008 * the bit within that register are determined by the lower 5 bits of
2011 vector_reg = (vector >> 5) & 0x7F;
2012 vector_bit = vector & 0x1F;
2013 hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
2017 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
2018 * @hw: pointer to hardware structure
2019 * @netdev: pointer to net device structure
2021 * The given list replaces any existing list. Clears the MC addrs from receive
2022 * address registers and the multicast table. Uses unused receive address
2023 * registers for the first multicast addresses, and hashes the rest into the
2026 s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
2027 struct net_device *netdev)
2029 struct netdev_hw_addr *ha;
2033 * Set the new number of MC addresses that we are being requested to
2036 hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
2037 hw->addr_ctrl.mta_in_use = 0;
2039 /* Clear mta_shadow */
2040 hw_dbg(hw, " Clearing MTA\n");
2041 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
2043 /* Update mta shadow */
2044 netdev_for_each_mc_addr(ha, netdev) {
2045 hw_dbg(hw, " Adding the multicast addresses:\n");
2046 ixgbe_set_mta(hw, ha->addr);
2050 for (i = 0; i < hw->mac.mcft_size; i++)
2051 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
2052 hw->mac.mta_shadow[i]);
2054 if (hw->addr_ctrl.mta_in_use > 0)
2055 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
2056 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2058 hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n");
2063 * ixgbe_enable_mc_generic - Enable multicast address in RAR
2064 * @hw: pointer to hardware structure
2066 * Enables multicast address in RAR and the use of the multicast hash table.
2068 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2070 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2072 if (a->mta_in_use > 0)
2073 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2074 hw->mac.mc_filter_type);
2080 * ixgbe_disable_mc_generic - Disable multicast address in RAR
2081 * @hw: pointer to hardware structure
2083 * Disables multicast address in RAR and the use of the multicast hash table.
2085 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2087 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2089 if (a->mta_in_use > 0)
2090 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2096 * ixgbe_fc_enable_generic - Enable flow control
2097 * @hw: pointer to hardware structure
2099 * Enable flow control according to the current settings.
2101 s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
2104 u32 mflcn_reg, fccfg_reg;
2109 /* Validate the water mark configuration. */
2110 if (!hw->fc.pause_time) {
2111 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2115 /* Low water mark of zero causes XOFF floods */
2116 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
2117 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2118 hw->fc.high_water[i]) {
2119 if (!hw->fc.low_water[i] ||
2120 hw->fc.low_water[i] >= hw->fc.high_water[i]) {
2121 hw_dbg(hw, "Invalid water mark configuration\n");
2122 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2128 /* Negotiate the fc mode to use */
2129 ixgbe_fc_autoneg(hw);
2131 /* Disable any previous flow control settings */
2132 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
2133 mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
2135 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
2136 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
2139 * The possible values of fc.current_mode are:
2140 * 0: Flow control is completely disabled
2141 * 1: Rx flow control is enabled (we can receive pause frames,
2142 * but not send pause frames).
2143 * 2: Tx flow control is enabled (we can send pause frames but
2144 * we do not support receiving pause frames).
2145 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2148 switch (hw->fc.current_mode) {
2151 * Flow control is disabled by software override or autoneg.
2152 * The code below will actually disable it in the HW.
2155 case ixgbe_fc_rx_pause:
2157 * Rx Flow control is enabled and Tx Flow control is
2158 * disabled by software override. Since there really
2159 * isn't a way to advertise that we are capable of RX
2160 * Pause ONLY, we will advertise that we support both
2161 * symmetric and asymmetric Rx PAUSE. Later, we will
2162 * disable the adapter's ability to send PAUSE frames.
2164 mflcn_reg |= IXGBE_MFLCN_RFCE;
2166 case ixgbe_fc_tx_pause:
2168 * Tx Flow control is enabled, and Rx Flow control is
2169 * disabled by software override.
2171 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2174 /* Flow control (both Rx and Tx) is enabled by SW override. */
2175 mflcn_reg |= IXGBE_MFLCN_RFCE;
2176 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2179 hw_dbg(hw, "Flow control param set incorrectly\n");
2180 ret_val = IXGBE_ERR_CONFIG;
2185 /* Set 802.3x based flow control settings. */
2186 mflcn_reg |= IXGBE_MFLCN_DPF;
2187 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
2188 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
2190 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
2191 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
2192 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2193 hw->fc.high_water[i]) {
2194 fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
2195 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
2196 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
2198 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
2200 * In order to prevent Tx hangs when the internal Tx
2201 * switch is enabled we must set the high water mark
2202 * to the maximum FCRTH value. This allows the Tx
2203 * switch to function even under heavy Rx workloads.
2205 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
2208 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
2211 /* Configure pause time (2 TCs per register) */
2212 reg = hw->fc.pause_time * 0x00010001;
2213 for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
2214 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
2216 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
2223 * ixgbe_negotiate_fc - Negotiate flow control
2224 * @hw: pointer to hardware structure
2225 * @adv_reg: flow control advertised settings
2226 * @lp_reg: link partner's flow control settings
2227 * @adv_sym: symmetric pause bit in advertisement
2228 * @adv_asm: asymmetric pause bit in advertisement
2229 * @lp_sym: symmetric pause bit in link partner advertisement
2230 * @lp_asm: asymmetric pause bit in link partner advertisement
2232 * Find the intersection between advertised settings and link partner's
2233 * advertised settings
2235 static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2236 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2238 if ((!(adv_reg)) || (!(lp_reg)))
2239 return IXGBE_ERR_FC_NOT_NEGOTIATED;
2241 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2243 * Now we need to check if the user selected Rx ONLY
2244 * of pause frames. In this case, we had to advertise
2245 * FULL flow control because we could not advertise RX
2246 * ONLY. Hence, we must now check to see if we need to
2247 * turn OFF the TRANSMISSION of PAUSE frames.
2249 if (hw->fc.requested_mode == ixgbe_fc_full) {
2250 hw->fc.current_mode = ixgbe_fc_full;
2251 hw_dbg(hw, "Flow Control = FULL.\n");
2253 hw->fc.current_mode = ixgbe_fc_rx_pause;
2254 hw_dbg(hw, "Flow Control=RX PAUSE frames only\n");
2256 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2257 (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2258 hw->fc.current_mode = ixgbe_fc_tx_pause;
2259 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
2260 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2261 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2262 hw->fc.current_mode = ixgbe_fc_rx_pause;
2263 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
2265 hw->fc.current_mode = ixgbe_fc_none;
2266 hw_dbg(hw, "Flow Control = NONE.\n");
2272 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
2273 * @hw: pointer to hardware structure
2275 * Enable flow control according on 1 gig fiber.
2277 static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2279 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
2280 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2283 * On multispeed fiber at 1g, bail out if
2284 * - link is up but AN did not complete, or if
2285 * - link is up and AN completed but timed out
2288 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
2289 if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
2290 (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1))
2293 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2294 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
2296 ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
2297 pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
2298 IXGBE_PCS1GANA_ASM_PAUSE,
2299 IXGBE_PCS1GANA_SYM_PAUSE,
2300 IXGBE_PCS1GANA_ASM_PAUSE);
2307 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
2308 * @hw: pointer to hardware structure
2310 * Enable flow control according to IEEE clause 37.
2312 static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
2314 u32 links2, anlp1_reg, autoc_reg, links;
2315 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2318 * On backplane, bail out if
2319 * - backplane autoneg was not completed, or if
2320 * - we are 82599 and link partner is not AN enabled
2322 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
2323 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0)
2326 if (hw->mac.type == ixgbe_mac_82599EB) {
2327 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
2328 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)
2332 * Read the 10g AN autoc and LP ability registers and resolve
2333 * local flow control settings accordingly
2335 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2336 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2338 ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
2339 anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
2340 IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
2347 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
2348 * @hw: pointer to hardware structure
2350 * Enable flow control according to IEEE clause 37.
2352 static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
2354 u16 technology_ability_reg = 0;
2355 u16 lp_technology_ability_reg = 0;
2357 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
2359 &technology_ability_reg);
2360 hw->phy.ops.read_reg(hw, MDIO_AN_LPA,
2362 &lp_technology_ability_reg);
2364 return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
2365 (u32)lp_technology_ability_reg,
2366 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
2367 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
2371 * ixgbe_fc_autoneg - Configure flow control
2372 * @hw: pointer to hardware structure
2374 * Compares our advertised flow control capabilities to those advertised by
2375 * our link partner, and determines the proper flow control mode to use.
2377 void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
2379 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2380 ixgbe_link_speed speed;
2384 * AN should have completed when the cable was plugged in.
2385 * Look for reasons to bail out. Bail out if:
2386 * - FC autoneg is disabled, or if
2389 * Since we're being called from an LSC, link is already known to be up.
2390 * So use link_up_wait_to_complete=false.
2392 if (hw->fc.disable_fc_autoneg)
2395 hw->mac.ops.check_link(hw, &speed, &link_up, false);
2399 switch (hw->phy.media_type) {
2400 /* Autoneg flow control on fiber adapters */
2401 case ixgbe_media_type_fiber:
2402 if (speed == IXGBE_LINK_SPEED_1GB_FULL)
2403 ret_val = ixgbe_fc_autoneg_fiber(hw);
2406 /* Autoneg flow control on backplane adapters */
2407 case ixgbe_media_type_backplane:
2408 ret_val = ixgbe_fc_autoneg_backplane(hw);
2411 /* Autoneg flow control on copper adapters */
2412 case ixgbe_media_type_copper:
2413 if (ixgbe_device_supports_autoneg_fc(hw))
2414 ret_val = ixgbe_fc_autoneg_copper(hw);
2423 hw->fc.fc_was_autonegged = true;
2425 hw->fc.fc_was_autonegged = false;
2426 hw->fc.current_mode = hw->fc.requested_mode;
2431 * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
2432 * @hw: pointer to hardware structure
2434 * System-wide timeout range is encoded in PCIe Device Control2 register.
2436 * Add 10% to specified maximum and return the number of times to poll for
2437 * completion timeout, in units of 100 microsec. Never return less than
2438 * 800 = 80 millisec.
2440 static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
2445 devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2);
2446 devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
2449 case IXGBE_PCIDEVCTRL2_65_130ms:
2450 pollcnt = 1300; /* 130 millisec */
2452 case IXGBE_PCIDEVCTRL2_260_520ms:
2453 pollcnt = 5200; /* 520 millisec */
2455 case IXGBE_PCIDEVCTRL2_1_2s:
2456 pollcnt = 20000; /* 2 sec */
2458 case IXGBE_PCIDEVCTRL2_4_8s:
2459 pollcnt = 80000; /* 8 sec */
2461 case IXGBE_PCIDEVCTRL2_17_34s:
2462 pollcnt = 34000; /* 34 sec */
2464 case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */
2465 case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */
2466 case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */
2467 case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */
2469 pollcnt = 800; /* 80 millisec minimum */
2473 /* add 10% to spec maximum */
2474 return (pollcnt * 11) / 10;
2478 * ixgbe_disable_pcie_master - Disable PCI-express master access
2479 * @hw: pointer to hardware structure
2481 * Disables PCI-Express master access and verifies there are no pending
2482 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
2483 * bit hasn't caused the master requests to be disabled, else 0
2484 * is returned signifying master requests disabled.
2486 static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2492 /* Always set this bit to ensure any future transactions are blocked */
2493 IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
2495 /* Exit if master requests are blocked */
2496 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
2497 ixgbe_removed(hw->hw_addr))
2500 /* Poll for master request bit to clear */
2501 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
2503 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
2508 * Two consecutive resets are required via CTRL.RST per datasheet
2509 * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
2510 * of this need. The first reset prevents new master requests from
2511 * being issued by our device. We then must wait 1usec or more for any
2512 * remaining completions from the PCIe bus to trickle in, and then reset
2513 * again to clear out any effects they may have had on our device.
2515 hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n");
2516 hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2519 * Before proceeding, make sure that the PCIe block does not have
2520 * transactions pending.
2522 poll = ixgbe_pcie_timeout_poll(hw);
2523 for (i = 0; i < poll; i++) {
2525 value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS);
2526 if (ixgbe_removed(hw->hw_addr))
2528 if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
2532 hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
2533 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
2540 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
2541 * @hw: pointer to hardware structure
2542 * @mask: Mask to specify which semaphore to acquire
2544 * Acquires the SWFW semaphore through the GSSR register for the specified
2545 * function (CSR, PHY0, PHY1, EEPROM, Flash)
2547 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2551 u32 fwmask = mask << 5;
2555 for (i = 0; i < timeout; i++) {
2557 * SW NVM semaphore bit is used for access to all
2558 * SW_FW_SYNC bits (not just NVM)
2560 if (ixgbe_get_eeprom_semaphore(hw))
2561 return IXGBE_ERR_SWFW_SYNC;
2563 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2564 if (!(gssr & (fwmask | swmask))) {
2566 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2567 ixgbe_release_eeprom_semaphore(hw);
2570 /* Resource is currently in use by FW or SW */
2571 ixgbe_release_eeprom_semaphore(hw);
2572 usleep_range(5000, 10000);
2576 /* If time expired clear the bits holding the lock and retry */
2577 if (gssr & (fwmask | swmask))
2578 ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
2580 usleep_range(5000, 10000);
2581 return IXGBE_ERR_SWFW_SYNC;
2585 * ixgbe_release_swfw_sync - Release SWFW semaphore
2586 * @hw: pointer to hardware structure
2587 * @mask: Mask to specify which semaphore to release
2589 * Releases the SWFW semaphore through the GSSR register for the specified
2590 * function (CSR, PHY0, PHY1, EEPROM, Flash)
2592 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2597 ixgbe_get_eeprom_semaphore(hw);
2599 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2601 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2603 ixgbe_release_eeprom_semaphore(hw);
2607 * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
2608 * @hw: pointer to hardware structure
2609 * @reg_val: Value we read from AUTOC
2610 * @locked: bool to indicate whether the SW/FW lock should be taken. Never
2611 * true in this the generic case.
2613 * The default case requires no protection so just to the register read.
2615 s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
2618 *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2623 * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
2624 * @hw: pointer to hardware structure
2625 * @reg_val: value to write to AUTOC
2626 * @locked: bool to indicate whether the SW/FW lock was already taken by
2629 s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
2631 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
2636 * ixgbe_disable_rx_buff_generic - Stops the receive data path
2637 * @hw: pointer to hardware structure
2639 * Stops the receive data path and waits for the HW to internally
2640 * empty the Rx security block.
2642 s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
2644 #define IXGBE_MAX_SECRX_POLL 40
2648 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2649 secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
2650 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2651 for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
2652 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
2653 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
2656 /* Use interrupt-safe sleep just in case */
2660 /* For informational purposes only */
2661 if (i >= IXGBE_MAX_SECRX_POLL)
2662 hw_dbg(hw, "Rx unit being enabled before security path fully disabled. Continuing with init.\n");
2669 * ixgbe_enable_rx_buff - Enables the receive data path
2670 * @hw: pointer to hardware structure
2672 * Enables the receive data path
2674 s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw)
2678 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2679 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
2680 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2681 IXGBE_WRITE_FLUSH(hw);
2687 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
2688 * @hw: pointer to hardware structure
2689 * @regval: register value to write to RXCTRL
2691 * Enables the Rx DMA unit
2693 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
2695 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
2701 * ixgbe_blink_led_start_generic - Blink LED based on index.
2702 * @hw: pointer to hardware structure
2703 * @index: led number to blink
2705 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
2707 ixgbe_link_speed speed = 0;
2708 bool link_up = false;
2709 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2710 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2712 bool locked = false;
2715 * Link must be up to auto-blink the LEDs;
2716 * Force it if link is down.
2718 hw->mac.ops.check_link(hw, &speed, &link_up, false);
2721 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
2725 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2726 autoc_reg |= IXGBE_AUTOC_FLU;
2728 ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
2732 IXGBE_WRITE_FLUSH(hw);
2734 usleep_range(10000, 20000);
2737 led_reg &= ~IXGBE_LED_MODE_MASK(index);
2738 led_reg |= IXGBE_LED_BLINK(index);
2739 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2740 IXGBE_WRITE_FLUSH(hw);
2747 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
2748 * @hw: pointer to hardware structure
2749 * @index: led number to stop blinking
2751 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
2754 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2756 bool locked = false;
2758 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
2762 autoc_reg &= ~IXGBE_AUTOC_FLU;
2763 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2765 ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
2769 led_reg &= ~IXGBE_LED_MODE_MASK(index);
2770 led_reg &= ~IXGBE_LED_BLINK(index);
2771 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
2772 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2773 IXGBE_WRITE_FLUSH(hw);
2780 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
2781 * @hw: pointer to hardware structure
2782 * @san_mac_offset: SAN MAC address offset
2784 * This function will read the EEPROM location for the SAN MAC address
2785 * pointer, and returns the value at that location. This is used in both
2786 * get and set mac_addr routines.
2788 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
2789 u16 *san_mac_offset)
2794 * First read the EEPROM pointer to see if the MAC addresses are
2797 ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
2800 hw_err(hw, "eeprom read at offset %d failed\n",
2801 IXGBE_SAN_MAC_ADDR_PTR);
2807 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
2808 * @hw: pointer to hardware structure
2809 * @san_mac_addr: SAN MAC address
2811 * Reads the SAN MAC address from the EEPROM, if it's available. This is
2812 * per-port, so set_lan_id() must be called before reading the addresses.
2813 * set_lan_id() is called by identify_sfp(), but this cannot be relied
2814 * upon for non-SFP connections, so we must call it here.
2816 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
2818 u16 san_mac_data, san_mac_offset;
2823 * First read the EEPROM pointer to see if the MAC addresses are
2824 * available. If they're not, no point in calling set_lan_id() here.
2826 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
2827 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
2829 goto san_mac_addr_clr;
2831 /* make sure we know which port we need to program */
2832 hw->mac.ops.set_lan_id(hw);
2833 /* apply the port offset to the address offset */
2834 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
2835 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
2836 for (i = 0; i < 3; i++) {
2837 ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
2840 hw_err(hw, "eeprom read at offset %d failed\n",
2842 goto san_mac_addr_clr;
2844 san_mac_addr[i * 2] = (u8)(san_mac_data);
2845 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
2851 /* No addresses available in this EEPROM. It's not necessarily an
2852 * error though, so just wipe the local address and return.
2854 for (i = 0; i < 6; i++)
2855 san_mac_addr[i] = 0xFF;
2860 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
2861 * @hw: pointer to hardware structure
2863 * Read PCIe configuration space, and get the MSI-X vector count from
2864 * the capabilities table.
2866 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
2872 switch (hw->mac.type) {
2873 case ixgbe_mac_82598EB:
2874 pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
2875 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
2877 case ixgbe_mac_82599EB:
2878 case ixgbe_mac_X540:
2879 pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
2880 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
2886 msix_count = ixgbe_read_pci_cfg_word(hw, pcie_offset);
2887 if (ixgbe_removed(hw->hw_addr))
2889 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
2891 /* MSI-X count is zero-based in HW */
2894 if (msix_count > max_msix_count)
2895 msix_count = max_msix_count;
2901 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
2902 * @hw: pointer to hardware struct
2903 * @rar: receive address register index to disassociate
2904 * @vmdq: VMDq pool index to remove from the rar
2906 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
2908 u32 mpsar_lo, mpsar_hi;
2909 u32 rar_entries = hw->mac.num_rar_entries;
2911 /* Make sure we are using a valid rar index range */
2912 if (rar >= rar_entries) {
2913 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
2914 return IXGBE_ERR_INVALID_ARGUMENT;
2917 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
2918 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
2920 if (ixgbe_removed(hw->hw_addr))
2923 if (!mpsar_lo && !mpsar_hi)
2926 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
2928 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
2932 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
2935 } else if (vmdq < 32) {
2936 mpsar_lo &= ~(1 << vmdq);
2937 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
2939 mpsar_hi &= ~(1 << (vmdq - 32));
2940 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
2943 /* was that the last pool using this rar? */
2944 if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
2945 hw->mac.ops.clear_rar(hw, rar);
2951 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
2952 * @hw: pointer to hardware struct
2953 * @rar: receive address register index to associate with a VMDq index
2954 * @vmdq: VMDq pool index
2956 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
2959 u32 rar_entries = hw->mac.num_rar_entries;
2961 /* Make sure we are using a valid rar index range */
2962 if (rar >= rar_entries) {
2963 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
2964 return IXGBE_ERR_INVALID_ARGUMENT;
2968 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
2970 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
2972 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
2973 mpsar |= 1 << (vmdq - 32);
2974 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
2980 * This function should only be involved in the IOV mode.
2981 * In IOV mode, Default pool is next pool after the number of
2982 * VFs advertized and not 0.
2983 * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
2985 * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
2986 * @hw: pointer to hardware struct
2987 * @vmdq: VMDq pool index
2989 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
2991 u32 rar = hw->mac.san_mac_rar_index;
2994 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
2995 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
2997 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
2998 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
3005 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3006 * @hw: pointer to hardware structure
3008 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3012 for (i = 0; i < 128; i++)
3013 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3019 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
3020 * @hw: pointer to hardware structure
3021 * @vlan: VLAN id to write to VLAN filter
3023 * return the VLVF index where this VLAN id should be placed
3026 static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
3029 u32 first_empty_slot = 0;
3032 /* short cut the special case */
3037 * Search for the vlan id in the VLVF entries. Save off the first empty
3038 * slot found along the way
3040 for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
3041 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
3042 if (!bits && !(first_empty_slot))
3043 first_empty_slot = regindex;
3044 else if ((bits & 0x0FFF) == vlan)
3049 * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
3050 * in the VLVF. Else use the first empty VLVF register for this
3053 if (regindex >= IXGBE_VLVF_ENTRIES) {
3054 if (first_empty_slot)
3055 regindex = first_empty_slot;
3057 hw_dbg(hw, "No space in VLVF.\n");
3058 regindex = IXGBE_ERR_NO_SPACE;
3066 * ixgbe_set_vfta_generic - Set VLAN filter table
3067 * @hw: pointer to hardware structure
3068 * @vlan: VLAN id to write to VLAN filter
3069 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
3070 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
3072 * Turn on/off specified VLAN in the VLAN filter table.
3074 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3083 bool vfta_changed = false;
3086 return IXGBE_ERR_PARAM;
3089 * this is a 2 part operation - first the VFTA, then the
3090 * VLVF and VLVFB if VT Mode is set
3091 * We don't write the VFTA until we know the VLVF part succeeded.
3095 * The VFTA is a bitstring made up of 128 32-bit registers
3096 * that enable the particular VLAN id, much like the MTA:
3097 * bits[11-5]: which register
3098 * bits[4-0]: which bit in the register
3100 regindex = (vlan >> 5) & 0x7F;
3101 bitindex = vlan & 0x1F;
3102 targetbit = (1 << bitindex);
3103 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
3106 if (!(vfta & targetbit)) {
3108 vfta_changed = true;
3111 if ((vfta & targetbit)) {
3113 vfta_changed = true;
3120 * make sure the vlan is in VLVF
3121 * set the vind bit in the matching VLVFB
3123 * clear the pool bit and possibly the vind
3125 vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
3126 if (vt & IXGBE_VT_CTL_VT_ENABLE) {
3129 vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
3134 /* set the pool bit */
3136 bits = IXGBE_READ_REG(hw,
3137 IXGBE_VLVFB(vlvf_index*2));
3138 bits |= (1 << vind);
3140 IXGBE_VLVFB(vlvf_index*2),
3143 bits = IXGBE_READ_REG(hw,
3144 IXGBE_VLVFB((vlvf_index*2)+1));
3145 bits |= (1 << (vind-32));
3147 IXGBE_VLVFB((vlvf_index*2)+1),
3151 /* clear the pool bit */
3153 bits = IXGBE_READ_REG(hw,
3154 IXGBE_VLVFB(vlvf_index*2));
3155 bits &= ~(1 << vind);
3157 IXGBE_VLVFB(vlvf_index*2),
3159 bits |= IXGBE_READ_REG(hw,
3160 IXGBE_VLVFB((vlvf_index*2)+1));
3162 bits = IXGBE_READ_REG(hw,
3163 IXGBE_VLVFB((vlvf_index*2)+1));
3164 bits &= ~(1 << (vind-32));
3166 IXGBE_VLVFB((vlvf_index*2)+1),
3168 bits |= IXGBE_READ_REG(hw,
3169 IXGBE_VLVFB(vlvf_index*2));
3174 * If there are still bits set in the VLVFB registers
3175 * for the VLAN ID indicated we need to see if the
3176 * caller is requesting that we clear the VFTA entry bit.
3177 * If the caller has requested that we clear the VFTA
3178 * entry bit but there are still pools/VFs using this VLAN
3179 * ID entry then ignore the request. We're not worried
3180 * about the case where we're turning the VFTA VLAN ID
3181 * entry bit on, only when requested to turn it off as
3182 * there may be multiple pools and/or VFs using the
3183 * VLAN ID entry. In that case we cannot clear the
3184 * VFTA bit until all pools/VFs using that VLAN ID have also
3185 * been cleared. This will be indicated by "bits" being
3189 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
3190 (IXGBE_VLVF_VIEN | vlan));
3192 /* someone wants to clear the vfta entry
3193 * but some pools/VFs are still using it.
3195 vfta_changed = false;
3198 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
3203 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
3209 * ixgbe_clear_vfta_generic - Clear VLAN filter table
3210 * @hw: pointer to hardware structure
3212 * Clears the VLAN filer table, and the VMDq index associated with the filter
3214 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
3218 for (offset = 0; offset < hw->mac.vft_size; offset++)
3219 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
3221 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
3222 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
3223 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0);
3224 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0);
3231 * ixgbe_check_mac_link_generic - Determine link and speed status
3232 * @hw: pointer to hardware structure
3233 * @speed: pointer to link speed
3234 * @link_up: true when link is up
3235 * @link_up_wait_to_complete: bool used to wait for link up or not
3237 * Reads the links register to determine if link is up and the current speed
3239 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
3240 bool *link_up, bool link_up_wait_to_complete)
3242 u32 links_reg, links_orig;
3245 /* clear the old state */
3246 links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
3248 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
3250 if (links_orig != links_reg) {
3251 hw_dbg(hw, "LINKS changed from %08X to %08X\n",
3252 links_orig, links_reg);
3255 if (link_up_wait_to_complete) {
3256 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
3257 if (links_reg & IXGBE_LINKS_UP) {
3264 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
3267 if (links_reg & IXGBE_LINKS_UP)
3273 if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3274 IXGBE_LINKS_SPEED_10G_82599)
3275 *speed = IXGBE_LINK_SPEED_10GB_FULL;
3276 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3277 IXGBE_LINKS_SPEED_1G_82599)
3278 *speed = IXGBE_LINK_SPEED_1GB_FULL;
3279 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3280 IXGBE_LINKS_SPEED_100_82599)
3281 *speed = IXGBE_LINK_SPEED_100_FULL;
3283 *speed = IXGBE_LINK_SPEED_UNKNOWN;
3289 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
3291 * @hw: pointer to hardware structure
3292 * @wwnn_prefix: the alternative WWNN prefix
3293 * @wwpn_prefix: the alternative WWPN prefix
3295 * This function will read the EEPROM from the alternative SAN MAC address
3296 * block to check the support for the alternative WWNN/WWPN prefix support.
3298 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
3302 u16 alt_san_mac_blk_offset;
3304 /* clear output first */
3305 *wwnn_prefix = 0xFFFF;
3306 *wwpn_prefix = 0xFFFF;
3308 /* check if alternative SAN MAC is supported */
3309 offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
3310 if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
3311 goto wwn_prefix_err;
3313 if ((alt_san_mac_blk_offset == 0) ||
3314 (alt_san_mac_blk_offset == 0xFFFF))
3315 goto wwn_prefix_out;
3317 /* check capability in alternative san mac address block */
3318 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
3319 if (hw->eeprom.ops.read(hw, offset, &caps))
3320 goto wwn_prefix_err;
3321 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
3322 goto wwn_prefix_out;
3324 /* get the corresponding prefix for WWNN/WWPN */
3325 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
3326 if (hw->eeprom.ops.read(hw, offset, wwnn_prefix))
3327 hw_err(hw, "eeprom read at offset %d failed\n", offset);
3329 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
3330 if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
3331 goto wwn_prefix_err;
3337 hw_err(hw, "eeprom read at offset %d failed\n", offset);
3342 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
3343 * @hw: pointer to hardware structure
3344 * @enable: enable or disable switch for anti-spoofing
3345 * @pf: Physical Function pool - do not enable anti-spoofing for the PF
3348 void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
3351 int pf_target_reg = pf >> 3;
3352 int pf_target_shift = pf % 8;
3355 if (hw->mac.type == ixgbe_mac_82598EB)
3359 pfvfspoof = IXGBE_SPOOF_MACAS_MASK;
3362 * PFVFSPOOF register array is size 8 with 8 bits assigned to
3363 * MAC anti-spoof enables in each register array element.
3365 for (j = 0; j < pf_target_reg; j++)
3366 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
3369 * The PF should be allowed to spoof so that it can support
3370 * emulation mode NICs. Do not set the bits assigned to the PF
3372 pfvfspoof &= (1 << pf_target_shift) - 1;
3373 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
3376 * Remaining pools belong to the PF so they do not need to have
3377 * anti-spoofing enabled.
3379 for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
3380 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
3384 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
3385 * @hw: pointer to hardware structure
3386 * @enable: enable or disable switch for VLAN anti-spoofing
3387 * @pf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
3390 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
3392 int vf_target_reg = vf >> 3;
3393 int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
3396 if (hw->mac.type == ixgbe_mac_82598EB)
3399 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
3401 pfvfspoof |= (1 << vf_target_shift);
3403 pfvfspoof &= ~(1 << vf_target_shift);
3404 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
3408 * ixgbe_get_device_caps_generic - Get additional device capabilities
3409 * @hw: pointer to hardware structure
3410 * @device_caps: the EEPROM word with the extra device capabilities
3412 * This function will read the EEPROM location for the device capabilities,
3413 * and return the word through device_caps.
3415 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
3417 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
3423 * ixgbe_set_rxpba_generic - Initialize RX packet buffer
3424 * @hw: pointer to hardware structure
3425 * @num_pb: number of packet buffers to allocate
3426 * @headroom: reserve n KB of headroom
3427 * @strategy: packet buffer allocation strategy
3429 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw,
3434 u32 pbsize = hw->mac.rx_pb_size;
3436 u32 rxpktsize, txpktsize, txpbthresh;
3438 /* Reserve headroom */
3444 /* Divide remaining packet buffer space amongst the number
3445 * of packet buffers requested using supplied strategy.
3448 case (PBA_STRATEGY_WEIGHTED):
3449 /* pba_80_48 strategy weight first half of packet buffer with
3450 * 5/8 of the packet buffer space.
3452 rxpktsize = ((pbsize * 5 * 2) / (num_pb * 8));
3453 pbsize -= rxpktsize * (num_pb / 2);
3454 rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
3455 for (; i < (num_pb / 2); i++)
3456 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
3457 /* Fall through to configure remaining packet buffers */
3458 case (PBA_STRATEGY_EQUAL):
3459 /* Divide the remaining Rx packet buffer evenly among the TCs */
3460 rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
3461 for (; i < num_pb; i++)
3462 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
3469 * Setup Tx packet buffer and threshold equally for all TCs
3470 * TXPBTHRESH register is set in K so divide by 1024 and subtract
3471 * 10 since the largest packet we support is just over 9K.
3473 txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
3474 txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
3475 for (i = 0; i < num_pb; i++) {
3476 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
3477 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
3480 /* Clear unused TCs, if any, to zero buffer size*/
3481 for (; i < IXGBE_MAX_PB; i++) {
3482 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
3483 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
3484 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
3489 * ixgbe_calculate_checksum - Calculate checksum for buffer
3490 * @buffer: pointer to EEPROM
3491 * @length: size of EEPROM to calculate a checksum for
3493 * Calculates the checksum for some buffer on a specified length. The
3494 * checksum calculated is returned.
3496 static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
3504 for (i = 0; i < length; i++)
3507 return (u8) (0 - sum);
3511 * ixgbe_host_interface_command - Issue command to manageability block
3512 * @hw: pointer to the HW structure
3513 * @buffer: contains the command to write and where the return status will
3515 * @length: length of buffer, must be multiple of 4 bytes
3517 * Communicates with the manageability block. On success return 0
3518 * else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
3520 static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
3524 u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
3525 u8 buf_len, dword_len;
3529 if (length == 0 || length & 0x3 ||
3530 length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
3531 hw_dbg(hw, "Buffer length failure.\n");
3532 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
3536 /* Check that the host interface is enabled. */
3537 hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
3538 if ((hicr & IXGBE_HICR_EN) == 0) {
3539 hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n");
3540 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
3544 /* Calculate length in DWORDs */
3545 dword_len = length >> 2;
3548 * The device driver writes the relevant command block
3549 * into the ram area.
3551 for (i = 0; i < dword_len; i++)
3552 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
3553 i, cpu_to_le32(buffer[i]));
3555 /* Setting this bit tells the ARC that a new command is pending. */
3556 IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
3558 for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) {
3559 hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
3560 if (!(hicr & IXGBE_HICR_C))
3562 usleep_range(1000, 2000);
3565 /* Check command successful completion. */
3566 if (i == IXGBE_HI_COMMAND_TIMEOUT ||
3567 (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
3568 hw_dbg(hw, "Command has failed with no status valid.\n");
3569 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
3573 /* Calculate length in DWORDs */
3574 dword_len = hdr_size >> 2;
3576 /* first pull in the header so we know the buffer length */
3577 for (bi = 0; bi < dword_len; bi++) {
3578 buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
3579 le32_to_cpus(&buffer[bi]);
3582 /* If there is any thing in data position pull it in */
3583 buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
3587 if (length < (buf_len + hdr_size)) {
3588 hw_dbg(hw, "Buffer not large enough for reply message.\n");
3589 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
3593 /* Calculate length in DWORDs, add 3 for odd lengths */
3594 dword_len = (buf_len + 3) >> 2;
3596 /* Pull in the rest of the buffer (bi is where we left off)*/
3597 for (; bi <= dword_len; bi++) {
3598 buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
3599 le32_to_cpus(&buffer[bi]);
3607 * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
3608 * @hw: pointer to the HW structure
3609 * @maj: driver version major number
3610 * @min: driver version minor number
3611 * @build: driver version build number
3612 * @sub: driver version sub build number
3614 * Sends driver version number to firmware through the manageability
3615 * block. On success return 0
3616 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
3617 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
3619 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
3622 struct ixgbe_hic_drv_info fw_cmd;
3626 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM) != 0) {
3627 ret_val = IXGBE_ERR_SWFW_SYNC;
3631 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
3632 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
3633 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
3634 fw_cmd.port_num = (u8)hw->bus.func;
3635 fw_cmd.ver_maj = maj;
3636 fw_cmd.ver_min = min;
3637 fw_cmd.ver_build = build;
3638 fw_cmd.ver_sub = sub;
3639 fw_cmd.hdr.checksum = 0;
3640 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
3641 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
3645 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
3646 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
3651 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
3652 FW_CEM_RESP_STATUS_SUCCESS)
3655 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
3660 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
3666 * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
3667 * @hw: pointer to the hardware structure
3669 * The 82599 and x540 MACs can experience issues if TX work is still pending
3670 * when a reset occurs. This function prevents this by flushing the PCIe
3671 * buffers on the system.
3673 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
3675 u32 gcr_ext, hlreg0;
3678 * If double reset is not requested then all transactions should
3679 * already be clear and as such there is no work to do
3681 if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
3685 * Set loopback enable to prevent any transmits from being sent
3686 * should the link come up. This assumes that the RXCTRL.RXEN bit
3687 * has already been cleared.
3689 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3690 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
3692 /* initiate cleaning flow for buffers in the PCIe transaction layer */
3693 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
3694 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
3695 gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
3697 /* Flush all writes and allow 20usec for all transactions to clear */
3698 IXGBE_WRITE_FLUSH(hw);
3701 /* restore previous register values */
3702 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
3703 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
3706 static const u8 ixgbe_emc_temp_data[4] = {
3707 IXGBE_EMC_INTERNAL_DATA,
3708 IXGBE_EMC_DIODE1_DATA,
3709 IXGBE_EMC_DIODE2_DATA,
3710 IXGBE_EMC_DIODE3_DATA
3712 static const u8 ixgbe_emc_therm_limit[4] = {
3713 IXGBE_EMC_INTERNAL_THERM_LIMIT,
3714 IXGBE_EMC_DIODE1_THERM_LIMIT,
3715 IXGBE_EMC_DIODE2_THERM_LIMIT,
3716 IXGBE_EMC_DIODE3_THERM_LIMIT
/**
 *  ixgbe_get_ets_data - Extracts the ETS bit data
 *  @hw: pointer to hardware structure
 *  @ets_cfg: extracted ETS configuration word
 *  @ets_offset: offset of ETS data in the NVM
 *
 *  Returns error code.
 **/
3727 static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
3732 status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, ets_offset);
3736 if ((*ets_offset == 0x0000) || (*ets_offset == 0xFFFF)) {
3737 status = IXGBE_NOT_IMPLEMENTED;
3741 status = hw->eeprom.ops.read(hw, *ets_offset, ets_cfg);
3745 if ((*ets_cfg & IXGBE_ETS_TYPE_MASK) != IXGBE_ETS_TYPE_EMC_SHIFTED) {
3746 status = IXGBE_NOT_IMPLEMENTED;
/**
 *  ixgbe_get_thermal_sensor_data_generic - Gathers thermal sensor data
 *  @hw: pointer to hardware structure
 *
 *  Returns the thermal sensor data structure
 **/
3760 s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
3768 struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
3770 /* Only support thermal sensors attached to physical port 0 */
3771 if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) {
3772 status = IXGBE_NOT_IMPLEMENTED;
3776 status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
3780 num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
3781 if (num_sensors > IXGBE_MAX_SENSORS)
3782 num_sensors = IXGBE_MAX_SENSORS;
3784 for (i = 0; i < num_sensors; i++) {
3788 status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i),
3793 sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
3794 IXGBE_ETS_DATA_INDEX_SHIFT);
3795 sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
3796 IXGBE_ETS_DATA_LOC_SHIFT);
3798 if (sensor_location != 0) {
3799 status = hw->phy.ops.read_i2c_byte(hw,
3800 ixgbe_emc_temp_data[sensor_index],
3801 IXGBE_I2C_THERMAL_SENSOR_ADDR,
3802 &data->sensor[i].temp);
/**
 *  ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds
 *  @hw: pointer to hardware structure
 *
 *  Inits the thermal sensor thresholds according to the NVM map
 *  and saves the threshold and location values into mac.thermal_sensor_data
 **/
3818 s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
3824 u8 low_thresh_delta;
3828 struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
3830 memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));
3832 /* Only support thermal sensors attached to physical port 0 */
3833 if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) {
3834 status = IXGBE_NOT_IMPLEMENTED;
3838 status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
3842 low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >>
3843 IXGBE_ETS_LTHRES_DELTA_SHIFT);
3844 num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
3845 if (num_sensors > IXGBE_MAX_SENSORS)
3846 num_sensors = IXGBE_MAX_SENSORS;
3848 for (i = 0; i < num_sensors; i++) {
3852 if (hw->eeprom.ops.read(hw, ets_offset + 1 + i, &ets_sensor)) {
3853 hw_err(hw, "eeprom read at offset %d failed\n",
3854 ets_offset + 1 + i);
3857 sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
3858 IXGBE_ETS_DATA_INDEX_SHIFT);
3859 sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
3860 IXGBE_ETS_DATA_LOC_SHIFT);
3861 therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK;
3863 hw->phy.ops.write_i2c_byte(hw,
3864 ixgbe_emc_therm_limit[sensor_index],
3865 IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit);
3867 if (sensor_location == 0)
3870 data->sensor[i].location = sensor_location;
3871 data->sensor[i].caution_thresh = therm_limit;
3872 data->sensor[i].max_op_thresh = therm_limit - low_thresh_delta;