/*
 * This file is based on code from OCTEON SDK by Cavium Networks.
 *
 * Copyright (c) 2003-2007 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 */
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/of_net.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "ethernet-tx.h"
#include "ethernet-mdio.h"
#include "ethernet-util.h"

#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-ipd.h>
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-asxx-defs.h>
#include <asm/octeon/cvmx-gmxx-defs.h>
#include <asm/octeon/cvmx-smix-defs.h>
#define OCTEON_MAX_MTU 65392
static int num_packet_buffers = 1024;
module_param(num_packet_buffers, int, 0444);
MODULE_PARM_DESC(num_packet_buffers, "\n"
	"\tNumber of packet buffers to allocate and store in the\n"
	"\tFPA. By default, 1024 packet buffers are used.\n");

static int pow_receive_group = 15;
module_param(pow_receive_group, int, 0444);
MODULE_PARM_DESC(pow_receive_group, "\n"
	"\tPOW group to receive packets from. All ethernet hardware\n"
	"\twill be configured to send incoming packets to this POW\n"
	"\tgroup. Also any other software can submit packets to this\n"
	"\tgroup for the kernel to process.");

static int receive_group_order;
module_param(receive_group_order, int, 0444);
MODULE_PARM_DESC(receive_group_order, "\n"
	"\tOrder (0..4) of receive groups to take into use. Ethernet hardware\n"
	"\twill be configured to send incoming packets to multiple POW\n"
	"\tgroups. pow_receive_group parameter is ignored when multiple\n"
	"\tgroups are taken into use and groups are allocated starting\n"
	"\tfrom 0. By default, a single group is used.\n");

int pow_send_group = -1;
module_param(pow_send_group, int, 0644);
MODULE_PARM_DESC(pow_send_group, "\n"
	"\tPOW group to send packets to other software on. This\n"
	"\tcontrols the creation of the virtual device pow0.\n"
	"\talways_use_pow also depends on this value.");
int always_use_pow;
module_param(always_use_pow, int, 0444);
MODULE_PARM_DESC(always_use_pow, "\n"
	"\tWhen set, always send to the pow group. This will cause\n"
	"\tpackets sent to real ethernet devices to be sent to the\n"
	"\tPOW group instead of the hardware. Unless some other\n"
	"\tapplication changes the config, packets will still be\n"
	"\treceived from the low level hardware. Use this option\n"
	"\tto allow a CVMX app to intercept all packets from the\n"
	"\tlinux kernel. You must specify pow_send_group along with\n"
	"\talways_use_pow.");
char pow_send_list[128] = "";
module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
MODULE_PARM_DESC(pow_send_list, "\n"
	"\tComma separated list of ethernet devices that should use the\n"
	"\tPOW for transmit instead of the actual ethernet hardware. This\n"
	"\tis a per port version of always_use_pow. always_use_pow takes\n"
	"\tprecedence over this list. For example, setting this to\n"
	"\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
	"\tusing the pow_send_group.");

int rx_napi_weight = 32;
module_param(rx_napi_weight, int, 0444);
MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");

/* Mask indicating which receive groups are in use. */
int pow_receive_groups;
/*
 * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
 *
 * Set to one right before cvm_oct_poll_queue is destroyed.
 */
atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);

/*
 * Array of every ethernet device owned by this driver indexed by
 * the ipd input port number.
 */
struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];
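/* Interval between TX cleanup polls, in core clock cycles; set in probe. */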
u64 cvm_oct_tx_poll_interval;

static void cvm_oct_rx_refill_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);
static void cvm_oct_rx_refill_worker(struct work_struct *work)
{
	/*
	 * FPA 0 may have been drained, try to refill it if we need
	 * more than num_packet_buffers / 2, otherwise normal receive
	 * processing will refill it. If it were drained, no packets
	 * could be received so cvm_oct_napi_poll would never be
	 * invoked to do the refill.
	 */
	cvm_oct_rx_refill_pool(num_packet_buffers / 2);

	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);
}
static void cvm_oct_periodic_worker(struct work_struct *work)
{
	struct octeon_ethernet *priv = container_of(work,
						    struct octeon_ethernet,
						    port_periodic_work.work);

	if (priv->poll)
		priv->poll(cvm_oct_device[priv->port]);

	cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(
						cvm_oct_device[priv->port]);

	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		schedule_delayed_work(&priv->port_periodic_work, HZ);
}
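/*
 * Configure shared hardware blocks: fill the FPA packet, WQE and output
 * buffer pools, select little-endian IPD operation when required, and
 * set RED drop thresholds from the number of packet buffers.
 */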
static void cvm_oct_configure_common_hw(void)
{
	/* Setup the FPA */
	cvmx_fpa_enable();
	cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			     num_packet_buffers);
	cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			     num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 1024);

#ifdef __LITTLE_ENDIAN
	{
		union cvmx_ipd_ctl_status ipd_ctl_status;

		ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
		ipd_ctl_status.s.pkt_lend = 1;
		ipd_ctl_status.s.wqe_lend = 1;
		cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
	}
#endif

	cvmx_helper_setup_red(num_packet_buffers / 4, num_packet_buffers / 8);
}
/**
 * cvm_oct_free_work - Free a work queue entry
 *
 * @work_queue_entry: Work queue entry to free
 *
 * Returns Zero on success, Negative on failure.
 */
int cvm_oct_free_work(void *work_queue_entry)
{
	cvmx_wqe_t *work = work_queue_entry;

	int segments = work->word2.s.bufs;
	union cvmx_buf_ptr segment_ptr = work->packet_ptr;

	while (segments--) {
		union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
			cvmx_phys_to_ptr(segment_ptr.s.addr - 8);

		if (unlikely(!segment_ptr.s.i))
			cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
				      segment_ptr.s.pool,
				      CVMX_FPA_PACKET_POOL_SIZE / 128);
		segment_ptr = next_ptr;
	}
	cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);

	return 0;
}
EXPORT_SYMBOL(cvm_oct_free_work);
/**
 * cvm_oct_common_get_stats - get the low level ethernet statistics
 * @dev:    Device to get the statistics from
 *
 * Returns Pointer to the statistics
 */
static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
{
	cvmx_pip_port_status_t rx_status;
	cvmx_pko_port_status_t tx_status;
	struct octeon_ethernet *priv = netdev_priv(dev);

	if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
		if (octeon_is_simulation()) {
			/* The simulator doesn't support statistics */
			memset(&rx_status, 0, sizeof(rx_status));
			memset(&tx_status, 0, sizeof(tx_status));
		} else {
			cvmx_pip_get_port_status(priv->port, 1, &rx_status);
			cvmx_pko_get_port_status(priv->port, 1, &tx_status);
		}

		priv->stats.rx_packets += rx_status.inb_packets;
		priv->stats.tx_packets += tx_status.packets;
		priv->stats.rx_bytes += rx_status.inb_octets;
		priv->stats.tx_bytes += tx_status.octets;
		priv->stats.multicast += rx_status.multicast_packets;
		priv->stats.rx_crc_errors += rx_status.inb_errors;
		priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
		priv->stats.rx_dropped += rx_status.dropped_packets;
	}

	return &priv->stats;
}
/**
 * cvm_oct_common_change_mtu - change the link MTU
 * @dev:     Device to change
 * @new_mtu: The new MTU
 *
 * Returns Zero on success
 */
static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
#if IS_ENABLED(CONFIG_VLAN_8021Q)
	int vlan_bytes = VLAN_HLEN;
#else
	int vlan_bytes = 0;
#endif
	int mtu_overhead = ETH_HLEN + ETH_FCS_LEN + vlan_bytes;

	/*
	 * Limit the MTU to make sure the ethernet packets are between
	 * 64 bytes and OCTEON_MAX_MTU bytes.
	 */
	if ((new_mtu + mtu_overhead < VLAN_ETH_ZLEN) ||
	    (new_mtu + mtu_overhead > OCTEON_MAX_MTU)) {
		pr_err("MTU must be between %d and %d.\n",
		       VLAN_ETH_ZLEN - mtu_overhead,
		       OCTEON_MAX_MTU - mtu_overhead);
		return -EINVAL;
	}
	dev->mtu = new_mtu;

	if ((interface < 2) &&
	    (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		int index = INDEX(priv->port);
		/* Add ethernet header and FCS, and VLAN if configured. */
		int max_packet = new_mtu + mtu_overhead;

		if (OCTEON_IS_MODEL(OCTEON_CN3XXX) ||
		    OCTEON_IS_MODEL(OCTEON_CN58XX)) {
			/* Signal errors on packets larger than the MTU */
			cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
				       max_packet);
		} else {
			/*
			 * Set the hardware to truncate packets larger
			 * than the MTU and smaller than 64 bytes.
			 */
			union cvmx_pip_frm_len_chkx frm_len_chk;

			frm_len_chk.u64 = 0;
			frm_len_chk.s.minlen = VLAN_ETH_ZLEN;
			frm_len_chk.s.maxlen = max_packet;
			cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
				       frm_len_chk.u64);
		}
		/*
		 * Set the hardware to truncate packets larger than
		 * the MTU. The jabber register must be set to a
		 * multiple of 8 bytes, so round up.
		 */
		cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
			       (max_packet + 7) & ~7u);
	}
	return 0;
}
/**
 * cvm_oct_common_set_multicast_list - set the multicast list
 * @dev:    Device to work on
 */
static void cvm_oct_common_set_multicast_list(struct net_device *dev)
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);

	if ((interface < 2) &&
	    (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		union cvmx_gmxx_rxx_adr_ctl control;
		int index = INDEX(priv->port);

		control.u64 = 0;
		control.s.bcst = 1;	/* Allow broadcast MAC addresses */

		if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
		    (dev->flags & IFF_PROMISC))
			/* Force accept multicast packets */
			control.s.mcst = 2;
		else
			/* Force reject multicast packets */
			control.s.mcst = 1;

		if (dev->flags & IFF_PROMISC)
			/*
			 * Reject matches if promisc. Since CAM is
			 * shut off, should accept everything.
			 */
			control.s.cam_mode = 0;
		else
			/* Filter packets based on the CAM */
			control.s.cam_mode = 1;

		gmx_cfg.u64 =
			cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
			       control.u64);
		if (dev->flags & IFF_PROMISC)
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 0);
		else
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 1);

		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
}
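/**
 * cvm_oct_set_mac_filter - program dev->dev_addr into the GMX SMAC and CAM
 * @dev:    Device to update
 *
 * Returns Zero on success
 */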
static int cvm_oct_set_mac_filter(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	int interface = INTERFACE(priv->port);

	if ((interface < 2) &&
	    (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		int i;
		u8 *ptr = dev->dev_addr;
		u64 mac = 0;
		int index = INDEX(priv->port);

		for (i = 0; i < 6; i++)
			mac = (mac << 8) | (u64)ptr[i];

		gmx_cfg.u64 =
			cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
			       ptr[0]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
			       ptr[1]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
			       ptr[2]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
			       ptr[3]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
			       ptr[4]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
			       ptr[5]);
		cvm_oct_common_set_multicast_list(dev);
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
	return 0;
}
/**
 * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
 * @dev:    The device in question.
 * @addr:   Socket address.
 *
 * Returns Zero on success
 */
static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
{
	int r = eth_mac_addr(dev, addr);

	if (r)
		return r;
	return cvm_oct_set_mac_filter(dev);
}
/**
 * cvm_oct_common_init - per network device initialization
 * @dev:    Device to initialize
 *
 * Returns Zero on success
 */
int cvm_oct_common_init(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	const u8 *mac = NULL;

	if (priv->of_node)
		mac = of_get_mac_address(priv->of_node);

	if (mac)
		ether_addr_copy(dev->dev_addr, mac);
	else
		eth_hw_addr_random(dev);

	/*
	 * Force the interface to use the POW send if always_use_pow
	 * was specified or it is in the pow send list.
	 */
	if ((pow_send_group != -1) &&
	    (always_use_pow || strstr(pow_send_list, dev->name)))
		priv->queue = -1;

	if (priv->queue != -1)
		dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;

	/* We do our own locking, Linux doesn't need to */
	dev->features |= NETIF_F_LLTX;
	dev->ethtool_ops = &cvm_oct_ethtool_ops;

	cvm_oct_set_mac_filter(dev);
	dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);

	/*
	 * Zero out stats for port so we won't mistakenly show
	 * counters from the bootloader.
	 */
	memset(dev->netdev_ops->ndo_get_stats(dev), 0,
	       sizeof(struct net_device_stats));

	if (dev->netdev_ops->ndo_stop)
		dev->netdev_ops->ndo_stop(dev);

	return 0;
}
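/**
 * cvm_oct_common_uninit - per network device cleanup
 * @dev:    Device being torn down
 */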
void cvm_oct_common_uninit(struct net_device *dev)
{
	if (dev->phydev)
		phy_disconnect(dev->phydev);
}
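/**
 * cvm_oct_common_open - shared ndo_open implementation
 * @dev:       Device to bring up
 * @link_poll: Poll function used when the port has no attached PHY
 *
 * Returns Zero on success
 */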
int cvm_oct_common_open(struct net_device *dev,
			void (*link_poll)(struct net_device *))
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
	cvmx_helper_link_info_t link_info;
	int rv;

	rv = cvm_oct_phy_setup_device(dev);
	if (rv)
		return rv;

	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	gmx_cfg.s.en = 1;
	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		gmx_cfg.s.pknd = priv->port;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);

	if (octeon_is_simulation())
		return 0;

	if (dev->phydev) {
		int r = phy_read_status(dev->phydev);

		if (r == 0 && dev->phydev->link == 0)
			netif_carrier_off(dev);
		cvm_oct_adjust_link(dev);
	} else {
		link_info = cvmx_helper_link_get(priv->port);
		if (!link_info.s.link_up)
			netif_carrier_off(dev);
		priv->poll = link_poll;
		link_poll(dev);
	}

	return 0;
}
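/**
 * cvm_oct_link_poll - detect link state changes
 * @dev:    Device to poll
 *
 * Pushes the current link state to the hardware and the carrier flag.
 */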
void cvm_oct_link_poll(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	cvmx_helper_link_info_t link_info;

	link_info = cvmx_helper_link_get(priv->port);
	if (link_info.u64 == priv->link_info)
		return;

	if (cvmx_helper_link_set(priv->port, link_info))
		link_info.u64 = priv->link_info;
	else
		priv->link_info = link_info.u64;

	if (link_info.s.link_up) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	} else if (netif_carrier_ok(dev)) {
		netif_carrier_off(dev);
	}
	cvm_oct_note_carrier(priv, link_info);
}
static int cvm_oct_xaui_open(struct net_device *dev)
{
	return cvm_oct_common_open(dev, cvm_oct_link_poll);
}
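/*
 * Per-interface-type net_device_ops tables. NPI and LOOP ports have no
 * open/stop handlers; the POW device transmits via cvm_oct_xmit_pow.
 */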
static const struct net_device_ops cvm_oct_npi_netdev_ops = {
	.ndo_init = cvm_oct_common_init,
	.ndo_uninit = cvm_oct_common_uninit,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
	.ndo_init = cvm_oct_common_init,
	.ndo_uninit = cvm_oct_common_uninit,
	.ndo_open = cvm_oct_xaui_open,
	.ndo_stop = cvm_oct_common_stop,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
	.ndo_init = cvm_oct_sgmii_init,
	.ndo_uninit = cvm_oct_common_uninit,
	.ndo_open = cvm_oct_sgmii_open,
	.ndo_stop = cvm_oct_common_stop,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_spi_netdev_ops = {
	.ndo_init = cvm_oct_spi_init,
	.ndo_uninit = cvm_oct_spi_uninit,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
	.ndo_init = cvm_oct_common_init,
	.ndo_uninit = cvm_oct_common_uninit,
	.ndo_open = cvm_oct_rgmii_open,
	.ndo_stop = cvm_oct_common_stop,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_pow_netdev_ops = {
	.ndo_init = cvm_oct_common_init,
	.ndo_start_xmit = cvm_oct_xmit_pow,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};
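/*
 * Return the child of @parent whose "reg" property equals @reg_val, with
 * its reference count elevated, or NULL if no child matches.
 */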
static struct device_node *cvm_oct_of_get_child(
				const struct device_node *parent, int reg_val)
{
	struct device_node *node = NULL;
	int size;
	const __be32 *addr;

	for (;;) {
		node = of_get_next_child(parent, node);
		if (!node)
			break;
		addr = of_get_property(node, "reg", &size);
		if (addr && (be32_to_cpu(*addr) == reg_val))
			break;
	}
	return node;
}

static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
						 int interface, int port)
{
	struct device_node *ni, *np;

	ni = cvm_oct_of_get_child(pip, interface);
	if (!ni)
		return NULL;

	np = cvm_oct_of_get_child(ni, port);
	of_node_put(ni);

	return np;
}
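/* Apply optional "rx-delay" and "tx-delay" clock skews from the DT node. */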
static void cvm_set_rgmii_delay(struct device_node *np, int iface, int port)
{
	u32 delay_value;

	if (!of_property_read_u32(np, "rx-delay", &delay_value))
		cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value);
	if (!of_property_read_u32(np, "tx-delay", &delay_value))
		cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value);
}
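/*
 * Probe: configure common hardware, steer received packets to the
 * selected POW group(s), optionally create the pow%d device, then
 * allocate and register a net_device for every physical port.
 */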
static int cvm_oct_probe(struct platform_device *pdev)
{
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;
	struct device_node *pip;

	octeon_mdiobus_force_mod_depencency();

	pip = pdev->dev.of_node;
	if (!pip) {
		pr_err("Error: No 'pip' in /aliases\n");
		return -EINVAL;
	}

	cvm_oct_configure_common_hw();

	cvmx_helper_initialize_packet_io_global();

	if (receive_group_order) {
		if (receive_group_order > 4)
			receive_group_order = 4;
		pow_receive_groups = (1 << (1 << receive_group_order)) - 1;
	} else {
		pow_receive_groups = BIT(pow_receive_group);
	}
	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			union cvmx_pip_prt_tagx pip_prt_tagx;

			pip_prt_tagx.u64 =
				cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));

			if (receive_group_order) {
				int tag_mask;

				/* We support only 16 groups at the moment, so
				 * always disable the two additional "hidden"
				 * tag_mask bits on CN68XX.
				 */
				if (OCTEON_IS_MODEL(OCTEON_CN68XX))
					pip_prt_tagx.u64 |= 0x3ull << 44;

				tag_mask = ~((1 << receive_group_order) - 1);
				pip_prt_tagx.s.grptagbase = 0;
				pip_prt_tagx.s.grptagmask = tag_mask;
				pip_prt_tagx.s.grptag = 1;
				pip_prt_tagx.s.tag_mode = 0;
				pip_prt_tagx.s.inc_prt_flag = 1;
				pip_prt_tagx.s.ip6_dprt_flag = 1;
				pip_prt_tagx.s.ip4_dprt_flag = 1;
				pip_prt_tagx.s.ip6_sprt_flag = 1;
				pip_prt_tagx.s.ip4_sprt_flag = 1;
				pip_prt_tagx.s.ip6_dst_flag = 1;
				pip_prt_tagx.s.ip4_dst_flag = 1;
				pip_prt_tagx.s.ip6_src_flag = 1;
				pip_prt_tagx.s.ip4_src_flag = 1;
				pip_prt_tagx.s.grp = 0;
			} else {
				pip_prt_tagx.s.grptag = 0;
				pip_prt_tagx.s.grp = pow_receive_group;
			}

			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
				       pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();
	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	/* Initialize the FAU used for counting tx SKBs that need to be freed */
	cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);
	if (pow_send_group != -1) {
		struct net_device *dev;

		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
		if (dev) {
			/* Initialize the device private structure. */
			struct octeon_ethernet *priv = netdev_priv(dev);

			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
			priv->queue = -1;
			strcpy(dev->name, "pow%d");
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);

			if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for POW\n");
				free_netdev(dev);
			} else {
				cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
				pr_info("%s: POW send group %d, receive group %d\n",
					dev->name, pow_send_group,
					pow_receive_group);
			}
		} else {
			pr_err("Failed to allocate ethernet device for POW\n");
		}
	}
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
			cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;
		int port_index;

		for (port_index = 0,
		     port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port_index++, port++) {
			struct octeon_ethernet *priv;
			struct net_device *dev =
				alloc_etherdev(sizeof(struct octeon_ethernet));

			if (!dev) {
				pr_err("Failed to allocate ethernet device for port %d\n",
				       port);
				continue;
			}

			/* Initialize the device private structure. */
			priv = netdev_priv(dev);
			priv->netdev = dev;
			priv->of_node = cvm_oct_node_for_port(pip, interface,
							      port_index);

			INIT_DELAYED_WORK(&priv->port_periodic_work,
					  cvm_oct_periodic_worker);
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
			     qos++)
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);

			switch (priv->imode) {
			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "npi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
				strcpy(dev->name, "xaui%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "loop%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
				strcpy(dev->name, "spi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
			case CVMX_HELPER_INTERFACE_MODE_GMII:
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				cvm_set_rgmii_delay(priv->of_node, interface,
						    port_index);
				break;
			}

			if (!dev->netdev_ops) {
				free_netdev(dev);
			} else if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for interface %d, port %d\n",
				       interface, priv->port);
				free_netdev(dev);
			} else {
				cvm_oct_device[priv->port] = dev;
				fau -= cvmx_pko_get_num_queues(priv->port) *
					sizeof(u32);
				schedule_delayed_work(&priv->port_periodic_work,
						      HZ);
			}
		}
	}
	cvm_oct_tx_initialize();
	cvm_oct_rx_initialize();

	/*
	 * 150 uS: about 10 1500-byte packets at 1GE.
	 */
	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

	schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);

	return 0;
}
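/*
 * Remove: stop packet input, shut down RX/TX processing, unregister and
 * free every device, then drain the hardware buffer pools.
 */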
static int cvm_oct_remove(struct platform_device *pdev)
{
	int port;

	cvmx_ipd_disable();

	atomic_inc_return(&cvm_oct_poll_queue_stopping);
	cancel_delayed_work_sync(&cvm_oct_rx_refill_work);

	cvm_oct_rx_shutdown();
	cvm_oct_tx_shutdown();

	cvmx_pko_disable();

	/* Free the ethernet devices */
	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
		if (cvm_oct_device[port]) {
			struct net_device *dev = cvm_oct_device[port];
			struct octeon_ethernet *priv = netdev_priv(dev);

			cancel_delayed_work_sync(&priv->port_periodic_work);

			cvm_oct_tx_shutdown_dev(dev);
			unregister_netdev(dev);
			free_netdev(dev);
			cvm_oct_device[port] = NULL;
		}
	}

	cvmx_pko_shutdown();

	cvmx_ipd_free_ptr();

	/* Free the HW pools */
	cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			      num_packet_buffers);
	cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			      num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
	return 0;
}
static const struct of_device_id cvm_oct_match[] = {
	{
		.compatible = "cavium,octeon-3860-pip",
	},
	{},
};
MODULE_DEVICE_TABLE(of, cvm_oct_match);
static struct platform_driver cvm_oct_driver = {
	.probe = cvm_oct_probe,
	.remove = cvm_oct_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = cvm_oct_match,
	},
};

module_platform_driver(cvm_oct_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");