W: http://www.baycom.org/~tom/ham/ham.html
S: Maintained
- BCM43XX WIRELESS DRIVER
- P: Michael Buesch
+ BCM43XX WIRELESS DRIVER (SOFTMAC BASED VERSION)
+ P: Larry Finger
P: Stefano Brivio
W: http://bcm43xx.berlios.de/
S: Supported
+ QLOGIC QLA3XXX NETWORK DRIVER
+ P: Ron Mercer
+ S: Supported
+
QNX4 FILESYSTEM
P: Anders Larsen
S: Supported
S390 ZFCP DRIVER
-P: Andreas Herrmann
-M: aherrman@de.ibm.com
+P: Swen Schillig
+M: swen@vnet.ibm.com
W: http://www.ibm.com/developerworks/linux/linux390/
S: Maintained
+ SOFTMAC LAYER (IEEE 802.11)
+ P: Johannes Berg
+ P: Joe Jezak
+ P: Daniel Drake
+ W: http://softmac.sipsolutions.net/
+ S: Maintained
+
SOFTWARE RAID (Multiple Disks) SUPPORT
P: Ingo Molnar
S: Maintained
TULIP NETWORK DRIVER
- P: Jeff Garzik
- M: jgarzik@pobox.com
+ P: Valerie Henson
W: http://sourceforge.net/projects/tulip/
S: Maintained
S: Maintained
+ ZD1211RW WIRELESS DRIVER
+ P: Daniel Drake
+ P: Ulrich Kunitz
+ W: http://zd1211.ath.cx/wiki/DriverRewrite
+ S: Maintained
+
ZF MACHZ WATCHDOG
P: Fernando Fuganti
#endif
static void vortex_tx_timeout(struct net_device *dev);
static void acpi_set_WOL(struct net_device *dev);
- static struct ethtool_ops vortex_ethtool_ops;
+ static const struct ethtool_ops vortex_ethtool_ops;
static void set_8021q_mode(struct net_device *dev, int enable);
/* This driver uses 'options' to pass the media type, full-duplex flag, etc. */
local_irq_save(flags);
(vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev,NULL);
local_irq_restore(flags);
- }
+ }
#endif
#ifdef CONFIG_PM
vp = netdev_priv(dev);
ioaddr = vp->ioaddr;
-
+
unregister_netdev(dev);
iowrite16(TotalReset|0x14, ioaddr + EL3_CMD);
release_region(dev->base_addr, VORTEX_TOTAL_SIZE);
eisa_found = 1;
}
#endif
-
+
/* Special code to work-around the Compaq PCI BIOS32 problem. */
if (compaq_ioaddr) {
vortex_probe1(NULL, ioport_map(compaq_ioaddr, VORTEX_TOTAL_SIZE),
struct vortex_chip_info *vci;
void __iomem *ioaddr;
- /* wake up and enable device */
+ /* wake up and enable device */
rc = pci_enable_device(pdev);
if (rc < 0)
goto out;
if (request_region(dev->base_addr, vci->io_size, print_name) != NULL)
vp->must_free_region = 1;
- /* enable bus-mastering if necessary */
+ /* enable bus-mastering if necessary */
if (vci->flags & PCI_USES_MASTER)
pci_set_master(pdev);
vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE;
/* if we are a PCI driver, we store info in pdev->driver_data
- * instead of a module list */
+ * instead of a module list */
if (pdev)
pci_set_drvdata(pdev, dev);
if (edev)
dev->tx_timeout = vortex_tx_timeout;
dev->watchdog_timeo = (watchdog * HZ) / 1000;
#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = poll_vortex;
+ dev->poll_controller = poll_vortex;
#endif
if (pdev) {
vp->pm_state_valid = 1;
vp->stats.tx_dropped++;
netif_wake_queue(dev);
}
-
+
/* Issue Tx Enable */
iowrite16(TxEnable, ioaddr + EL3_CMD);
dev->trans_start = jiffies;
-
+
/* Switch to register set 7 for normal use. */
EL3WINDOW(7);
}
vp->tx_ring[entry].next = 0;
#if DO_ZEROCOPY
- if (skb->ip_summed != CHECKSUM_HW)
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
else
vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
if ((vp->tx_ring[entry].status & DN_COMPLETE) == 0)
break; /* It still hasn't been processed. */
#endif
-
+
if (vp->tx_skbuff[entry]) {
struct sk_buff *skb = vp->tx_skbuff[entry];
- #if DO_ZEROCOPY
+ #if DO_ZEROCOPY
int i;
for (i=0; i<=skb_shinfo(skb)->nr_frags; i++)
pci_unmap_single(VORTEX_PCI(vp),
"not using them!\n", dev->name);
}
#endif
-
+
free_irq(dev->irq, dev);
if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
if (vortex_debug > 0) {
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
-
+
if (vp->full_bus_master_tx) {
int i;
int stalled = ioread32(ioaddr + PktStatus) & 0x04; /* Possible racy. But it's only debug stuff */
}
}
- static struct ethtool_ops vortex_ethtool_ops = {
+ static const struct ethtool_ops vortex_ethtool_ops = {
.get_drvinfo = vortex_get_drvinfo,
.get_strings = vortex_get_strings,
.get_msglevel = vortex_get_msglevel,
int new_mode;
if (dev->flags & IFF_PROMISC) {
- if (vortex_debug > 0)
+ if (vortex_debug > 3)
printk(KERN_NOTICE "%s: Setting promiscuous mode.\n", dev->name);
new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm;
} else if ((dev->mc_list) || (dev->flags & IFF_ALLMULTI)) {
{
int pci_rc, eisa_rc;
- pci_rc = pci_module_init(&vortex_driver);
+ pci_rc = pci_register_driver(&vortex_driver);
eisa_rc = vortex_eisa_init();
if (pci_rc == 0)
/* Take care of the EISA devices */
eisa_driver_unregister(&vortex_eisa_driver);
#endif
-
+
if (compaq_net_device) {
vp = compaq_net_device->priv;
ioaddr = ioport_map(compaq_net_device->base_addr,
*/
#define DRV_NAME "8139cp"
- #define DRV_VERSION "1.2"
+ #define DRV_VERSION "1.3"
#define DRV_RELDATE "Mar 22, 2004"
u64 addr;
};
- struct ring_info {
- struct sk_buff *skb;
- dma_addr_t mapping;
- u32 len;
- };
-
struct cp_dma_stats {
u64 tx_ok;
u64 rx_ok;
struct net_device_stats net_stats;
struct cp_extra_stats cp_stats;
- unsigned rx_tail ____cacheline_aligned;
+ unsigned rx_head ____cacheline_aligned;
+ unsigned rx_tail;
struct cp_desc *rx_ring;
- struct ring_info rx_skb[CP_RX_RING_SIZE];
- unsigned rx_buf_sz;
+ struct sk_buff *rx_skb[CP_RX_RING_SIZE];
unsigned tx_head ____cacheline_aligned;
unsigned tx_tail;
-
struct cp_desc *tx_ring;
- struct ring_info tx_skb[CP_TX_RING_SIZE];
- dma_addr_t ring_dma;
+ struct sk_buff *tx_skb[CP_TX_RING_SIZE];
+
+ unsigned rx_buf_sz;
+ unsigned wol_enabled : 1; /* Is Wake-on-LAN enabled? */
#if CP_VLAN_TAG_USED
struct vlan_group *vlgrp;
#endif
-
- unsigned int wol_enabled : 1; /* Is Wake-on-LAN enabled? */
+ dma_addr_t ring_dma;
struct mii_if_info mii_if;
};
struct ethtool_eeprom *eeprom, u8 *data);
static struct pci_device_id cp_pci_tbl[] = {
- { PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
- { PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), },
+ { PCI_DEVICE(PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322), },
{ },
};
MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
struct cp_desc *desc;
unsigned buflen;
- skb = cp->rx_skb[rx_tail].skb;
+ skb = cp->rx_skb[rx_tail];
BUG_ON(!skb);
desc = &cp->rx_ring[rx_tail];
break;
len = (status & 0x1fff) - 4;
- mapping = cp->rx_skb[rx_tail].mapping;
+ mapping = le64_to_cpu(desc->addr);
if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
/* we don't support incoming fragmented frames.
if (netif_msg_rx_status(cp))
printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d\n",
- cp->dev->name, rx_tail, status, len);
+ dev->name, rx_tail, status, len);
buflen = cp->rx_buf_sz + RX_OFFSET;
new_skb = dev_alloc_skb (buflen);
}
skb_reserve(new_skb, RX_OFFSET);
- new_skb->dev = cp->dev;
+ new_skb->dev = dev;
pci_unmap_single(cp->pdev, mapping,
buflen, PCI_DMA_FROMDEVICE);
skb_put(skb, len);
- mapping =
- cp->rx_skb[rx_tail].mapping =
- pci_map_single(cp->pdev, new_skb->data,
- buflen, PCI_DMA_FROMDEVICE);
- cp->rx_skb[rx_tail].skb = new_skb;
+ mapping = pci_map_single(cp->pdev, new_skb->data, buflen,
+ PCI_DMA_FROMDEVICE);
+ cp->rx_skb[rx_tail] = new_skb;
cp_rx_skb(cp, skb, desc);
rx++;
unsigned tx_tail = cp->tx_tail;
while (tx_tail != tx_head) {
+ struct cp_desc *txd = cp->tx_ring + tx_tail;
struct sk_buff *skb;
u32 status;
rmb();
- status = le32_to_cpu(cp->tx_ring[tx_tail].opts1);
+ status = le32_to_cpu(txd->opts1);
if (status & DescOwn)
break;
- skb = cp->tx_skb[tx_tail].skb;
+ skb = cp->tx_skb[tx_tail];
BUG_ON(!skb);
- pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping,
- cp->tx_skb[tx_tail].len, PCI_DMA_TODEVICE);
+ pci_unmap_single(cp->pdev, le64_to_cpu(txd->addr),
+ le32_to_cpu(txd->opts1) & 0xffff,
+ PCI_DMA_TODEVICE);
if (status & LastFrag) {
if (status & (TxError | TxFIFOUnder)) {
dev_kfree_skb_irq(skb);
}
- cp->tx_skb[tx_tail].skb = NULL;
+ cp->tx_skb[tx_tail] = NULL;
tx_tail = NEXT_TX(tx_tail);
}
if (mss)
flags |= LargeSend | ((mss & MSSMask) << MSSShift);
- else if (skb->ip_summed == CHECKSUM_HW) {
+ else if (skb->ip_summed == CHECKSUM_PARTIAL) {
const struct iphdr *ip = skb->nh.iph;
if (ip->protocol == IPPROTO_TCP)
flags |= IPCS | TCPCS;
txd->opts1 = cpu_to_le32(flags);
wmb();
- cp->tx_skb[entry].skb = skb;
- cp->tx_skb[entry].mapping = mapping;
- cp->tx_skb[entry].len = len;
+ cp->tx_skb[entry] = skb;
entry = NEXT_TX(entry);
} else {
struct cp_desc *txd;
first_len = skb_headlen(skb);
first_mapping = pci_map_single(cp->pdev, skb->data,
first_len, PCI_DMA_TODEVICE);
- cp->tx_skb[entry].skb = skb;
- cp->tx_skb[entry].mapping = first_mapping;
- cp->tx_skb[entry].len = first_len;
+ cp->tx_skb[entry] = skb;
entry = NEXT_TX(entry);
for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
if (mss)
ctrl |= LargeSend |
((mss & MSSMask) << MSSShift);
- else if (skb->ip_summed == CHECKSUM_HW) {
+ else if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (ip->protocol == IPPROTO_TCP)
ctrl |= IPCS | TCPCS;
else if (ip->protocol == IPPROTO_UDP)
txd->opts1 = cpu_to_le32(ctrl);
wmb();
- cp->tx_skb[entry].skb = skb;
- cp->tx_skb[entry].mapping = mapping;
- cp->tx_skb[entry].len = len;
+ cp->tx_skb[entry] = skb;
entry = NEXT_TX(entry);
}
txd->addr = cpu_to_le64(first_mapping);
wmb();
- if (skb->ip_summed == CHECKSUM_HW) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (ip->protocol == IPPROTO_TCP)
txd->opts1 = cpu_to_le32(first_eor | first_len |
FirstFrag | DescOwn |
/* Note: do not reorder, GCC is clever about common statements. */
if (dev->flags & IFF_PROMISC) {
/* Unconditionally log net taps. */
- printk (KERN_NOTICE "%s: Promiscuous mode enabled.\n",
- dev->name);
rx_mode =
AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
AcceptAllPhys;
for (i = 0; i < CP_RX_RING_SIZE; i++) {
struct sk_buff *skb;
+ dma_addr_t mapping;
skb = dev_alloc_skb(cp->rx_buf_sz + RX_OFFSET);
if (!skb)
skb->dev = cp->dev;
skb_reserve(skb, RX_OFFSET);
- cp->rx_skb[i].mapping = pci_map_single(cp->pdev,
- skb->data, cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
- cp->rx_skb[i].skb = skb;
+ mapping = pci_map_single(cp->pdev, skb->data, cp->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ cp->rx_skb[i] = skb;
cp->rx_ring[i].opts2 = 0;
- cp->rx_ring[i].addr = cpu_to_le64(cp->rx_skb[i].mapping);
+ cp->rx_ring[i].addr = cpu_to_le64(mapping);
if (i == (CP_RX_RING_SIZE - 1))
cp->rx_ring[i].opts1 =
cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
static void cp_clean_rings (struct cp_private *cp)
{
+ struct cp_desc *desc;
unsigned i;
for (i = 0; i < CP_RX_RING_SIZE; i++) {
- if (cp->rx_skb[i].skb) {
- pci_unmap_single(cp->pdev, cp->rx_skb[i].mapping,
+ if (cp->rx_skb[i]) {
+ desc = cp->rx_ring + i;
+ pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr),
cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
- dev_kfree_skb(cp->rx_skb[i].skb);
+ dev_kfree_skb(cp->rx_skb[i]);
}
}
for (i = 0; i < CP_TX_RING_SIZE; i++) {
- if (cp->tx_skb[i].skb) {
- struct sk_buff *skb = cp->tx_skb[i].skb;
-
- pci_unmap_single(cp->pdev, cp->tx_skb[i].mapping,
- cp->tx_skb[i].len, PCI_DMA_TODEVICE);
- if (le32_to_cpu(cp->tx_ring[i].opts1) & LastFrag)
+ if (cp->tx_skb[i]) {
+ struct sk_buff *skb = cp->tx_skb[i];
+
+ desc = cp->tx_ring + i;
+ pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr),
+ le32_to_cpu(desc->opts1) & 0xffff,
+ PCI_DMA_TODEVICE);
+ if (le32_to_cpu(desc->opts1) & LastFrag)
dev_kfree_skb(skb);
cp->net_stats.tx_dropped++;
}
memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
- memset(&cp->rx_skb, 0, sizeof(struct ring_info) * CP_RX_RING_SIZE);
- memset(&cp->tx_skb, 0, sizeof(struct ring_info) * CP_TX_RING_SIZE);
+ memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
+ memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
}
static void cp_free_rings (struct cp_private *cp)
pci_free_consistent(cp->pdev, sizeof(*nic_stats), nic_stats, dma);
}
- static struct ethtool_ops cp_ethtool_ops = {
+ static const struct ethtool_ops cp_ethtool_ops = {
.get_drvinfo = cp_get_drvinfo,
.get_regs_len = cp_get_regs_len,
.get_stats_count = cp_get_stats_count,
struct net_device *dev = pci_get_drvdata(pdev);
struct cp_private *cp = netdev_priv(dev);
- BUG_ON(!dev);
unregister_netdev(dev);
iounmap(cp->regs);
if (cp->wol_enabled)
#ifdef CONFIG_PM
static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
{
- struct net_device *dev;
- struct cp_private *cp;
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct cp_private *cp = netdev_priv(dev);
unsigned long flags;
- dev = pci_get_drvdata (pdev);
- cp = netdev_priv(dev);
-
- if (!dev || !netif_running (dev)) return 0;
+ if (!netif_running(dev))
+ return 0;
netif_device_detach (dev);
netif_stop_queue (dev);
#ifdef MODULE
printk("%s", version);
#endif
- return pci_module_init (&cp_driver);
+ return pci_register_driver(&cp_driver);
}
static void __exit cp_exit (void)
#endif
#ifndef PCI_VENDOR_ID_ALTEON
- #define PCI_VENDOR_ID_ALTEON 0x12ae
+ #define PCI_VENDOR_ID_ALTEON 0x12ae
#endif
#ifndef PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE
#define PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE 0x0001
MODULE_PARM_DESC(tx_ratio, "AceNIC/3C985/GA620 ratio of NIC memory used for TX/RX descriptors (range 0-63)");
- static char version[] __devinitdata =
+ static char version[] __devinitdata =
" http://home.cern.ch/~jes/gige/acenic.html\n";
static int ace_set_settings(struct net_device *, struct ethtool_cmd *);
static void ace_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
- static struct ethtool_ops ace_ethtool_ops = {
+ static const struct ethtool_ops ace_ethtool_ops = {
.get_settings = ace_get_settings,
.set_settings = ace_set_settings,
.get_drvinfo = ace_get_drvinfo,
pci_read_config_word(pdev, PCI_COMMAND, &ap->pci_command);
- /* OpenFirmware on Mac's does not set this - DOH.. */
+ /* OpenFirmware on Mac's does not set this - DOH.. */
if (!(ap->pci_command & PCI_COMMAND_MEMORY)) {
printk(KERN_INFO "%s: Enabling PCI Memory Mapped "
"access - was not enabled by BIOS/Firmware\n",
writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
if (ap->version >= 2)
writel(readl(&regs->CpuBCtrl) | CPU_HALT, &regs->CpuBCtrl);
-
+
/*
* This clears any pending interrupts
*/
static int __init acenic_init(void)
{
- return pci_module_init(&acenic_pci_driver);
+ return pci_register_driver(&acenic_pci_driver);
}
static void __exit acenic_exit(void)
printk(KERN_INFO " PCI bus width: %i bits, speed: %iMHz, "
"latency: %i clks\n",
(pci_state & PCI_32BIT) ? 32 : 64,
- (pci_state & PCI_66MHZ) ? 66 : 33,
+ (pci_state & PCI_66MHZ) ? 66 : 33,
ap->pci_latency);
/*
pci_write_config_word(pdev, PCI_COMMAND, ap->pci_command);
}
#endif
-
+
/*
* Configure DMA attributes.
*/
(RX_STD_RING_ENTRIES +
RX_JUMBO_RING_ENTRIES))));
info->rx_mini_ctrl.max_len = ACE_MINI_SIZE;
- info->rx_mini_ctrl.flags =
+ info->rx_mini_ctrl.flags =
RCB_FLG_TCP_UDP_SUM|RCB_FLG_NO_PSEUDO_HDR|ACE_RCB_VLAN_FLAG;
for (i = 0; i < RX_MINI_RING_ENTRIES; i++)
if (ACE_IS_TIGON_I(ap)) {
ap->tx_ring = (struct tx_desc *) regs->Window;
- for (i = 0; i < (TIGON_I_TX_RING_ENTRIES
+ for (i = 0; i < (TIGON_I_TX_RING_ENTRIES
* sizeof(struct tx_desc)) / sizeof(u32); i++)
writel(0, (void __iomem *)ap->tx_ring + i * 4);
{
struct ace_regs __iomem *regs = ap->regs;
short i, idx;
-
+
prefetchw(&ap->cur_rx_bufs);
prefetchw(&ap->cur_rx_bufs);
prefetchw(&ap->cur_mini_bufs);
-
+
while (idx != rxretprd) {
struct ring_info *rip;
struct sk_buff *skb;
/* make sure the rx descriptor isn't read before rxretprd */
- if (idx == rxretcsm)
+ if (idx == rxretcsm)
rmb();
retdesc = &ap->rx_return_ring[idx];
rip = &ap->skb->rx_mini_skbuff[skbidx];
mapsize = ACE_MINI_BUFSIZE;
rxdesc = &ap->rx_mini_ring[skbidx];
- mini_count++;
+ mini_count++;
break;
default:
printk(KERN_INFO "%s: unknown frame type (0x%02x) "
*/
if (bd_flags & BD_FLG_TCP_UDP_SUM) {
skb->csum = htons(csum);
- skb->ip_summed = CHECKSUM_HW;
+ skb->ip_summed = CHECKSUM_COMPLETE;
} else {
skb->ip_summed = CHECKSUM_NONE;
}
*/
netif_stop_queue(dev);
-
+
if (ap->promisc) {
cmd.evt = C_SET_PROMISC_MODE;
cmd.code = C_C_PROMISC_DISABLE;
if (mapping) {
if (ACE_IS_TIGON_I(ap)) {
- struct tx_desc __iomem *tx
+ struct tx_desc __iomem *tx
= (struct tx_desc __iomem *) &ap->tx_ring[i];
writel(0, &tx->addr.addrhi);
writel(0, &tx->addr.addrlo);
mapping = ace_map_tx_skb(ap, skb, skb, idx);
flagsize = (skb->len << 16) | (BD_FLG_END);
- if (skb->ip_summed == CHECKSUM_HW)
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
flagsize |= BD_FLG_TCP_UDP_SUM;
#if ACENIC_DO_VLAN
if (vlan_tx_tag_present(skb)) {
mapping = ace_map_tx_skb(ap, skb, NULL, idx);
flagsize = (skb_headlen(skb) << 16);
- if (skb->ip_summed == CHECKSUM_HW)
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
flagsize |= BD_FLG_TCP_UDP_SUM;
#if ACENIC_DO_VLAN
if (vlan_tx_tag_present(skb)) {
PCI_DMA_TODEVICE);
flagsize = (frag->size << 16);
- if (skb->ip_summed == CHECKSUM_HW)
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
flagsize |= BD_FLG_TCP_UDP_SUM;
idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
cpu_relax();
goto restart;
}
-
+
/* The ring is stuck full. */
printk(KERN_WARNING "%s: Transmit ring stuck full\n", dev->name);
return NETDEV_TX_BUSY;
return 0;
}
- static void ace_get_drvinfo(struct net_device *dev,
+ static void ace_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct ace_private *ap = netdev_priv(dev);
strlcpy(info->driver, "acenic", sizeof(info->driver));
- snprintf(info->version, sizeof(info->version), "%i.%i.%i",
+ snprintf(info->version, sizeof(info->version), "%i.%i.%i",
tigonFwReleaseMajor, tigonFwReleaseMinor,
tigonFwReleaseFix);
if (ap->pdev)
- strlcpy(info->bus_info, pci_name(ap->pdev),
+ strlcpy(info->bus_info, pci_name(ap->pdev),
sizeof(info->bus_info));
}
while (size > 0) {
tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
min_t(u32, size, ACE_WINDOW_SIZE));
- tdest = (void __iomem *) &regs->Window +
+ tdest = (void __iomem *) &regs->Window +
(dest & (ACE_WINDOW_SIZE - 1));
writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);
/*
while (size > 0) {
tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
min_t(u32, size, ACE_WINDOW_SIZE));
- tdest = (void __iomem *) &regs->Window +
+ tdest = (void __iomem *) &regs->Window +
(dest & (ACE_WINDOW_SIZE - 1));
writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);
for (i = 0; i < 8; i++, magic <<= 1) {
udelay(ACE_SHORT_DELAY);
- if (magic & 0x80)
+ if (magic & 0x80)
local |= EEPROM_DATA_OUT;
else
local &= ~EEPROM_DATA_OUT;
{ 0x1571, 0xa204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
{ 0x1571, 0xa205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
{ 0x1571, 0xa206, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
+ { 0x10B5, 0x9030, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
{ 0x10B5, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
{0,}
};
static int __init com20020pci_init(void)
{
BUGLVL(D_NORMAL) printk(VERSION);
- return pci_module_init(&com20020pci_driver);
+ return pci_register_driver(&com20020pci_driver);
}
static void __exit com20020pci_cleanup(void)
SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
"Entry 0100"},
/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
- {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
+ {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
"Entry 0101: ST M45PE10 (128kB non-bufferred)"},
BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
-
+
for (i = 0; i < 50; i++) {
udelay(10);
u32 local_adv, remote_adv;
bp->flow_ctrl = 0;
- if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
+ if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
if (bp->duplex == DUPLEX_FULL) {
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
-
+
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
static int
new_adv_reg |= ADVERTISE_100FULL;
if (bp->advertising & ADVERTISED_1000baseT_Full)
new_adv1000_reg |= ADVERTISE_1000FULL;
-
+
new_adv_reg |= ADVERTISE_CSMA;
new_adv_reg |= bnx2_phy_get_pause_adv(bp);
bnx2_read_phy(bp, MII_BMSR, &bmsr);
bnx2_read_phy(bp, MII_BMSR, &bmsr);
-
+
if (bmsr & BMSR_LSTATUS) {
/* Force link down */
bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
}
static void
- bnx2_set_mac_addr(struct bnx2 *bp)
+ bnx2_set_mac_addr(struct bnx2 *bp)
{
u32 val;
u8 *mac_addr = bp->dev->dev_addr;
REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
- val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+ val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
(mac_addr[4] << 8) | mac_addr[5];
REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
tx_buf = &bp->tx_buf_ring[sw_ring_cons];
skb = tx_buf->skb;
- #ifdef BCM_TSO
+ #ifdef BCM_TSO
/* partial BD completions possible with TSO packets */
if (skb_is_gso(skb)) {
u16 last_idx, last_ring_idx;
if (orig_budget > dev->quota)
orig_budget = dev->quota;
-
+
work_done = bnx2_rx_int(bp, orig_budget);
*budget -= work_done;
dev->quota -= work_done;
}
-
+
bp->last_status_idx = bp->status_blk->status_idx;
rmb();
cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
cpu_reg.spad_base = BNX2_RXP_SCRATCH;
cpu_reg.mips_view_base = 0x8000000;
-
+
fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
cpu_reg.spad_base = BNX2_TXP_SCRATCH;
cpu_reg.mips_view_base = 0x8000000;
-
+
fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
cpu_reg.mips_view_base = 0x8000000;
-
+
fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
cpu_reg.spad_base = BNX2_COM_SCRATCH;
cpu_reg.mips_view_base = 0x8000000;
-
+
fw.ver_major = bnx2_COM_b06FwReleaseMajor;
fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
fw.ver_fix = bnx2_COM_b06FwReleaseFix;
val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
/* Enable both bits, even on read. */
- REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
+ REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
}
val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
/* Disable both bits, even after read. */
- REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
+ REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
BNX2_NVM_ACCESS_ENABLE_WR_EN));
}
/* Find the data_start addr */
data_start = (written == 0) ? offset32 : page_start;
/* Find the data_end addr */
- data_end = (page_end > offset32 + len32) ?
+ data_end = (page_end > offset32 + len32) ?
(offset32 + len32) : page_end;
/* Request access to the flash interface. */
cmd_flags |= BNX2_NVM_COMMAND_LAST;
}
rc = bnx2_nvram_read_dword(bp,
- page_start + j,
- &flash_buffer[j],
+ page_start + j,
+ &flash_buffer[j],
cmd_flags);
if (rc)
if (bp->flash_info->buffered == 0) {
for (addr = page_start; addr < data_start;
addr += 4, i += 4) {
-
+
rc = bnx2_nvram_write_dword(bp, addr,
&flash_buffer[i], cmd_flags);
if (bp->flash_info->buffered == 0) {
for (addr = data_end; addr < page_end;
addr += 4, i += 4) {
-
+
if (addr == page_end-4) {
cmd_flags = BNX2_NVM_COMMAND_LAST;
}
val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
- BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
+ BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
- BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
+ BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
DMA_READ_CHANS << 12 |
DMA_WRITE_CHANS << 16;
REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
(u64) bp->stats_blk_mapping >> 32);
- REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
+ REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
(bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
bp->tx_wake_thresh = bp->tx_ring_size / 2;
txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
-
+
txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
bp->tx_cons = 0;
bp->hw_tx_cons = 0;
bp->tx_prod_bseq = 0;
-
+
val = BNX2_L2CTX_TYPE_TYPE_L2;
val |= BNX2_L2CTX_TYPE_SIZE_L2;
CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);
{
struct rx_bd *rxbd;
int i;
- u16 prod, ring_prod;
+ u16 prod, ring_prod;
u32 val;
/* 8 for CRC and VLAN */
bp->rx_cons = 0;
bp->hw_rx_cons = 0;
bp->rx_prod_bseq = 0;
-
+
for (i = 0; i < bp->rx_max_ring; i++) {
int j;
return ret;
}
}
-
+
return ret;
}
bnx2_read_phy(bp, MII_BMSR, &bmsr);
bnx2_read_phy(bp, MII_BMSR, &bmsr);
spin_unlock_bh(&bp->phy_lock);
-
+
if (bmsr & BMSR_LSTATUS) {
return 0;
}
bnx2_free_mem(bp);
return rc;
}
-
+
mod_timer(&bp->timer, jiffies + bp->current_interval);
atomic_set(&bp->intr_sem, 0);
ring_prod = TX_RING_IDX(prod);
vlan_tag_flags = 0;
- if (skb->ip_summed == CHECKSUM_HW) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
}
vlan_tag_flags |=
(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
}
- #ifdef BCM_TSO
+ #ifdef BCM_TSO
if ((mss = skb_shinfo(skb)->gso_size) &&
(skb->len > (bp->dev->mtu + ETH_HLEN))) {
u32 tcp_opt_len, ip_tcp_len;
}
mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
-
+
tx_buf = &bp->tx_buf_ring[ring_prod];
tx_buf->skb = skb;
pci_unmap_addr_set(tx_buf, mapping, mapping);
net_stats->tx_bytes =
GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
- net_stats->multicast =
+ net_stats->multicast =
GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
- net_stats->collisions =
+ net_stats->collisions =
(unsigned long) stats_blk->stat_EtherStatsCollisions;
- net_stats->rx_length_errors =
+ net_stats->rx_length_errors =
(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
stats_blk->stat_EtherStatsOverrsizePkts);
- net_stats->rx_over_errors =
+ net_stats->rx_over_errors =
(unsigned long) stats_blk->stat_IfInMBUFDiscards;
- net_stats->rx_frame_errors =
+ net_stats->rx_frame_errors =
(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
- net_stats->rx_crc_errors =
+ net_stats->rx_crc_errors =
(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
net_stats->rx_errors = net_stats->rx_length_errors +
}
net_stats->tx_errors =
- (unsigned long)
+ (unsigned long)
stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
+
net_stats->tx_aborted_errors +
return 0;
}
-
+
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
if (cmd->autoneg == AUTONEG_ENABLE) {
autoneg |= AUTONEG_SPEED;
- cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
+ cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
/* allow advertising 1 speed */
if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
- bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
+ bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
- STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
- STATS_OFFSET32(stat_Dot3StatsFCSErrors),
- STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
- STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
- STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
- STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
- STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
- STATS_OFFSET32(stat_Dot3StatsLateCollisions),
- STATS_OFFSET32(stat_EtherStatsCollisions),
- STATS_OFFSET32(stat_EtherStatsFragments),
- STATS_OFFSET32(stat_EtherStatsJabbers),
- STATS_OFFSET32(stat_EtherStatsUndersizePkts),
- STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
- STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
- STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
- STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
- STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
- STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
- STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
- STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
- STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
- STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
- STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
- STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
- STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
- STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
- STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
- STATS_OFFSET32(stat_XonPauseFramesReceived),
- STATS_OFFSET32(stat_XoffPauseFramesReceived),
- STATS_OFFSET32(stat_OutXonSent),
- STATS_OFFSET32(stat_OutXoffSent),
- STATS_OFFSET32(stat_MacControlFramesReceived),
- STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
- STATS_OFFSET32(stat_IfInMBUFDiscards),
+ STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
+ STATS_OFFSET32(stat_Dot3StatsFCSErrors),
+ STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
+ STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
+ STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
+ STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
+ STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
+ STATS_OFFSET32(stat_Dot3StatsLateCollisions),
+ STATS_OFFSET32(stat_EtherStatsCollisions),
+ STATS_OFFSET32(stat_EtherStatsFragments),
+ STATS_OFFSET32(stat_EtherStatsJabbers),
+ STATS_OFFSET32(stat_EtherStatsUndersizePkts),
+ STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
+ STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
+ STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
+ STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
+ STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
+ STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
+ STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
+ STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
+ STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
+ STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
+ STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
+ STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
+ STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
+ STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
+ STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
+ STATS_OFFSET32(stat_XonPauseFramesReceived),
+ STATS_OFFSET32(stat_XoffPauseFramesReceived),
+ STATS_OFFSET32(stat_OutXonSent),
+ STATS_OFFSET32(stat_OutXoffSent),
+ STATS_OFFSET32(stat_MacControlFramesReceived),
+ STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
+ STATS_OFFSET32(stat_IfInMBUFDiscards),
STATS_OFFSET32(stat_FwRxDrop),
};
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
* skipped because of errata.
- */
+ */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
8,0,8,8,8,8,8,8,8,8,
4,0,4,4,4,4,4,4,4,4,
return 0;
}
- static struct ethtool_ops bnx2_ethtool_ops = {
+ static const struct ethtool_ops bnx2_ethtool_ops = {
.get_settings = bnx2_get_settings,
.set_settings = bnx2_set_settings,
.get_drvinfo = bnx2_get_drvinfo,
bp->flags |= PCIX_FLAG;
clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
-
+
clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
switch (clkreg) {
case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
bp->tx_quick_cons_trip = 20;
bp->tx_ticks_int = 80;
bp->tx_ticks = 80;
-
+
bp->rx_quick_cons_trip_int = 6;
bp->rx_quick_cons_trip = 6;
bp->rx_ticks_int = 18;
static int __init bnx2_init(void)
{
- return pci_module_init(&bnx2_pci_driver);
+ return pci_register_driver(&bnx2_pci_driver);
}
static void __exit bnx2_cleanup(void)
* -- on page reclamation, the driver swaps the page with a spare page.
* if that page is still in use, it frees its reference to that page,
* and allocates a new page for use. otherwise, it just recycles the
- * the page.
+ * the page.
*
* NOTE: cassini can parse the header. however, it's not worth it
* as long as the network stack requires a header copy.
* interrupts, but the INT# assignment needs to be set up properly by
* the BIOS and conveyed to the driver. PCI BIOSes don't know how to do
* that. also, the two descriptor rings are designed to distinguish between
- * encrypted and non-encrypted packets, but we use them for buffering
+ * encrypted and non-encrypted packets, but we use them for buffering
* instead.
*
- * by default, the selective clear mask is set up to process rx packets.
+ * by default, the selective clear mask is set up to process rx packets.
*/
#endif
/* select which firmware to use */
- #define USE_HP_WORKAROUND
+ #define USE_HP_WORKAROUND
#define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */
#define CAS_HP_ALT_FIRMWARE cas_prog_null /* alternate firmware */
#define STOP_TRIES_PHY 1000
#define STOP_TRIES 5000
- /* specify a minimum frame size to deal with some fifo issues
+ /* specify a minimum frame size to deal with some fifo issues
* max mtu == 2 * page size - ethernet header - 64 - swivel =
* 2 * page_size - 0x50
*/
* being confused and never showing a link status of "up."
*/
#define DEFAULT_LINKDOWN_TIMEOUT 5
- /*
+ /*
* Value in seconds, for user input.
*/
static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
{
int i;
- for (i = 0; i < N_TX_RINGS; i++)
+ for (i = 0; i < N_TX_RINGS; i++)
spin_lock(&cp->tx_lock[i]);
}
{
int i;
- for (i = N_TX_RINGS; i > 0; i--)
- spin_unlock(&cp->tx_lock[i - 1]);
+ for (i = N_TX_RINGS; i > 0; i--)
+ spin_unlock(&cp->tx_lock[i - 1]);
}
static inline void cas_unlock_all(struct cas *cp)
#ifdef USE_PCI_INTD
case 3:
#endif
- writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
+ writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
cp->regs + REG_PLUS_INTRN_MASK(ring));
break;
#endif
if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
return;
- writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
+ writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
cp->regs + REG_BIM_LOCAL_DEV_EN);
writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);
#endif
}
- /* access to the phy. the following assumes that we've initialized the MIF to
+ /* access to the phy. the following assumes that we've initialized the MIF to
* be in frame rather than bit-bang mode
*/
static u16 cas_phy_read(struct cas *cp, int reg)
cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
cmd |= MIF_FRAME_TURN_AROUND_MSB;
writel(cmd, cp->regs + REG_MIF_FRAME);
-
+
/* poll for completion */
while (limit-- > 0) {
udelay(10);
cmd |= MIF_FRAME_TURN_AROUND_MSB;
cmd |= val & MIF_FRAME_DATA_MASK;
writel(cmd, cp->regs + REG_MIF_FRAME);
-
+
/* poll for completion */
while (limit-- > 0) {
udelay(10);
static void cas_phy_powerup(struct cas *cp)
{
- u16 ctl = cas_phy_read(cp, MII_BMCR);
+ u16 ctl = cas_phy_read(cp, MII_BMCR);
if ((ctl & BMCR_PDOWN) == 0)
return;
static void cas_phy_powerdown(struct cas *cp)
{
- u16 ctl = cas_phy_read(cp, MII_BMCR);
+ u16 ctl = cas_phy_read(cp, MII_BMCR);
if (ctl & BMCR_PDOWN)
return;
/* cp->lock held. note: the last put_page will free the buffer */
static int cas_page_free(struct cas *cp, cas_page_t *page)
{
- pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
+ pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
PCI_DMA_FROMDEVICE);
cas_buffer_dec(page);
__free_pages(page->buffer, cp->page_order);
#define RX_USED_ADD(x, y) ((x)->used += (y))
#define RX_USED_SET(x, y) ((x)->used = (y))
#else
- #define RX_USED_ADD(x, y)
+ #define RX_USED_ADD(x, y)
#define RX_USED_SET(x, y)
#endif
list_splice(&cp->rx_inuse_list, &list);
INIT_LIST_HEAD(&cp->rx_inuse_list);
spin_unlock(&cp->rx_inuse_lock);
-
+
list_for_each_safe(elem, tmp, &list) {
cas_page_t *page = list_entry(elem, cas_page_t, list);
list_splice(&list, &cp->rx_inuse_list);
spin_unlock(&cp->rx_inuse_lock);
}
-
+
spin_lock(&cp->rx_spare_lock);
needed = cp->rx_spares_needed;
spin_unlock(&cp->rx_spare_lock);
i = 0;
while (i < needed) {
cas_page_t *spare = cas_page_alloc(cp, flags);
- if (!spare)
+ if (!spare)
break;
list_add(&spare->list, &list);
i++;
static void cas_mif_poll(struct cas *cp, const int enable)
{
u32 cfg;
-
- cfg = readl(cp->regs + REG_MIF_CFG);
+
+ cfg = readl(cp->regs + REG_MIF_CFG);
cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);
if (cp->phy_type & CAS_PHY_MII_MDIO1)
- cfg |= MIF_CFG_PHY_SELECT;
+ cfg |= MIF_CFG_PHY_SELECT;
/* poll and interrupt on link status change. */
if (enable) {
cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
}
- writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
- cp->regs + REG_MIF_MASK);
+ writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
+ cp->regs + REG_MIF_MASK);
writel(cfg, cp->regs + REG_MIF_CFG);
}
/*
* WTZ: If the old state was link_up, we turn off the carrier
* to replicate everything we do elsewhere on a link-down
- * event when we were already in a link-up state..
+ * event when we were already in a link-up state..
*/
if (oldstate == link_up)
netif_carrier_off(cp->dev);
/*
* WTZ: This branch will simply schedule a full reset after
* we explicitly changed link modes in an ioctl. See if this
- * fixes the link-problems we were having for forced mode.
+ * fixes the link-problems we were having for forced mode.
*/
atomic_inc(&cp->reset_task_pending);
atomic_inc(&cp->reset_task_pending_all);
} else {
cas_mif_poll(cp, 0);
ctl = cas_phy_read(cp, MII_BMCR);
- ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
+ ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
ctl |= cp->link_cntl;
if (ctl & BMCR_ANENABLE) {
{
int limit = STOP_TRIES_PHY;
u16 val;
-
+
cas_phy_write(cp, MII_BMCR, BMCR_RESET);
udelay(100);
while (limit--) {
val = cas_phy_read(cp, BROADCOM_MII_REG4);
if (val & 0x0080) {
/* link workaround */
- cas_phy_write(cp, BROADCOM_MII_REG4,
+ cas_phy_write(cp, BROADCOM_MII_REG4,
val & ~0x0080);
}
-
+
} else if (cp->cas_flags & CAS_FLAG_SATURN) {
- writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
- SATURN_PCFG_FSI : 0x0,
+ writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
+ SATURN_PCFG_FSI : 0x0,
cp->regs + REG_SATURN_PCFG);
/* load firmware to address 10Mbps auto-negotiation
- * issue. NOTE: this will need to be changed if the
+ * issue. NOTE: this will need to be changed if the
* default firmware gets fixed.
*/
if (PHY_NS_DP83065 == cp->phy_id) {
cas_phy_read(cp, MII_ADVERTISE) |
(ADVERTISE_10HALF | ADVERTISE_10FULL |
ADVERTISE_100HALF | ADVERTISE_100FULL |
- CAS_ADVERTISE_PAUSE |
+ CAS_ADVERTISE_PAUSE |
CAS_ADVERTISE_ASYM_PAUSE));
-
+
if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
/* make sure that we don't advertise half
* duplex to avoid a chip issue
limit = STOP_TRIES;
while (limit-- > 0) {
udelay(10);
- if ((readl(cp->regs + REG_PCS_MII_CTRL) &
+ if ((readl(cp->regs + REG_PCS_MII_CTRL) &
PCS_MII_RESET) == 0)
break;
}
/* Advertise all capabilities except half-duplex. */
val = readl(cp->regs + REG_PCS_MII_ADVERT);
val &= ~PCS_MII_ADVERT_HD;
- val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
+ val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
PCS_MII_ADVERT_ASYM_PAUSE);
writel(val, cp->regs + REG_PCS_MII_ADVERT);
PCS_MII_STATUS_REMOTE_FAULT)) ==
(PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT)) {
if (netif_msg_link(cp))
- printk(KERN_INFO "%s: PCS RemoteFault\n",
+ printk(KERN_INFO "%s: PCS RemoteFault\n",
cp->dev->name);
}
if (cp->opened) {
cp->lstate = link_up;
cp->link_transition = LINK_TRANSITION_LINK_UP;
-
+
cas_set_link_modes(cp);
netif_carrier_on(cp->dev);
}
cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
!cp->link_transition_jiffies_valid) {
/*
- * force a reset, as a workaround for the
- * link-failure problem. May want to move this to a
+ * force a reset, as a workaround for the
+ * link-failure problem. May want to move this to a
* point a bit earlier in the sequence. If we had
* generated a reset a short time ago, we'll wait for
* the link timer to check the status until a
return retval;
}
- static int cas_pcs_interrupt(struct net_device *dev,
+ static int cas_pcs_interrupt(struct net_device *dev,
struct cas *cp, u32 status)
{
u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);
- if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
+ if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
return 0;
return cas_pcs_link_check(cp);
}
- static int cas_txmac_interrupt(struct net_device *dev,
+ static int cas_txmac_interrupt(struct net_device *dev,
struct cas *cp, u32 status)
{
u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);
return 0;
}
- static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
+ static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
{
cas_hp_inst_t *inst;
u32 val;
static void cas_init_rx_dma(struct cas *cp)
{
- u64 desc_dma = cp->block_dvma;
+ u64 desc_dma = cp->block_dvma;
u32 val;
int i, size;
/* rx free descriptors */
- val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
+ val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
if ((N_RX_DESC_RINGS > 1) &&
val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
writel(val, cp->regs + REG_RX_CFG);
- val = (unsigned long) cp->init_rxds[0] -
+ val = (unsigned long) cp->init_rxds[0] -
(unsigned long) cp->init_block;
writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
- /* rx desc 2 is for IPSEC packets. however,
+ /* rx desc 2 is for IPSEC packets. however,
* we don't use it for that purpose.
*/
- val = (unsigned long) cp->init_rxds[1] -
+ val = (unsigned long) cp->init_rxds[1] -
(unsigned long) cp->init_block;
writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
- writel((desc_dma + val) & 0xffffffff, cp->regs +
+ writel((desc_dma + val) & 0xffffffff, cp->regs +
REG_PLUS_RX_DB1_LOW);
- writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
+ writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
REG_PLUS_RX_KICK1);
}
-
+
/* rx completion registers */
- val = (unsigned long) cp->init_rxcs[0] -
+ val = (unsigned long) cp->init_rxcs[0] -
(unsigned long) cp->init_block;
writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);
if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
/* rx comp 2-4 */
for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
- val = (unsigned long) cp->init_rxcs[i] -
+ val = (unsigned long) cp->init_rxcs[i] -
(unsigned long) cp->init_block;
- writel((desc_dma + val) >> 32, cp->regs +
+ writel((desc_dma + val) >> 32, cp->regs +
REG_PLUS_RX_CBN_HI(i));
- writel((desc_dma + val) & 0xffffffff, cp->regs +
+ writel((desc_dma + val) & 0xffffffff, cp->regs +
REG_PLUS_RX_CBN_LOW(i));
}
}
/* 2 is different from 3 and 4 */
if (N_RX_COMP_RINGS > 1)
- writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
+ writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
cp->regs + REG_PLUS_ALIASN_CLEAR(1));
- for (i = 2; i < N_RX_COMP_RINGS; i++)
- writel(INTR_RX_DONE_ALT,
+ for (i = 2; i < N_RX_COMP_RINGS; i++)
+ writel(INTR_RX_DONE_ALT,
cp->regs + REG_PLUS_ALIASN_CLEAR(i));
}
/* set up pause thresholds */
val = CAS_BASE(RX_PAUSE_THRESH_OFF,
cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
- val |= CAS_BASE(RX_PAUSE_THRESH_ON,
+ val |= CAS_BASE(RX_PAUSE_THRESH_ON,
cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
writel(val, cp->regs + REG_RX_PAUSE_THRESH);
-
+
/* zero out dma reassembly buffers */
for (i = 0; i < 64; i++) {
writel(i, cp->regs + REG_RX_TABLE_ADDR);
* this should be tunable.
*/
writel(0x0, cp->regs + REG_RX_RED);
-
+
/* receive page sizes. default == 2K (0x800) */
val = 0;
if (cp->page_size == 0x1000)
val = 0x2;
else if (cp->page_size == 0x4000)
val = 0x3;
-
+
/* round mtu + offset. constrain to page size. */
size = cp->dev->mtu + 64;
if (size > cp->page_size)
cp->mtu_stride = 1 << (i + 10);
val = CAS_BASE(RX_PAGE_SIZE, val);
- val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
+ val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
writel(val, cp->regs + REG_RX_PAGE_SIZE);
-
+
/* enable the header parser if desired */
if (CAS_HP_FIRMWARE == cas_prog_null)
return;
static inline void cas_rxc_init(struct cas_rx_comp *rxc)
{
memset(rxc, 0, sizeof(*rxc));
- rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
+ rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
}
/* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1]
}
return new;
}
-
+
/* this needs to be changed if we actually use the ENC RX DESC ring */
- static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
+ static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
const int index)
{
cas_page_t **page0 = cp->rx_pages[0];
page1[index] = page0[index];
page0[index] = new;
}
- }
+ }
RX_USED_SET(page0[index], 0);
return page0[index];
}
for (i = 0; i < size; i++) {
cas_page_t *page = cas_page_swap(cp, 0, i);
rxd[i].buffer = cpu_to_le64(page->dma_addr);
- rxd[i].index = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
+ rxd[i].index = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
CAS_BASE(RX_INDEX_RING, 0));
}
- cp->rx_old[0] = RX_DESC_RINGN_SIZE(0) - 4;
+ cp->rx_old[0] = RX_DESC_RINGN_SIZE(0) - 4;
cp->rx_last[0] = 0;
cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
}
/* these are all rollovers */
spin_lock(&cp->stat_lock[0]);
- if (stat & MAC_RX_ALIGN_ERR)
+ if (stat & MAC_RX_ALIGN_ERR)
cp->net_stats[0].rx_frame_errors += 0x10000;
if (stat & MAC_RX_CRC_ERR)
return 0;
}
-
+
/* Must be invoked under cp->lock. */
static inline int cas_mdio_link_not_up(struct cas *cp)
{
u16 val;
-
+
switch (cp->lstate) {
case link_force_ret:
if (netif_msg_link(cp))
cp->lstate = link_force_ok;
cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
break;
-
+
case link_aneg:
val = cas_phy_read(cp, MII_BMCR);
*/
val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
val |= BMCR_FULLDPLX;
- val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
+ val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
CAS_BMCR_SPEED1000 : BMCR_SPEED100;
cas_phy_write(cp, MII_BMCR, val);
cp->timer_ticks = 5;
if (bmsr & BMSR_LSTATUS) {
/* Ok, here we got a link. If we had it due to a forced
- * fallback, and we were configured for autoneg, we
+ * fallback, and we were configured for autoneg, we
* retry a short autoneg pass. If you know your hub is
* broken, use ethtool ;)
*/
- if ((cp->lstate == link_force_try) &&
+ if ((cp->lstate == link_force_try) &&
(cp->link_cntl & BMCR_ANENABLE)) {
cp->lstate = link_force_ret;
cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
printk(KERN_INFO "%s: Link down\n",
cp->dev->name);
restart = 1;
-
+
} else if (++cp->timer_ticks > 10)
cas_mdio_link_not_up(cp);
-
+
return restart;
}
skbs[entry] = NULL;
cp->tx_tiny_use[ring][entry].nbufs = 0;
-
+
for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
struct cas_tx_desc *txd = txds + entry;
if (cp->tx_tiny_use[ring][entry].used) {
cp->tx_tiny_use[ring][entry].used = 0;
entry = TX_DESC_NEXT(ring, entry);
- }
+ }
}
spin_lock(&cp->stat_lock[ring]);
#else
limit = readl(cp->regs + REG_TX_COMPN(ring));
#endif
- if (cp->tx_old[ring] != limit)
+ if (cp->tx_old[ring] != limit)
cas_tx_ringN(cp, ring, limit);
}
}
- static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
- int entry, const u64 *words,
+ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
+ int entry, const u64 *words,
struct sk_buff **skbref)
{
int dlen, hlen, len, i, alloclen;
struct cas_page *page;
struct sk_buff *skb;
void *addr, *crcaddr;
- char *p;
+ char *p;
hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]);
len = hlen + dlen;
- if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
+ if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
alloclen = len;
- else
+ else
alloclen = max(hlen, RX_COPY_MIN);
skb = dev_alloc_skb(alloclen + swivel + cp->crc_size);
- if (skb == NULL)
+ if (skb == NULL)
return -1;
*skbref = skb;
if (hlen) { /* always copy header pages */
i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
- off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
+ off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
swivel;
i = hlen;
RX_USED_ADD(page, 0x100);
p += hlen;
swivel = 0;
- }
+ }
if (alloclen < (hlen + dlen)) {
frag->page = page->buffer;
frag->page_offset = off;
frag->size = hlen - swivel;
-
+
/* any more data? */
if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
hlen = dlen;
i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
- pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
- hlen + cp->crc_size,
+ pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
+ hlen + cp->crc_size,
PCI_DMA_FROMDEVICE);
pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
hlen + cp->crc_size,
skb_shinfo(skb)->nr_frags++;
skb->data_len += hlen;
- skb->len += hlen;
+ skb->len += hlen;
frag++;
get_page(page->buffer);
RX_USED_ADD(page, cp->mtu_stride);
else
RX_USED_ADD(page, i);
-
+
/* any more data? */
if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
p += hlen;
i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
- pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
- dlen + cp->crc_size,
+ pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
+ dlen + cp->crc_size,
PCI_DMA_FROMDEVICE);
addr = cas_page_map(page->buffer);
memcpy(p, addr, dlen + cp->crc_size);
dlen + cp->crc_size,
PCI_DMA_FROMDEVICE);
cas_page_unmap(addr);
- RX_USED_ADD(page, dlen + cp->crc_size);
+ RX_USED_ADD(page, dlen + cp->crc_size);
}
end_copy_pkt:
if (cp->crc_size) {
cas_page_unmap(addr);
}
skb->csum = ntohs(i ^ 0xffff);
- skb->ip_summed = CHECKSUM_HW;
+ skb->ip_summed = CHECKSUM_COMPLETE;
skb->protocol = eth_type_trans(skb, cp->dev);
return len;
}
/* we can handle up to 64 rx flows at a time. we do the same thing
- * as nonreassm except that we batch up the buffers.
+ * as nonreassm except that we batch up the buffers.
* NOTE: we currently just treat each flow as a bunch of packets that
* we pass up. a better way would be to coalesce the packets
* into a jumbo packet. to do that, we need to do the following:
* data length and merge the checksums.
* 3) on flow release, fix up the header.
* 4) make sure the higher layer doesn't care.
- * because packets get coalesced, we shouldn't run into fragment count
+ * because packets get coalesced, we shouldn't run into fragment count
* issues.
*/
static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
{
int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);
struct sk_buff_head *flow = &cp->rx_flows[flowid];
-
- /* this is protected at a higher layer, so no need to
+
+ /* this is protected at a higher layer, so no need to
* do any additional locking here. stick the buffer
* at the end.
*/
new = cas_page_swap(cp, ring, index);
cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
cp->init_rxds[ring][entry].index =
- cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
+ cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
CAS_BASE(RX_INDEX_RING, ring));
entry = RX_DESC_ENTRY(ring, entry + 1);
cp->rx_old[ring] = entry;
-
+
if (entry % 4)
return;
if (ring == 0)
writel(entry, cp->regs + REG_RX_KICK);
else if ((N_RX_DESC_RINGS > 1) &&
- (cp->cas_flags & CAS_FLAG_REG_PLUS))
+ (cp->cas_flags & CAS_FLAG_REG_PLUS))
writel(entry, cp->regs + REG_PLUS_RX_KICK1);
}
cp->dev->name, ring, entry);
cluster = -1;
- count = entry & 0x3;
+ count = entry & 0x3;
last = RX_DESC_ENTRY(ring, num ? entry + num - 4: entry - 4);
released = 0;
while (entry != last) {
if (cas_buffer_count(page[entry]) > 1) {
cas_page_t *new = cas_page_dequeue(cp);
if (!new) {
- /* let the timer know that we need to
+ /* let the timer know that we need to
* do this again
*/
cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
if (!timer_pending(&cp->link_timer))
- mod_timer(&cp->link_timer, jiffies +
+ mod_timer(&cp->link_timer, jiffies +
CAS_LINK_FAST_TIMEOUT);
cp->rx_old[ring] = entry;
cp->rx_last[ring] = num ? num - released : 0;
spin_lock(&cp->rx_inuse_lock);
list_add(&page[entry]->list, &cp->rx_inuse_list);
spin_unlock(&cp->rx_inuse_lock);
- cp->init_rxds[ring][entry].buffer =
+ cp->init_rxds[ring][entry].buffer =
cpu_to_le64(new->dma_addr);
page[entry] = new;
-
+
}
if (++count == 4) {
}
cp->rx_old[ring] = entry;
- if (cluster < 0)
+ if (cluster < 0)
return 0;
if (ring == 0)
writel(cluster, cp->regs + REG_RX_KICK);
else if ((N_RX_DESC_RINGS > 1) &&
- (cp->cas_flags & CAS_FLAG_REG_PLUS))
+ (cp->cas_flags & CAS_FLAG_REG_PLUS))
writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
return 0;
}
/* process a completion ring. packets are set up in three basic ways:
* small packets: should be copied header + data in single buffer.
* large packets: header and data in a single buffer.
- * split packets: header in a separate buffer from data.
+ * split packets: header in a separate buffer from data.
* data may be in multiple pages. data may be > 256
- * bytes but in a single page.
+ * bytes but in a single page.
*
* NOTE: RX page posting is done in this routine as well. while there's
* the capability of using multiple RX completion rings, it isn't
* really worthwhile due to the fact that the page posting will
- * force serialization on the single descriptor ring.
+ * force serialization on the single descriptor ring.
*/
static int cas_rx_ringN(struct cas *cp, int ring, int budget)
{
if (netif_msg_intr(cp))
printk(KERN_DEBUG "%s: rx[%d] interrupt, done: %d/%d\n",
cp->dev->name, ring,
- readl(cp->regs + REG_RX_COMP_HEAD),
+ readl(cp->regs + REG_RX_COMP_HEAD),
cp->rx_new[ring]);
entry = cp->rx_new[ring];
*/
if (RX_DONT_BATCH || (type == 0x2)) {
/* non-reassm: these always get released */
- cas_skb_release(skb);
+ cas_skb_release(skb);
} else {
cas_rx_flow_pkt(cp, words, skb);
}
i = CAS_VAL(RX_INDEX_NUM, i);
cas_post_page(cp, dring, i);
}
-
+
if (words[0] & RX_COMP1_RELEASE_DATA) {
i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
dring = CAS_VAL(RX_INDEX_RING, i);
}
/* skip to the next entry */
- entry = RX_COMP_ENTRY(ring, entry + 1 +
+ entry = RX_COMP_ENTRY(ring, entry + 1 +
CAS_VAL(RX_COMP1_SKIP, words[0]));
#ifdef USE_NAPI
if (budget && (npackets >= budget))
int last, entry;
last = cp->rx_cur[ring];
- entry = cp->rx_new[ring];
+ entry = cp->rx_new[ring];
if (netif_msg_intr(cp))
printk(KERN_DEBUG "%s: rxc[%d] interrupt, done: %d/%d\n",
dev->name, ring, readl(cp->regs + REG_RX_COMP_HEAD),
entry);
-
+
/* zero and re-mark descriptors */
while (last != entry) {
cas_rxc_init(rxc + last);
if (ring == 0)
writel(last, cp->regs + REG_RX_COMP_TAIL);
- else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
+ else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
}
- /* cassini can use all four PCI interrupts for the completion ring.
+ /* cassini can use all four PCI interrupts for the completion ring.
* rings 3 and 4 are identical
*/
#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
- static inline void cas_handle_irqN(struct net_device *dev,
+ static inline void cas_handle_irqN(struct net_device *dev,
struct cas *cp, const u32 status,
const int ring)
{
- if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
+ if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
cas_post_rxcs_ringN(dev, cp, ring);
}
static inline void cas_handle_irq1(struct cas *cp, const u32 status)
{
if (status & INTR_RX_BUF_UNAVAIL_1) {
- /* Frame arrived, no free RX buffers available.
+ /* Frame arrived, no free RX buffers available.
* NOTE: we can get this on a link transition. */
cas_post_rxds_ringN(cp, 1, 0);
spin_lock(&cp->stat_lock[1]);
spin_unlock(&cp->stat_lock[1]);
}
- if (status & INTR_RX_BUF_AE_1)
- cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
+ if (status & INTR_RX_BUF_AE_1)
+ cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
RX_AE_FREEN_VAL(1));
if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
cas_abnormal_irq(dev, cp, status);
if (status & INTR_RX_BUF_UNAVAIL) {
- /* Frame arrived, no free RX buffers available.
+ /* Frame arrived, no free RX buffers available.
* NOTE: we can get this on a link transition.
*/
cas_post_rxds_ringN(cp, 0, 0);
todo = min(*budget, dev->quota);
/* to make sure we're fair with the work we loop through each
- * ring N_RX_COMP_RING times with a request of
+ * ring N_RX_COMP_RING times with a request of
* todo / N_RX_COMP_RINGS
*/
enable_intr = 1;
txd->buffer = cpu_to_le64(mapping);
}
- static inline void *tx_tiny_buf(struct cas *cp, const int ring,
+ static inline void *tx_tiny_buf(struct cas *cp, const int ring,
const int entry)
{
return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
}
- static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
+ static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
const int entry, const int tentry)
{
cp->tx_tiny_use[ring][tentry].nbufs++;
return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
}
- static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
+ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
struct sk_buff *skb)
{
struct net_device *dev = cp->dev;
spin_lock_irqsave(&cp->tx_lock[ring], flags);
/* This is a hard error, log it. */
- if (TX_BUFFS_AVAIL(cp, ring) <=
+ if (TX_BUFFS_AVAIL(cp, ring) <=
CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
netif_stop_queue(dev);
spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
}
ctrl = 0;
- if (skb->ip_summed == CHECKSUM_HW) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
u64 csum_start_off, csum_stuff_off;
csum_start_off = (u64) (skb->h.raw - skb->data);
csum_stuff_off = (u64) ((skb->h.raw + skb->csum) - skb->data);
- ctrl = TX_DESC_CSUM_EN |
+ ctrl = TX_DESC_CSUM_EN |
CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off);
}
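A minimal standalone sketch of the two offsets the CHECKSUM_PARTIAL branch above packs into the descriptor. calc_csum_offsets() and its arguments are illustrative names, not driver symbols; the assumption is the older skb layout used in this file, where h.raw points at the transport header and, for CHECKSUM_PARTIAL, skb->csum holds the offset of the checksum field within that header.

#include <stdint.h>

struct csum_offsets {
	uint64_t start;		/* where the NIC begins summing */
	uint64_t stuff;		/* where it writes the folded result */
};

static struct csum_offsets calc_csum_offsets(const uint8_t *data,
					     const uint8_t *th,
					     uint32_t csum_field_off)
{
	struct csum_offsets o;

	o.start = (uint64_t)(th - data);	/* e.g. 14 + 20 for Ethernet + IPv4 */
	o.stuff = o.start + csum_field_off;	/* 16 for TCP, 6 for UDP */
	return o;
}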
tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
if (unlikely(tabort)) {
/* NOTE: len is always > tabort */
- cas_write_txd(cp, ring, entry, mapping, len - tabort,
+ cas_write_txd(cp, ring, entry, mapping, len - tabort,
ctrl | TX_DESC_SOF, 0);
entry = TX_DESC_NEXT(ring, entry);
- memcpy(tx_tiny_buf(cp, ring, entry), skb->data +
+ memcpy(tx_tiny_buf(cp, ring, entry), skb->data +
len - tabort, tabort);
mapping = tx_tiny_map(cp, ring, entry, tentry);
cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
(nr_frags == 0));
} else {
- cas_write_txd(cp, ring, entry, mapping, len, ctrl |
+ cas_write_txd(cp, ring, entry, mapping, len, ctrl |
TX_DESC_SOF, (nr_frags == 0));
}
entry = TX_DESC_NEXT(ring, entry);
cas_write_txd(cp, ring, entry, mapping, len - tabort,
ctrl, 0);
entry = TX_DESC_NEXT(ring, entry);
-
+
addr = cas_page_map(fragp->page);
memcpy(tx_tiny_buf(cp, ring, entry),
- addr + fragp->page_offset + len - tabort,
+ addr + fragp->page_offset + len - tabort,
tabort);
cas_page_unmap(addr);
mapping = tx_tiny_map(cp, ring, entry, tentry);
if (netif_msg_tx_queued(cp))
printk(KERN_DEBUG "%s: tx[%d] queued, slot %d, skblen %d, "
"avail %d\n",
- dev->name, ring, entry, skb->len,
+ dev->name, ring, entry, skb->len,
TX_BUFFS_AVAIL(cp, ring));
writel(entry, cp->regs + REG_TX_KICKN(ring));
spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
return 0;
- }
+ }
static int cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
/* this is only used as a load-balancing hint, so it doesn't
* need to be SMP safe
*/
- static int ring;
+ static int ring;
if (skb_padto(skb, cp->min_frame_size))
return 0;
/* enable completion writebacks, enable paced mode,
* disable read pipe, and disable pre-interrupt compwbs
*/
- val = TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
+ val = TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 |
- TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
+ TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
TX_CFG_INTR_COMPWB_DIS;
/* write out tx ring info and tx desc bases */
for (i = 0; i < MAX_TX_RINGS; i++) {
- off = (unsigned long) cp->init_txds[i] -
+ off = (unsigned long) cp->init_txds[i] -
(unsigned long) cp->init_block;
val |= CAS_TX_RINGN_BASE(i);
{
u32 rxcfg = 0;
int i;
-
+
if (cp->dev->flags & IFF_PROMISC) {
rxcfg |= MAC_RX_CFG_PROMISC_EN;
writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 2));
continue;
}
- writel((dmi->dmi_addr[4] << 8) | dmi->dmi_addr[5],
+ writel((dmi->dmi_addr[4] << 8) | dmi->dmi_addr[5],
cp->regs + REG_MAC_ADDRN(i*3 + 0));
- writel((dmi->dmi_addr[2] << 8) | dmi->dmi_addr[3],
+ writel((dmi->dmi_addr[2] << 8) | dmi->dmi_addr[3],
cp->regs + REG_MAC_ADDRN(i*3 + 1));
- writel((dmi->dmi_addr[0] << 8) | dmi->dmi_addr[1],
+ writel((dmi->dmi_addr[0] << 8) | dmi->dmi_addr[1],
cp->regs + REG_MAC_ADDRN(i*3 + 2));
dmi = dmi->next;
}
- /* use hw hash table for the next series of
+ /* use hw hash table for the next series of
* multicast addresses
*/
memset(hash_table, 0, sizeof(hash_table));
dmi = dmi->next;
}
for (i=0; i < 16; i++)
- writel(hash_table[i], cp->regs +
+ writel(hash_table[i], cp->regs +
REG_MAC_HASH_TABLEN(i));
rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
}
writel(0x00, cp->regs + REG_MAC_IPG0);
writel(0x08, cp->regs + REG_MAC_IPG1);
writel(0x04, cp->regs + REG_MAC_IPG2);
-
+
/* change later for 802.3z */
- writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
+ writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
/* min frame + FCS */
writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);
/* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we
- * specify the maximum frame size to prevent RX tag errors on
+ * specify the maximum frame size to prevent RX tag errors on
* oversized frames.
*/
writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) |
- CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
- (CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
+ CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
+ (CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
cp->regs + REG_MAC_FRAMESIZE_MAX);
- /* NOTE: crc_size is used as a surrogate for half-duplex.
+ /* NOTE: crc_size is used as a surrogate for half-duplex.
* workaround saturn half-duplex issue by increasing preamble
* size to 65 bytes.
*/
* spin_lock_irqsave, but we are called only in cas_init_hw and
* cas_init_hw is protected by cas_lock_all, which calls
* spin_lock_irq (so it doesn't need to save the flags, and
- * we should be OK for the writel, as that is the only
+ * we should be OK for the writel, as that is the only
* difference).
*/
cp->mac_rx_cfg = rxcfg = cas_setup_multicast(cp);
{
int len = strlen(str) + 1;
int i;
-
+
for (i = 0; i < len; i++) {
if (readb(p + i) != str[i])
return 0;
* number.
* 3) fiber cards don't have bridges, so their slot numbers don't
* mean anything.
- * 4) we don't actually know we have a fiber card until after
+ * 4) we don't actually know we have a fiber card until after
* the mac addresses are parsed.
*/
static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
(readb(p + i + 1) == 0x43) &&
(readb(p + i + 2) == 0x49) &&
(readb(p + i + 3) == 0x52)) {
- base = p + (readb(p + i + 8) |
+ base = p + (readb(p + i + 8) |
(readb(p + i + 9) << 8));
break;
- }
+ }
}
if (!base || (readb(base) != 0x82))
goto use_random_mac_addr;
-
+
i = (readb(base + 1) | (readb(base + 2) << 8)) + 3;
while (i < EXPANSION_ROM_SIZE) {
if (readb(base + i) != 0x90) /* no vpd found */
char type;
p += 3;
-
+
/* look for the following things:
* -- correct length == 29
- * 3 (type) + 2 (size) +
- * 18 (strlen("local-mac-address") + 1) +
- * 6 (mac addr)
+ * 3 (type) + 2 (size) +
+ * 18 (strlen("local-mac-address") + 1) +
+ * 6 (mac addr)
* -- VPD Instance 'I'
* -- VPD Type Bytes 'B'
* -- VPD data length == 6
* -- property string == local-mac-address
- *
+ *
* -- correct length == 24
- * 3 (type) + 2 (size) +
- * 12 (strlen("entropy-dev") + 1) +
+ * 3 (type) + 2 (size) +
+ * 12 (strlen("entropy-dev") + 1) +
* 7 (strlen("vms110") + 1)
* -- VPD Instance 'I'
* -- VPD Type String 'B'
* -- property string == entropy-dev
*
* -- correct length == 18
- * 3 (type) + 2 (size) +
- * 9 (strlen("phy-type") + 1) +
+ * 3 (type) + 2 (size) +
+ * 9 (strlen("phy-type") + 1) +
* 4 (strlen("pcs") + 1)
* -- VPD Instance 'I'
* -- VPD Type String 'S'
* -- VPD data length == 4
* -- property string == phy-type
- *
+ *
* -- correct length == 23
- * 3 (type) + 2 (size) +
- * 14 (strlen("phy-interface") + 1) +
+ * 3 (type) + 2 (size) +
+ * 14 (strlen("phy-interface") + 1) +
* 4 (strlen("pcs") + 1)
* -- VPD Instance 'I'
* -- VPD Type String 'S'
type = readb(p + 3);
if (type == 'B') {
if ((klen == 29) && readb(p + 4) == 6 &&
- cas_vpd_match(p + 5,
+ cas_vpd_match(p + 5,
"local-mac-address")) {
- if (mac_off++ > offset)
+ if (mac_off++ > offset)
goto next;
/* set mac address */
- for (j = 0; j < 6; j++)
- dev_addr[j] =
+ for (j = 0; j < 6; j++)
+ dev_addr[j] =
readb(p + 23 + j);
goto found_mac;
}
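The constant 23 in readb(p + 23 + j) above falls straight out of the record layout spelled out in the comment earlier in this function; a small arithmetic sketch (values only, none of these names exist in the driver):

enum {
	VPD_REC_HDR = 3 + 2,			/* type + 2-byte size, per the comment */
	VPD_PROP    = 17 + 1,			/* strlen("local-mac-address") + NUL */
	VPD_MAC_OFF = VPD_REC_HDR + VPD_PROP,	/* = 23, the offset read above */
	VPD_REC_LEN = VPD_MAC_OFF + 6,		/* = 29, the klen tested above */
};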
goto next;
#ifdef USE_ENTROPY_DEV
- if ((klen == 24) &&
+ if ((klen == 24) &&
cas_vpd_match(p + 5, "entropy-dev") &&
cas_vpd_match(p + 17, "vms110")) {
cp->cas_flags |= CAS_FLAG_ENTROPY_DEV;
goto found_phy;
}
}
-
+
if ((klen == 23) && readb(p + 4) == 4 &&
cas_vpd_match(p + 5, "phy-interface")) {
if (cas_vpd_match(p + 19, "pcs")) {
int i;
/* get page size for rx buffers. */
- cp->page_order = 0;
+ cp->page_order = 0;
#ifdef USE_PAGE_ORDER
if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) {
/* see if we can allocate larger pages */
- struct page *page = alloc_pages(GFP_ATOMIC,
- CAS_JUMBO_PAGE_SHIFT -
+ struct page *page = alloc_pages(GFP_ATOMIC,
+ CAS_JUMBO_PAGE_SHIFT -
PAGE_SHIFT);
if (page) {
__free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64;
cp->rx_fifo_size = RX_FIFO_SIZE;
- /* finish phy determination. MDIO1 takes precedence over MDIO0 if
+ /* finish phy determination. MDIO1 takes precedence over MDIO0 if
* they're both connected.
*/
- cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr,
+ cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr,
PCI_SLOT(pdev->devfn));
if (cp->phy_type & CAS_PHY_SERDES) {
cp->cas_flags |= CAS_FLAG_1000MB_CAP;
return 0; /* no more checking needed */
- }
+ }
/* MII */
cfg = readl(cp->regs + REG_MIF_CFG);
done:
/* see if we can do gigabit */
cfg = cas_phy_read(cp, MII_BMSR);
- if ((cfg & CAS_BMSR_1000_EXTEND) &&
+ if ((cfg & CAS_BMSR_1000_EXTEND) &&
cas_phy_read(cp, CAS_MII_1000_EXTEND))
cp->cas_flags |= CAS_FLAG_1000MB_CAP;
return 0;
int i;
u32 val;
int txfailed = 0;
-
+
/* enable dma */
val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN;
writel(val, cp->regs + REG_TX_CFG);
val = readl(cp->regs + REG_MAC_RX_CFG);
if ((val & MAC_RX_CFG_EN)) {
if (txfailed) {
- printk(KERN_ERR
- "%s: enabling mac failed [tx:%08x:%08x].\n",
+ printk(KERN_ERR
+ "%s: enabling mac failed [tx:%08x:%08x].\n",
cp->dev->name,
readl(cp->regs + REG_MIF_STATE_MACHINE),
readl(cp->regs + REG_MAC_STATE_MACHINE));
}
udelay(10);
}
- printk(KERN_ERR "%s: enabling mac failed [%s:%08x:%08x].\n",
+ printk(KERN_ERR "%s: enabling mac failed [%s:%08x:%08x].\n",
cp->dev->name,
(txfailed? "tx,rx":"rx"),
readl(cp->regs + REG_MIF_STATE_MACHINE),
writel(0, cp->regs + REG_RX_COMP_TAIL);
if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
- if (N_RX_DESC_RINGS > 1)
- writel(RX_DESC_RINGN_SIZE(1) - 4,
+ if (N_RX_DESC_RINGS > 1)
+ writel(RX_DESC_RINGN_SIZE(1) - 4,
cp->regs + REG_PLUS_RX_KICK1);
- for (i = 1; i < N_RX_COMP_RINGS; i++)
+ for (i = 1; i < N_RX_COMP_RINGS; i++)
writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i));
}
}
*fd = 0;
*spd = 10;
*pause = 0;
-
+
/* use GMII registers */
val = cas_phy_read(cp, MII_LPA);
if (val & CAS_LPA_PAUSE)
cas_mif_poll(cp, 0);
val = cas_phy_read(cp, MII_BMCR);
if (val & BMCR_ANENABLE) {
- cas_read_mii_link_mode(cp, &full_duplex, &speed,
+ cas_read_mii_link_mode(cp, &full_duplex, &speed,
&pause);
} else {
if (val & BMCR_FULLDPLX)
if (!full_duplex)
val |= MAC_XIF_DISABLE_ECHO;
}
- if (full_duplex)
+ if (full_duplex)
val |= MAC_XIF_FDPLX_LED;
if (speed == 1000)
val |= MAC_XIF_GMII_MODE;
/* val now set up for REG_MAC_TX_CFG */
/* If gigabit and half-duplex, enable carrier extension
- * mode. increase slot time to 512 bytes as well.
+ * mode. increase slot time to 512 bytes as well.
* else, disable it and make sure slot time is 64 bytes.
* also activate checksum bug workaround
*/
if ((speed == 1000) && !full_duplex) {
- writel(val | MAC_TX_CFG_CARRIER_EXTEND,
+ writel(val | MAC_TX_CFG_CARRIER_EXTEND,
cp->regs + REG_MAC_TX_CFG);
val = readl(cp->regs + REG_MAC_RX_CFG);
val &= ~MAC_RX_CFG_STRIP_FCS; /* checksum workaround */
- writel(val | MAC_RX_CFG_CARRIER_EXTEND,
+ writel(val | MAC_RX_CFG_CARRIER_EXTEND,
cp->regs + REG_MAC_RX_CFG);
writel(0x200, cp->regs + REG_MAC_SLOT_TIME);
} else {
writel(val, cp->regs + REG_MAC_TX_CFG);
- /* checksum bug workaround. don't strip FCS when in
+ /* checksum bug workaround. don't strip FCS when in
* half-duplex mode
*/
val = readl(cp->regs + REG_MAC_RX_CFG);
cp->crc_size = 4;
cp->min_frame_size = CAS_MIN_FRAME;
}
- writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
+ writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
cp->regs + REG_MAC_RX_CFG);
writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
}
val |= MAC_CTRL_CFG_SEND_PAUSE_EN;
if (pause & 0x01) { /* symmetric pause */
val |= MAC_CTRL_CFG_RECV_PAUSE_EN;
- }
+ }
}
writel(val, cp->regs + REG_MAC_CTRL_CFG);
cas_start_dma(cp);
*/
static void cas_hard_reset(struct cas *cp)
{
- writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
+ writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
udelay(20);
pci_restore_state(cp->pdev);
}
* need some special handling if the chip is set into a
* loopback mode.
*/
- writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
+ writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
cp->regs + REG_SW_RESET);
} else {
writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
done:
/* enable various BIM interrupts */
- writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
+ writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);
/* clear out pci error status mask for handled errors.
* we don't deal with DMA counter overflows as they happen
* all the time.
*/
- writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
- PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
- PCI_ERR_BIM_DMA_READ), cp->regs +
+ writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
+ PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
+ PCI_ERR_BIM_DMA_READ), cp->regs +
REG_PCI_ERR_STATUS_MASK);
/* set up for MII by default to address mac rx reset timeout
#else
while (atomic_read(&cp->reset_task_pending))
schedule();
- #endif
+ #endif
/* Actually stop the chip */
cas_lock_all_save(cp, flags);
cas_reset(cp, 0);
}
schedule_work(&cp->reset_task);
#else
- atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
+ atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
CAS_RESET_ALL : CAS_RESET_MTU);
printk(KERN_ERR "reset called in cas_change_mtu\n");
schedule_work(&cp->reset_task);
* needs to be unmapped.
*/
daddr = le64_to_cpu(txd[ent].buffer);
- dlen = CAS_VAL(TX_DESC_BUFLEN,
+ dlen = CAS_VAL(TX_DESC_BUFLEN,
le64_to_cpu(txd[ent].control));
pci_unmap_page(cp->pdev, daddr, dlen,
PCI_DMA_TODEVICE);
size = RX_DESC_RINGN_SIZE(ring);
for (i = 0; i < size; i++) {
- if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
+ if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
return -1;
}
return 0;
* call to cas_init_hw will restart auto negotiation.
* Setting the second argument of cas_reset to
* !(pending == CAS_RESET_ALL) will set this argument
- * to 1 (avoiding reinitializing the PHY for the normal
+ * to 1 (avoiding reinitializing the PHY for the normal
* PCS case) when auto negotiation is not restarted.
*/
#if 1
if (link_transition_timeout != 0 &&
cp->link_transition_jiffies_valid &&
- ((jiffies - cp->link_transition_jiffies) >
+ ((jiffies - cp->link_transition_jiffies) >
(link_transition_timeout))) {
- /* One-second counter so link-down workaround doesn't
+ /* One-second counter so link-down workaround doesn't
* cause resets to occur so fast as to fool the switch
* into thinking the link is down.
*/
#if 1
if (atomic_read(&cp->reset_task_pending_all) ||
atomic_read(&cp->reset_task_pending_spare) ||
- atomic_read(&cp->reset_task_pending_mtu))
+ atomic_read(&cp->reset_task_pending_mtu))
goto done;
#else
- if (atomic_read(&cp->reset_task_pending))
+ if (atomic_read(&cp->reset_task_pending))
goto done;
#endif
spin_unlock_irqrestore(&cp->lock, flags);
}
- /* tiny buffers are used to avoid target abort issues with
+ /* tiny buffers are used to avoid target abort issues with
* older cassini's
*/
static void cas_tx_tiny_free(struct cas *cp)
if (!cp->tx_tiny_bufs[i])
continue;
- pci_free_consistent(pdev, TX_TINY_BUF_BLOCK,
+ pci_free_consistent(pdev, TX_TINY_BUF_BLOCK,
cp->tx_tiny_bufs[i],
cp->tx_tiny_dvma[i]);
cp->tx_tiny_bufs[i] = NULL;
int i;
for (i = 0; i < N_TX_RINGS; i++) {
- cp->tx_tiny_bufs[i] =
+ cp->tx_tiny_bufs[i] =
pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK,
&cp->tx_tiny_dvma[i]);
if (!cp->tx_tiny_bufs[i]) {
/* Reset the chip */
cas_lock_all_save(cp, flags);
/* We set the second arg to cas_reset to zero
- * because cas_init_hw below will have its second
+ * because cas_init_hw below will have its second
* argument set to non-zero, which will force
* autonegotiation to start.
*/
err = -ENOMEM;
if (cas_alloc_rxds(cp) < 0)
goto err_tx_tiny;
-
+
/* allocate spares */
cas_spare_init(cp);
cas_spare_recover(cp, GFP_KERNEL);
/* We can now request the interrupt as we know it's masked
* on the controller. cassini+ has up to 4 interrupts
- * that can be used, but you need to do explicit pci interrupt
+ * that can be used, but you need to do explicit pci interrupt
* mapping to expose them
*/
if (request_irq(cp->pdev->irq, cas_interrupt,
IRQF_SHARED, dev->name, (void *) dev)) {
- printk(KERN_ERR "%s: failed to request irq !\n",
+ printk(KERN_ERR "%s: failed to request irq !\n",
cp->dev->name);
err = -EAGAIN;
goto err_spare;
/* Stop traffic, mark us closed */
cas_lock_all_save(cp, flags);
- cp->opened = 0;
+ cp->opened = 0;
cas_reset(cp, 0);
- cas_phy_init(cp);
+ cas_phy_init(cp);
cas_begin_auto_negotiation(cp, NULL);
cas_clean_rings(cp);
cas_unlock_all_restore(cp, flags);
/* we collate all of the stats into net_stats[N_TX_RING] */
if (!cp->hw_running)
return stats + N_TX_RINGS;
-
+
/* collect outstanding stats */
/* WTZ: the Cassini spec gives these as 16 bit counters but
* stored in 32-bit words. Added a mask of 0xffff to be safe,
* that consistent.
*/
spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags);
- stats[N_TX_RINGS].rx_crc_errors +=
+ stats[N_TX_RINGS].rx_crc_errors +=
readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff;
- stats[N_TX_RINGS].rx_frame_errors +=
+ stats[N_TX_RINGS].rx_frame_errors +=
readl(cp->regs + REG_MAC_ALIGN_ERR) &0xffff;
- stats[N_TX_RINGS].rx_length_errors +=
+ stats[N_TX_RINGS].rx_length_errors +=
readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff;
#if 1
tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) +
stats[N_TX_RINGS].collisions +=
tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff);
#else
- stats[N_TX_RINGS].tx_aborted_errors +=
+ stats[N_TX_RINGS].tx_aborted_errors +=
readl(cp->regs + REG_MAC_COLL_EXCESS);
stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) +
readl(cp->regs + REG_MAC_COLL_LATE);
for (i = 0; i < N_TX_RINGS; i++) {
spin_lock(&cp->stat_lock[i]);
- stats[N_TX_RINGS].rx_length_errors +=
+ stats[N_TX_RINGS].rx_length_errors +=
stats[i].rx_length_errors;
stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors;
stats[N_TX_RINGS].rx_packets += stats[i].rx_packets;
u32 rxcfg, rxcfg_new;
unsigned long flags;
int limit = STOP_TRIES;
-
+
if (!cp->hw_running)
return;
-
+
spin_lock_irqsave(&cp->lock, flags);
rxcfg = readl(cp->regs + REG_MAC_RX_CFG);
XCVR_INTERNAL : XCVR_EXTERNAL;
cmd->phy_address = cp->phy_addr;
cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII |
- ADVERTISED_10baseT_Half |
- ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half |
+ ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full;
cmd->supported |=
- (SUPPORTED_10baseT_Half |
+ (SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
SUPPORTED_TP | SUPPORTED_MII);
if (cp->hw_running) {
cas_mif_poll(cp, 0);
bmcr = cas_phy_read(cp, MII_BMCR);
- cas_read_mii_link_mode(cp, &full_duplex,
+ cas_read_mii_link_mode(cp, &full_duplex,
&speed, &pause);
cas_mif_poll(cp, 1);
}
cmd->advertising |= ADVERTISED_FIBRE;
if (cp->hw_running) {
- /* pcs uses the same bits as mii */
+ /* pcs uses the same bits as mii */
bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
- cas_read_pcs_link_mode(cp, &full_duplex,
+ cas_read_pcs_link_mode(cp, &full_duplex,
&speed, &pause);
}
}
cmd->autoneg = AUTONEG_DISABLE;
cmd->speed =
(bmcr & CAS_BMCR_SPEED1000) ?
- SPEED_1000 :
- ((bmcr & BMCR_SPEED100) ? SPEED_100:
+ SPEED_1000 :
+ ((bmcr & BMCR_SPEED100) ? SPEED_100:
SPEED_10);
cmd->duplex =
(bmcr & BMCR_FULLDPLX) ?
}
if (linkstate != link_up) {
/* Force these to "unknown" if the link is not up and
- * autonogotiation in enabled. We can set the link
+ * autonegotiation is enabled. We can set the link
* speed to 0, but not cmd->duplex,
* because its legal values are 0 and 1. Ethtool will
* print the value reported in parentheses after the
static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
- memcpy(data, &ethtool_cassini_statnames,
+ memcpy(data, &ethtool_cassini_statnames,
CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
}
BUG_ON(i != CAS_NUM_STAT_KEYS);
}
- static struct ethtool_ops cas_ethtool_ops = {
+ static const struct ethtool_ops cas_ethtool_ops = {
.get_drvinfo = cas_get_drvinfo,
.get_settings = cas_get_settings,
.set_settings = cas_set_settings,
struct mii_ioctl_data *data = if_mii(ifr);
unsigned long flags;
int rc = -EOPNOTSUPP;
-
+
/* Hold the PM mutex while doing ioctl's or we may collide
* with open/close and power management and oops.
*/
pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
&orig_cacheline_size);
if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) {
- cas_cacheline_size =
- (CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
+ cas_cacheline_size =
+ (CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES;
- if (pci_write_config_byte(pdev,
- PCI_CACHE_LINE_SIZE,
+ if (pci_write_config_byte(pdev,
+ PCI_CACHE_LINE_SIZE,
cas_cacheline_size)) {
dev_err(&pdev->dev, "Could not set PCI cache "
"line size\n");
cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size: 0;
#endif
cp->dev = dev;
- cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
+ cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
cassini_debug;
cp->link_transition = LINK_TRANSITION_UNKNOWN;
goto err_out_iounmap;
}
- for (i = 0; i < N_TX_RINGS; i++)
+ for (i = 0; i < N_TX_RINGS; i++)
cp->init_txds[i] = cp->init_block->txds[i];
- for (i = 0; i < N_RX_DESC_RINGS; i++)
+ for (i = 0; i < N_RX_DESC_RINGS; i++)
cp->init_rxds[i] = cp->init_block->rxds[i];
- for (i = 0; i < N_RX_COMP_RINGS; i++)
+ for (i = 0; i < N_RX_COMP_RINGS; i++)
cp->init_rxcs[i] = cp->init_block->rxcs[i];
for (i = 0; i < N_RX_FLOWS; i++)
i = readl(cp->regs + REG_BIM_CFG);
printk(KERN_INFO "%s: Sun Cassini%s (%sbit/%sMHz PCI/%s) "
- "Ethernet[%d] ", dev->name,
- (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
+ "Ethernet[%d] ", dev->name,
+ (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
(i & BIM_CFG_32BIT) ? "32" : "64",
(i & BIM_CFG_66MHZ) ? "66" : "33",
- (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq);
+ (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq);
for (i = 0; i < 6; i++)
printk("%2.2x%c", dev->dev_addr[i],
err_write_cacheline:
/* Try to restore it in case the error occured after we
- * set it.
+ * set it.
*/
pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);
/* Restore the cache line size if we had modified
* it.
*/
- pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
+ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
cp->orig_cacheline_size);
}
#endif
unsigned long flags;
mutex_lock(&cp->pm_mutex);
-
+
/* If the driver is opened, we stop the DMA */
if (cp->opened) {
netif_device_detach(dev);
else
link_transition_timeout = 0;
- return pci_module_init(&cas_driver);
+ return pci_register_driver(&cas_driver);
}
static void __exit cas_cleanup(void)
#include <linux/dma-mapping.h>
static char version[] __devinitdata =
- KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n";
+ KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n";
#define MAX_UNITS 8
static int mtu[MAX_UNITS];
static int vlan[MAX_UNITS];
static int mii_write (struct net_device *dev, int phy_addr, int reg_num,
u16 data);
- static struct ethtool_ops ethtool_ops;
+ static const struct ethtool_ops ethtool_ops;
static int __devinit
rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
if (media[card_idx] != NULL) {
np->an_enable = 0;
if (strcmp (media[card_idx], "auto") == 0 ||
- strcmp (media[card_idx], "autosense") == 0 ||
+ strcmp (media[card_idx], "autosense") == 0 ||
strcmp (media[card_idx], "0") == 0 ) {
- np->an_enable = 2;
+ np->an_enable = 2;
} else if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
strcmp (media[card_idx], "4") == 0) {
np->speed = 100;
err = find_miiphy (dev);
if (err)
goto err_out_unmap_rx;
-
+
/* Fiber device? */
np->phy_media = (readw(ioaddr + ASICCtrl) & PhyMedia) ? 1 : 0;
np->link_status = 0;
dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5], irq);
if (tx_coalesce > 1)
- printk(KERN_INFO "tx_coalesce:\t%d packets\n",
+ printk(KERN_INFO "tx_coalesce:\t%d packets\n",
tx_coalesce);
if (np->coalesce)
printk(KERN_INFO "rx_coalesce:\t%d packets\n"
- KERN_INFO "rx_timeout: \t%d ns\n",
+ KERN_INFO "rx_timeout: \t%d ns\n",
np->rx_coalesce, np->rx_timeout*640);
if (np->vlan)
printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
}
#ifdef MEM_MAPPING
ioaddr = dev->base_addr;
- #endif
+ #endif
/* Check CRC */
crc = ~ether_crc_le (256 - 4, sromdata);
if (psrom->crc != crc) {
long ioaddr = dev->base_addr;
int i;
u16 macctrl;
-
+
i = request_irq (dev->irq, &rio_interrupt, IRQF_SHARED, dev->name, dev);
if (i)
return i;
-
+
/* Reset all logic functions */
writew (GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset,
ioaddr + ASICCtrl + 2);
mdelay(10);
-
+
/* DebugCtrl bit 4, 5, 9 must set */
writel (readl (ioaddr + DebugCtrl) | 0x0230, ioaddr + DebugCtrl);
/* VLAN supported */
if (np->vlan) {
/* priority field in RxDMAIntCtrl */
- writel (readl(ioaddr + RxDMAIntCtrl) | 0x7 << 10,
+ writel (readl(ioaddr + RxDMAIntCtrl) | 0x7 << 10,
ioaddr + RxDMAIntCtrl);
/* VLANId */
writew (np->vlan, ioaddr + VLANId);
add_timer (&np->timer);
/* Start Tx/Rx */
- writel (readl (ioaddr + MACCtrl) | StatsEnable | RxEnable | TxEnable,
+ writel (readl (ioaddr + MACCtrl) | StatsEnable | RxEnable | TxEnable,
ioaddr + MACCtrl);
-
+
macctrl = 0;
macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
macctrl |= (np->full_duplex) ? DuplexSelect : 0;
writew(macctrl, ioaddr + MACCtrl);
netif_start_queue (dev);
-
+
/* Enable default interrupts */
EnableInt ();
return 0;
}
- static void
+ static void
rio_timer (unsigned long data)
{
struct net_device *dev = (struct net_device *)data;
np->timer.expires = jiffies + next_tick;
add_timer(&np->timer);
}
-
+
static void
rio_tx_timeout (struct net_device *dev)
{
txdesc = &np->tx_ring[entry];
#if 0
- if (skb->ip_summed == CHECKSUM_HW) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
txdesc->status |=
cpu_to_le64 (TCPChecksumEnable | UDPChecksumEnable |
IPChecksumEnable);
* Work around: Always use 1 descriptor in 10Mbps mode */
if (entry % np->tx_coalesce == 0 || np->speed == 10)
txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
- WordAlignDisable |
+ WordAlignDisable |
TxDMAIndicate |
(1 << FragCountShift));
else
txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
- WordAlignDisable |
+ WordAlignDisable |
(1 << FragCountShift));
/* TxDMAPollNow */
dev->base_addr + TFDListPtr0);
writel (0, dev->base_addr + TFDListPtr1);
}
-
+
/* NETDEV WATCHDOG timer */
dev->trans_start = jiffies;
return 0;
ioaddr = dev->base_addr;
np = netdev_priv(dev);
while (1) {
- int_status = readw (ioaddr + IntStatus);
+ int_status = readw (ioaddr + IntStatus);
writew (int_status, ioaddr + IntStatus);
int_status &= DEFAULT_INTR;
if (int_status == 0 || --cnt < 0)
if (tx_status & 0x01)
tx_error (dev, tx_status);
/* Free used tx skbuffs */
- rio_free_tx (dev, 1);
+ rio_free_tx (dev, 1);
}
/* Handle uncommon events */
return IRQ_RETVAL(handled);
}
- static void
- rio_free_tx (struct net_device *dev, int irq)
+ static void
+ rio_free_tx (struct net_device *dev, int irq)
{
struct netdev_private *np = netdev_priv(dev);
int entry = np->old_tx % TX_RING_SIZE;
int tx_use = 0;
unsigned long flag = 0;
-
+
if (irq)
spin_lock(&np->tx_lock);
else
spin_lock_irqsave(&np->tx_lock, flag);
-
+
/* Free used tx skbuffs */
while (entry != np->cur_tx) {
struct sk_buff *skb;
spin_unlock_irqrestore(&np->tx_lock, flag);
np->old_tx = entry;
- /* If the ring is no longer full, clear tx_full and
+ /* If the ring is no longer full, clear tx_full and
call netif_wake_queue() */
if (netif_queue_stopped(dev) &&
- ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
+ ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
< TX_QUEUE_LEN - 1 || np->speed == 10)) {
netif_wake_queue (dev);
}
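The wake-up test above is plain modular ring arithmetic; a minimal sketch of the same computation (ring_in_flight() is an illustrative helper, not a dl2k symbol):

/* descriptors still queued between old and cur, valid across index
 * wraparound as long as both indices stay below ring_size
 */
static unsigned int ring_in_flight(unsigned int cur, unsigned int old,
				   unsigned int ring_size)
{
	return (cur - old + ring_size) % ring_size;
}

The queue is woken once this count drops below TX_QUEUE_LEN - 1, or unconditionally at 10Mbps, where the workaround above only ever keeps one descriptor outstanding.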
/* Let TxStartThresh stay default value */
}
/* Maximum Collisions */
- #ifdef ETHER_STATS
- if (tx_status & 0x08)
+ #ifdef ETHER_STATS
+ if (tx_status & 0x08)
np->stats.collisions16++;
#else
- if (tx_status & 0x08)
+ if (tx_status & 0x08)
np->stats.collisions++;
#endif
/* Restart the Tx */
np->rx_skbuff[entry] = NULL;
} else if ((skb = dev_alloc_skb (pkt_len + 2)) != NULL) {
pci_dma_sync_single_for_cpu(np->pdev,
- desc->fraginfo &
+ desc->fraginfo &
DMA_48BIT_MASK,
np->rx_buf_sz,
PCI_DMA_FROMDEVICE);
PCI_DMA_FROMDEVICE);
}
skb->protocol = eth_type_trans (skb, dev);
- #if 0
+ #if 0
/* Checksum done by hw, but csum value unavailable. */
- if (np->pci_rev_id >= 0x0c &&
+ if (np->pci_rev_id >= 0x0c &&
!(frame_status & (TCPError | UDPError | IPError))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
- }
+ }
#endif
netif_rx (skb);
dev->last_rx = jiffies;
mii_get_media (dev);
if (np->speed == 1000)
np->tx_coalesce = tx_coalesce;
- else
+ else
np->tx_coalesce = 1;
macctrl = 0;
macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
macctrl |= (np->full_duplex) ? DuplexSelect : 0;
- macctrl |= (np->tx_flow) ?
+ macctrl |= (np->tx_flow) ?
TxFlowControlEnable : 0;
- macctrl |= (np->rx_flow) ?
+ macctrl |= (np->rx_flow) ?
RxFlowControlEnable : 0;
writew(macctrl, ioaddr + MACCtrl);
np->link_status = 1;
get_stats (dev);
}
- /* PCI Error, a catastronphic error related to the bus interface
- /* PCI Error, a catastrophic error related to the bus interface
occurs, set GlobalReset and HostReset to reset. */
if (int_status & HostError) {
printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n",
/* All statistics registers need to be acknowledged,
else statistic overflow could cause problems */
-
+
np->stats.rx_packets += readl (ioaddr + FramesRcvOk);
np->stats.tx_packets += readl (ioaddr + FramesXmtOk);
np->stats.rx_bytes += readl (ioaddr + OctetRcvOk);
np->stats.tx_bytes += readl (ioaddr + OctetXmtOk);
np->stats.multicast = readl (ioaddr + McstFramesRcvdOk);
- np->stats.collisions += readl (ioaddr + SingleColFrames)
- + readl (ioaddr + MultiColFrames);
-
+ np->stats.collisions += readl (ioaddr + SingleColFrames)
+ + readl (ioaddr + MultiColFrames);
+
/* detailed tx errors */
stat_reg = readw (ioaddr + FramesAbortXSColls);
np->stats.tx_aborted_errors += stat_reg;
long ioaddr = dev->base_addr;
#ifdef MEM_MAPPING
int i;
- #endif
+ #endif
/* All statistics registers need to be acknowledged,
else statistic overflow could cause problems */
readl (ioaddr + SingleColFrames);
readl (ioaddr + MultiColFrames);
readl (ioaddr + LateCollisions);
- /* detailed rx errors */
+ /* detailed rx errors */
readw (ioaddr + FrameTooLongErrors);
readw (ioaddr + InRangeLengthErrors);
readw (ioaddr + FramesCheckSeqErrors);
#ifdef MEM_MAPPING
for (i = 0x100; i <= 0x150; i += 4)
readl (ioaddr + i);
- #endif
+ #endif
readw (ioaddr + TxJumboFrames);
readw (ioaddr + RxJumboFrames);
readw (ioaddr + TCPCheckSumErrors);
u32 hash_table[2];
u16 rx_mode = 0;
struct netdev_private *np = netdev_priv(dev);
-
+
hash_table[0] = hash_table[1] = 0;
/* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */
hash_table[1] |= cpu_to_le32(0x02000000);
if (dev->flags & IFF_PROMISC) {
/* Receive all frames promiscuously. */
rx_mode = ReceiveAllFrames;
- } else if ((dev->flags & IFF_ALLMULTI) ||
+ } else if ((dev->flags & IFF_ALLMULTI) ||
(dev->mc_count > multicast_filter_limit)) {
/* Receive broadcast and multicast frames */
rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
} else if (dev->mc_count > 0) {
int i;
struct dev_mc_list *mclist;
- /* Receive broadcast frames and multicast frames filtering
+ /* Receive broadcast frames and multicast frames filtering
by Hashtable */
rx_mode =
ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
- for (i=0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist=mclist->next)
+ for (i=0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist=mclist->next)
{
int bit, index = 0;
int crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
strcpy(info->driver, "dl2k");
strcpy(info->version, DRV_VERSION);
strcpy(info->bus_info, pci_name(np->pdev));
- }
+ }
static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
cmd->supported = SUPPORTED_Autoneg | SUPPORTED_FIBRE;
cmd->advertising= ADVERTISED_Autoneg | ADVERTISED_FIBRE;
cmd->port = PORT_FIBRE;
- cmd->transceiver = XCVR_INTERNAL;
+ cmd->transceiver = XCVR_INTERNAL;
} else {
/* copper device */
- cmd->supported = SUPPORTED_10baseT_Half |
+ cmd->supported = SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half
| SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full |
SUPPORTED_Autoneg | SUPPORTED_MII;
cmd->port = PORT_MII;
cmd->transceiver = XCVR_INTERNAL;
}
- if ( np->link_status ) {
+ if ( np->link_status ) {
cmd->speed = np->speed;
cmd->duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
} else {
cmd->autoneg = AUTONEG_ENABLE;
else
cmd->autoneg = AUTONEG_DISABLE;
-
+
cmd->phy_address = np->phy_addr;
- return 0;
+ return 0;
}
static int rio_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
else {
np->an_enable = 1;
mii_set_media(dev);
- return 0;
- }
+ return 0;
+ }
} else {
np->an_enable = 0;
if (np->speed == 1000) {
- cmd->speed = SPEED_100;
+ cmd->speed = SPEED_100;
cmd->duplex = DUPLEX_FULL;
printk("Warning!! Can't disable Auto negotiation in 1000Mbps, change to Manual 100Mbps, Full duplex.\n");
}
switch(cmd->speed + cmd->duplex) {
-
+
case SPEED_10 + DUPLEX_HALF:
np->speed = 10;
np->full_duplex = 0;
break;
-
+
case SPEED_10 + DUPLEX_FULL:
np->speed = 10;
np->full_duplex = 1;
case SPEED_1000 + DUPLEX_HALF:/* not supported */
case SPEED_1000 + DUPLEX_FULL:/* not supported */
default:
- return -EINVAL;
+ return -EINVAL;
}
mii_set_media(dev);
}
return np->link_status;
}
- static struct ethtool_ops ethtool_ops = {
+ static const struct ethtool_ops ethtool_ops = {
.get_drvinfo = rio_get_drvinfo,
.get_settings = rio_get_settings,
.set_settings = rio_set_settings,
int phy_addr;
struct netdev_private *np = netdev_priv(dev);
struct mii_data *miidata = (struct mii_data *) &rq->ifr_ifru;
-
+
struct netdev_desc *desc;
int i;
switch (cmd) {
case SIOCDEVPRIVATE:
break;
-
+
case SIOCDEVPRIVATE + 1:
miidata->out_value = mii_read (dev, phy_addr, miidata->reg_num);
break;
/* Auto-Negotiation not completed */
return -1;
}
- negotiate.image = mii_read (dev, phy_addr, MII_ANAR) &
+ negotiate.image = mii_read (dev, phy_addr, MII_ANAR) &
mii_read (dev, phy_addr, MII_ANLPAR);
mscr.image = mii_read (dev, phy_addr, MII_MSCR);
mssr.image = mii_read (dev, phy_addr, MII_MSSR);
printk ("Half duplex\n");
}
}
- if (np->tx_flow)
+ if (np->tx_flow)
printk(KERN_INFO "Enable Tx Flow Control\n");
- else
+ else
printk(KERN_INFO "Disable Tx Flow Control\n");
if (np->rx_flow)
printk(KERN_INFO "Enable Rx Flow Control\n");
pscr.image = mii_read (dev, phy_addr, MII_PHY_SCR);
pscr.bits.mdi_crossover_mode = 3; /* 11'b */
mii_write (dev, phy_addr, MII_PHY_SCR, pscr.image);
-
+
/* Soft reset PHY */
mii_write (dev, phy_addr, MII_BMCR, MII_BMCR_RESET);
bmcr.image = 0;
/* Auto-Negotiation not completed */
return -1;
}
- negotiate.image = mii_read (dev, phy_addr, PCS_ANAR) &
+ negotiate.image = mii_read (dev, phy_addr, PCS_ANAR) &
mii_read (dev, phy_addr, PCS_ANLPAR);
np->speed = 1000;
if (negotiate.bits.full_duplex) {
printk ("Half duplex\n");
}
}
- if (np->tx_flow)
+ if (np->tx_flow)
printk(KERN_INFO "Enable Tx Flow Control\n");
- else
+ else
printk(KERN_INFO "Disable Tx Flow Control\n");
if (np->rx_flow)
printk(KERN_INFO "Enable Rx Flow Control\n");
/* Advertise capabilities */
esr.image = mii_read (dev, phy_addr, PCS_ESR);
anar.image = mii_read (dev, phy_addr, MII_ANAR);
- anar.bits.half_duplex =
+ anar.bits.half_duplex =
esr.bits.media_1000BT_HD | esr.bits.media_1000BX_HD;
- anar.bits.full_duplex =
+ anar.bits.full_duplex =
esr.bits.media_1000BT_FD | esr.bits.media_1000BX_FD;
anar.bits.pause = 1;
anar.bits.asymmetric = 1;
synchronize_irq (dev->irq);
free_irq (dev->irq, dev);
del_timer_sync (&np->timer);
-
+
/* Free all the skbuffs in the queue. */
for (i = 0; i < RX_RING_SIZE; i++) {
np->rx_ring[i].status = 0;
np->rx_ring[i].fraginfo = 0;
skb = np->rx_skbuff[i];
if (skb) {
- pci_unmap_single(np->pdev,
+ pci_unmap_single(np->pdev,
np->rx_ring[i].fraginfo & DMA_48BIT_MASK,
skb->len, PCI_DMA_FROMDEVICE);
dev_kfree_skb (skb);
for (i = 0; i < TX_RING_SIZE; i++) {
skb = np->tx_skbuff[i];
if (skb) {
- pci_unmap_single(np->pdev,
+ pci_unmap_single(np->pdev,
np->tx_ring[i].fraginfo & DMA_48BIT_MASK,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb (skb);
static int __init
rio_init (void)
{
- return pci_module_init (&rio_driver);
+ return pci_register_driver(&rio_driver);
}
static void __exit
module_exit (rio_exit);
/*
-
- Compile command:
-
+
+ Compile command:
+
gcc -D__KERNEL__ -DMODULE -I/usr/src/linux/include -Wall -Wstrict-prototypes -O2 -c dl2k.c
Read Documentation/networking/dl2k.txt for details.
#else
#define DRIVERNAPI "-NAPI"
#endif
- #define DRV_VERSION "7.1.9-k4"DRIVERNAPI
+ #define DRV_VERSION "7.2.7-k2"DRIVERNAPI
char e1000_driver_version[] = DRV_VERSION;
static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
INTEL_E1000_ETHERNET_DEVICE(0x1098),
INTEL_E1000_ETHERNET_DEVICE(0x1099),
INTEL_E1000_ETHERNET_DEVICE(0x109A),
+ INTEL_E1000_ETHERNET_DEVICE(0x10A4),
INTEL_E1000_ETHERNET_DEVICE(0x10B5),
INTEL_E1000_ETHERNET_DEVICE(0x10B9),
INTEL_E1000_ETHERNET_DEVICE(0x10BA),
printk(KERN_INFO "%s\n", e1000_copyright);
- ret = pci_module_init(&e1000_driver);
+ ret = pci_register_driver(&e1000_driver);
return ret;
}
*
**/
- static void e1000_power_up_phy(struct e1000_adapter *adapter)
+ void e1000_power_up_phy(struct e1000_adapter *adapter)
{
uint16_t mii_reg = 0;
unsigned long flash_start, flash_len;
static int cards_found = 0;
- static int e1000_ksp3_port_a = 0; /* global ksp3 port a indication */
+ static int global_quad_port_a = 0; /* global quad port a indication */
int i, err, pci_using_dac;
- uint16_t eeprom_data;
+ uint16_t eeprom_data = 0;
uint16_t eeprom_apme_mask = E1000_EEPROM_APME;
if ((err = pci_enable_device(pdev)))
return err;
if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) &&
(err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
E1000_ERR("No usable DMA configuration, aborting\n");
- return err;
+ goto err_dma;
}
pci_using_dac = 0;
}
if ((err = pci_request_regions(pdev, e1000_driver_name)))
- return err;
+ goto err_pci_reg;
pci_set_master(pdev);
+ err = -ENOMEM;
netdev = alloc_etherdev(sizeof(struct e1000_adapter));
- if (!netdev) {
- err = -ENOMEM;
+ if (!netdev)
goto err_alloc_etherdev;
- }
SET_MODULE_OWNER(netdev);
SET_NETDEV_DEV(netdev, &pdev->dev);
mmio_start = pci_resource_start(pdev, BAR_0);
mmio_len = pci_resource_len(pdev, BAR_0);
+ err = -EIO;
adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
- if (!adapter->hw.hw_addr) {
- err = -EIO;
+ if (!adapter->hw.hw_addr)
goto err_ioremap;
- }
for (i = BAR_1; i <= BAR_5; i++) {
if (pci_resource_len(pdev, i) == 0)
if ((err = e1000_sw_init(adapter)))
goto err_sw_init;
+ err = -EIO;
/* Flash BAR mapping must happen after e1000_sw_init
* because it depends on mac_type */
if ((adapter->hw.mac_type == e1000_ich8lan) &&
flash_start = pci_resource_start(pdev, 1);
flash_len = pci_resource_len(pdev, 1);
adapter->hw.flash_address = ioremap(flash_start, flash_len);
- if (!adapter->hw.flash_address) {
- err = -EIO;
+ if (!adapter->hw.flash_address)
goto err_flashmap;
- }
}
- if ((err = e1000_check_phy_reset_block(&adapter->hw)))
+ if (e1000_check_phy_reset_block(&adapter->hw))
DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
- /* if ksp3, indicate if it's port a being setup */
- if (pdev->device == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 &&
- e1000_ksp3_port_a == 0)
- adapter->ksp3_port_a = 1;
- e1000_ksp3_port_a++;
- /* Reset for multiple KP3 adapters */
- if (e1000_ksp3_port_a == 4)
- e1000_ksp3_port_a = 0;
-
if (adapter->hw.mac_type >= e1000_82543) {
netdev->features = NETIF_F_SG |
NETIF_F_HW_CSUM |
if (e1000_init_eeprom_params(&adapter->hw)) {
E1000_ERR("EEPROM initialization failed\n");
- return -EIO;
+ goto err_eeprom;
}
/* before reading the EEPROM, reset the controller to
if (e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
- err = -EIO;
goto err_eeprom;
}
if (!is_valid_ether_addr(netdev->perm_addr)) {
DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
- err = -EIO;
goto err_eeprom;
}
- e1000_read_part_num(&adapter->hw, &(adapter->part_num));
-
e1000_get_bus_info(&adapter->hw);
init_timer(&adapter->tx_fifo_stall_timer);
break;
}
if (eeprom_data & eeprom_apme_mask)
- adapter->wol |= E1000_WUFC_MAG;
+ adapter->eeprom_wol |= E1000_WUFC_MAG;
+
+ /* now that we have the eeprom settings, apply the special cases
+ * where the eeprom may be wrong or the board simply won't support
+ * wake on lan on a particular port */
+ switch (pdev->device) {
+ case E1000_DEV_ID_82546GB_PCIE:
+ adapter->eeprom_wol = 0;
+ break;
+ case E1000_DEV_ID_82546EB_FIBER:
+ case E1000_DEV_ID_82546GB_FIBER:
+ case E1000_DEV_ID_82571EB_FIBER:
+ /* Wake events only supported on port A for dual fiber
+ * regardless of eeprom setting */
+ if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1)
+ adapter->eeprom_wol = 0;
+ break;
+ case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+ case E1000_DEV_ID_82571EB_QUAD_COPPER:
+ /* if quad port adapter, disable WoL on all but port A */
+ if (global_quad_port_a != 0)
+ adapter->eeprom_wol = 0;
+ else
+ adapter->quad_port_a = 1;
+ /* Reset for multiple quad port adapters */
+ if (++global_quad_port_a == 4)
+ global_quad_port_a = 0;
+ break;
+ }
+
+ /* initialize the wol settings based on the eeprom settings */
+ adapter->wol = adapter->eeprom_wol;
/* print bus type/speed/width info */
{
return 0;
err_register:
+ e1000_release_hw_control(adapter);
+ err_eeprom:
+ if (!e1000_check_phy_reset_block(&adapter->hw))
+ e1000_phy_hw_reset(&adapter->hw);
+
if (adapter->hw.flash_address)
iounmap(adapter->hw.flash_address);
err_flashmap:
+ #ifdef CONFIG_E1000_NAPI
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ dev_put(&adapter->polling_netdev[i]);
+ #endif
+
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+ #ifdef CONFIG_E1000_NAPI
+ kfree(adapter->polling_netdev);
+ #endif
err_sw_init:
- err_eeprom:
iounmap(adapter->hw.hw_addr);
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
pci_release_regions(pdev);
+ err_pci_reg:
+ err_dma:
+ pci_disable_device(pdev);
return err;
}
err = e1000_request_irq(adapter);
if (err)
- goto err_up;
+ goto err_req_irq;
e1000_power_up_phy(adapter);
return E1000_SUCCESS;
err_up:
+ e1000_power_down_phy(adapter);
+ e1000_free_irq(adapter);
+ err_req_irq:
e1000_free_all_rx_resources(adapter);
err_setup_rx:
e1000_free_all_tx_resources(adapter);
* (Descriptors) for all queues
* @adapter: board private structure
*
- * If this function returns with an error, then it's possible one or
- * more of the rings is populated (while the rest are not). It is the
- * callers duty to clean those orphaned rings.
- *
* Return 0 on success, negative on failure
**/
if (err) {
DPRINTK(PROBE, ERR,
"Allocation for Tx Queue %u failed\n", i);
+ for (i-- ; i >= 0; i--)
+ e1000_free_tx_resources(adapter,
+ &adapter->tx_ring[i]);
break;
}
}
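The unwind loop added above is what allows the "callers duty to clean those orphaned rings" note to be dropped from the kerneldoc: on failure the function now frees whatever it already set up before returning. A minimal sketch of the pattern, with setup_one()/teardown_one() as illustrative stand-ins for the per-ring allocate/free calls:

static int setup_one(int i);		/* stand-in for the per-queue setup call */
static void teardown_one(int i);	/* stand-in for e1000_free_tx_resources() above */

static int setup_all(int num_queues)
{
	int i, err = 0;

	for (i = 0; i < num_queues; i++) {
		err = setup_one(i);
		if (err) {
			/* free only what already succeeded, newest first */
			for (i--; i >= 0; i--)
				teardown_one(i);
			break;
		}
	}
	return err;
}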
} else if (hw->mac_type == e1000_80003es2lan) {
tarc = E1000_READ_REG(hw, TARC0);
tarc |= 1;
- if (hw->media_type == e1000_media_type_internal_serdes)
- tarc |= (1 << 20);
E1000_WRITE_REG(hw, TARC0, tarc);
tarc = E1000_READ_REG(hw, TARC1);
tarc |= 1;
* (Descriptors) for all queues
* @adapter: board private structure
*
- * If this function returns with an error, then it's possible one or
- * more of the rings is populated (while the rest are not). It is the
- * callers duty to clean those orphaned rings.
- *
* Return 0 on success, negative on failure
**/
if (err) {
DPRINTK(PROBE, ERR,
"Allocation for Rx Queue %u failed\n", i);
+ for (i-- ; i >= 0; i--)
+ e1000_free_rx_resources(adapter,
+ &adapter->rx_ring[i]);
break;
}
}
* disable receives in the ISR and
* reset device here in the watchdog
*/
- if (adapter->hw.mac_type == e1000_80003es2lan) {
+ if (adapter->hw.mac_type == e1000_80003es2lan)
/* reset device */
schedule_work(&adapter->reset_task);
- }
}
e1000_smartspeed(adapter);
cmd_length = E1000_TXD_CMD_IP;
ipcse = skb->h.raw - skb->data - 1;
#ifdef NETIF_F_TSO_IPV6
- } else if (skb->protocol == ntohs(ETH_P_IPV6)) {
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
skb->nh.ipv6h->payload_len = 0;
skb->h.th->check =
~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
unsigned int i;
uint8_t css;
- if (likely(skb->ip_summed == CHECKSUM_HW)) {
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
css = skb->h.raw - skb->data;
i = tx_ring->next_to_use;
}
/* reserve a descriptor for the offload context */
- if ((mss) || (skb->ip_summed == CHECKSUM_HW))
+ if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
count++;
count++;
#else
- if (skb->ip_summed == CHECKSUM_HW)
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
count++;
#endif
*/
csum = ntohl(csum ^ 0xFFFF);
skb->csum = csum;
- skb->ip_summed = CHECKSUM_HW;
+ skb->ip_summed = CHECKSUM_COMPLETE;
}
adapter->hw_csum_good++;
}
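CHECKSUM_COMPLETE advertises that skb->csum already carries a hardware-computed checksum over the packet (the XOR with 0xFFFF above undoes the complement the NIC applies), so the stack can verify without re-summing the data. For reference, a minimal standalone sketch of a 16-bit ones'-complement sum of that kind, RFC 1071 style; buf and len are illustrative:

#include <stddef.h>
#include <stdint.h>

static uint16_t ones_complement_sum(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)	/* add 16-bit words */
		sum += ((uint32_t)buf[i] << 8) | buf[i + 1];
	if (len & 1)				/* pad an odd trailing byte */
		sum += (uint32_t)buf[len - 1] << 8;
	while (sum >> 16)			/* fold carries back into 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}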
E1000_DBG("%s: Receive packet consumed multiple"
" buffers\n", netdev->name);
/* recycle */
- buffer_info-> skb = skb;
+ buffer_info->skb = skb;
goto next_desc;
}
netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
if (new_skb) {
skb_reserve(new_skb, NET_IP_ALIGN);
- new_skb->dev = netdev;
memcpy(new_skb->data - NET_IP_ALIGN,
skb->data - NET_IP_ALIGN,
length + NET_IP_ALIGN);
buffer_info = &rx_ring->buffer_info[i];
while (cleaned_count--) {
- if (!(skb = buffer_info->skb))
- skb = netdev_alloc_skb(netdev, bufsz);
- else {
+ skb = buffer_info->skb;
+ if (skb) {
skb_trim(skb, 0);
goto map_skb;
}
+ skb = netdev_alloc_skb(netdev, bufsz);
if (unlikely(!skb)) {
/* Better luck next round */
adapter->alloc_rx_buff_failed++;
dev_kfree_skb(skb);
dev_kfree_skb(oldskb);
break; /* while !buffer_info->skb */
- } else {
- /* Use new allocation */
- dev_kfree_skb(oldskb);
}
+
+ /* Use new allocation */
+ dev_kfree_skb(oldskb);
}
/* Make buffer alignment 2 beyond a 16 byte boundary
* this will result in a 16 byte aligned IP header after
*/
skb_reserve(skb, NET_IP_ALIGN);
- skb->dev = netdev;
-
buffer_info->skb = skb;
buffer_info->length = adapter->rx_buffer_len;
map_skb:
*/
skb_reserve(skb, NET_IP_ALIGN);
- skb->dev = netdev;
-
buffer_info->skb = skb;
buffer_info->length = adapter->rx_ps_bsize0;
buffer_info->dma = pci_map_single(pdev, skb->data,
e1000_set_multi(netdev);
/* turn on all-multi mode if wake on multicast is enabled */
- if (adapter->wol & E1000_WUFC_MC) {
+ if (wufc & E1000_WUFC_MC) {
rctl = E1000_READ_REG(&adapter->hw, RCTL);
rctl |= E1000_RCTL_MPE;
E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
- uint32_t manc, ret_val;
+ uint32_t manc, err;
pci_set_power_state(pdev, PCI_D0);
e1000_pci_restore_state(adapter);
- ret_val = pci_enable_device(pdev);
+ if ((err = pci_enable_device(pdev))) {
+ printk(KERN_ERR "e1000: Cannot enable PCI device from suspend\n");
+ return err;
+ }
pci_set_master(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
if (netif_running(netdev))
e1000_down(adapter);
+ pci_disable_device(pdev);
/* Request a slot slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
* 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
* 0.55: 22 Mar 2006: Add flow control (pause frame).
* 0.56: 22 Mar 2006: Additional ethtool config and moduleparam support.
+ * 0.57: 14 May 2006: Mac address set in probe/remove and order corrections.
*
* Known bugs:
* We suspect that on some hardware no TX done interrupts are generated.
* DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
* superfluous timer interrupts from the nic.
*/
- #define FORCEDETH_VERSION "0.56"
+ #ifdef CONFIG_FORCEDETH_NAPI
+ #define DRIVERNAPI "-NAPI"
+ #else
+ #define DRIVERNAPI
+ #endif
+ #define FORCEDETH_VERSION "0.57"
#define DRV_NAME "forcedeth"
#include <linux/module.h>
NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
- NvRegUnknownTransmitterReg = 0x10c,
+ NvRegTransmitPoll = 0x10c,
+ #define NVREG_TRANSMITPOLL_MAC_ADDR_REV 0x00008000
NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE 0x10000
#define NVREG_LINKSPEED_10 1000
/* Big endian: should work, but is untested */
struct ring_desc {
- u32 PacketBuffer;
- u32 FlagLen;
+ __le32 buf;
+ __le32 flaglen;
};
struct ring_desc_ex {
- u32 PacketBufferHigh;
- u32 PacketBufferLow;
- u32 TxVlan;
- u32 FlagLen;
+ __le32 bufhigh;
+ __le32 buflow;
+ __le32 txvlan;
+ __le32 flaglen;
};
- typedef union _ring_type {
+ union ring_type {
struct ring_desc* orig;
struct ring_desc_ex* ex;
- } ring_type;
+ };
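A minimal sketch of how the union above lets a single ring pointer serve both descriptor layouts: the legacy format holds a 32-bit buffer address, the extended one splits a 64-bit address across bufhigh/buflow. Names here are illustrative (demo_*); only the field names and the DESC_VER_* selection mirror the patch, and the real driver additionally converts values to little endian and issues write barriers before handing descriptors to the NIC.

#include <stdint.h>

enum { DESC_VER_1 = 1, DESC_VER_2 = 2 };	/* legacy formats; anything newer uses the ex layout */

struct demo_desc    { uint32_t buf, flaglen; };
struct demo_desc_ex { uint32_t bufhigh, buflow, txvlan, flaglen; };

union demo_ring {
	struct demo_desc *orig;
	struct demo_desc_ex *ex;
};

static void fill_rx_slot(union demo_ring ring, int desc_ver, int nr,
			 uint64_t dma_addr, uint32_t len_flags)
{
	if (desc_ver == DESC_VER_1 || desc_ver == DESC_VER_2) {
		ring.orig[nr].buf = (uint32_t)dma_addr;	/* legacy: 32-bit address only */
		ring.orig[nr].flaglen = len_flags;
	} else {
		ring.ex[nr].bufhigh = (uint32_t)(dma_addr >> 32);
		ring.ex[nr].buflow = (uint32_t)dma_addr;
		ring.ex[nr].flaglen = len_flags;
	}
}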
#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define PHYID1_OUI_SHFT 6
#define PHYID2_OUI_MASK 0xfc00
#define PHYID2_OUI_SHFT 10
+ #define PHYID2_MODEL_MASK 0x03f0
+ #define PHY_MODEL_MARVELL_E3016 0x220
+ #define PHY_MARVELL_E3016_INITMASK 0x0300
#define PHY_INIT1 0x0f000
#define PHY_INIT2 0x0e00
#define PHY_INIT3 0x01000
};
struct register_test {
- u32 reg;
- u32 mask;
+ __le32 reg;
+ __le32 mask;
};
static const struct register_test nv_registers_test[] = {
int phyaddr;
int wolenabled;
unsigned int phy_oui;
+ unsigned int phy_model;
u16 gigabit;
int intr_test;
u32 vlanctl_bits;
u32 driver_data;
u32 register_size;
+ int rx_csum;
void __iomem *base;
/* rx specific fields.
* Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
*/
- ring_type rx_ring;
+ union ring_type rx_ring;
unsigned int cur_rx, refill_rx;
struct sk_buff **rx_skbuff;
dma_addr_t *rx_dma;
/*
* tx specific fields.
*/
- ring_type tx_ring;
+ union ring_type tx_ring;
unsigned int next_tx, nic_tx;
struct sk_buff **tx_skbuff;
dma_addr_t *tx_dma;
static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
- return le32_to_cpu(prd->FlagLen)
+ return le32_to_cpu(prd->flaglen)
& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}
static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
- return le32_to_cpu(prd->FlagLen) & LEN_MASK_V2;
+ return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}
static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
struct fe_priv *np = get_nvpriv(dev);
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- if(np->rx_ring.orig)
+ if (np->rx_ring.orig)
pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
np->rx_ring.orig, np->ring_addr);
} else {
return retval;
}
- static int phy_reset(struct net_device *dev)
+ static int phy_reset(struct net_device *dev, u32 bmcr_setup)
{
struct fe_priv *np = netdev_priv(dev);
u32 miicontrol;
unsigned int tries = 0;
- miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
- miicontrol |= BMCR_RESET;
+ miicontrol = BMCR_RESET | bmcr_setup;
if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
return -1;
}
u8 __iomem *base = get_hwbase(dev);
u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg;
+ /* phy errata for E3016 phy */
+ if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
+ reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
+ reg &= ~PHY_MARVELL_E3016_INITMASK;
+ if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
+ printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ }
+
/* set advertise register */
reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
else
np->gigabit = 0;
- /* reset the phy */
- if (phy_reset(dev)) {
+ mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+ mii_control |= BMCR_ANENABLE;
+
+ /* reset the phy
+ * (certain phys need bmcr to be setup with reset)
+ */
+ if (phy_reset(dev, mii_control)) {
printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");
udelay(NV_TXSTOP_DELAY2);
- writel(0, base + NvRegUnknownTransmitterReg);
+ writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
}
static void nv_txrx_reset(struct net_device *dev)
np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data,
skb->end-skb->data, PCI_DMA_FROMDEVICE);
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- np->rx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
+ np->rx_ring.orig[nr].buf = cpu_to_le32(np->rx_dma[nr]);
wmb();
- np->rx_ring.orig[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
+ np->rx_ring.orig[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
} else {
- np->rx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
- np->rx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
+ np->rx_ring.ex[nr].bufhigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
+ np->rx_ring.ex[nr].buflow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
wmb();
- np->rx_ring.ex[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
+ np->rx_ring.ex[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
}
dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
dev->name, refill_rx);
return 0;
}
+ /* If rx bufs are exhausted, this is called after 50ms to attempt to refresh */
+ #ifdef CONFIG_FORCEDETH_NAPI
+ static void nv_do_rx_refill(unsigned long data)
+ {
+ struct net_device *dev = (struct net_device *) data;
+
+ /* Just reschedule NAPI rx processing */
+ netif_rx_schedule(dev);
+ }
+ #else
static void nv_do_rx_refill(unsigned long data)
{
struct net_device *dev = (struct net_device *) data;
enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
}
}
+ #endif
static void nv_init_rx(struct net_device *dev)
{
np->refill_rx = 0;
for (i = 0; i < np->rx_ring_size; i++)
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
- np->rx_ring.orig[i].FlagLen = 0;
+ np->rx_ring.orig[i].flaglen = 0;
else
- np->rx_ring.ex[i].FlagLen = 0;
+ np->rx_ring.ex[i].flaglen = 0;
}
static void nv_init_tx(struct net_device *dev)
np->next_tx = np->nic_tx = 0;
for (i = 0; i < np->tx_ring_size; i++) {
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
- np->tx_ring.orig[i].FlagLen = 0;
+ np->tx_ring.orig[i].flaglen = 0;
else
- np->tx_ring.ex[i].FlagLen = 0;
+ np->tx_ring.ex[i].flaglen = 0;
np->tx_skbuff[i] = NULL;
np->tx_dma[i] = 0;
}
for (i = 0; i < np->tx_ring_size; i++) {
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
- np->tx_ring.orig[i].FlagLen = 0;
+ np->tx_ring.orig[i].flaglen = 0;
else
- np->tx_ring.ex[i].FlagLen = 0;
+ np->tx_ring.ex[i].flaglen = 0;
if (nv_release_txskb(dev, i))
np->stats.tx_dropped++;
}
int i;
for (i = 0; i < np->rx_ring_size; i++) {
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
- np->rx_ring.orig[i].FlagLen = 0;
+ np->rx_ring.orig[i].flaglen = 0;
else
- np->rx_ring.ex[i].FlagLen = 0;
+ np->rx_ring.ex[i].flaglen = 0;
wmb();
if (np->rx_skbuff[i]) {
pci_unmap_single(np->pci_dev, np->rx_dma[i],
np->tx_dma_len[nr] = bcnt;
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
- np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+ np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]);
+ np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
} else {
- np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
- np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
- np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+ np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
+ np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
+ np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
}
tx_flags = np->tx_flags;
offset += bcnt;
size -= bcnt;
- } while(size);
+ } while (size);
/* setup the fragments */
for (i = 0; i < fragments; i++) {
np->tx_dma_len[nr] = bcnt;
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
- np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+ np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]);
+ np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
} else {
- np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
- np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
- np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+ np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
+ np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
+ np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
}
offset += bcnt;
size -= bcnt;
/* set last fragment flag */
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- np->tx_ring.orig[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
+ np->tx_ring.orig[nr].flaglen |= cpu_to_le32(tx_flags_extra);
} else {
- np->tx_ring.ex[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
+ np->tx_ring.ex[nr].flaglen |= cpu_to_le32(tx_flags_extra);
}
np->tx_skbuff[nr] = skb;
tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
else
#endif
- tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
+ tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
+ NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
/* vlan tag */
if (np->vlangrp && vlan_tx_tag_present(skb)) {
/* set tx flags */
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- np->tx_ring.orig[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
+ np->tx_ring.orig[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
} else {
- np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan);
- np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
+ np->tx_ring.ex[start_nr].txvlan = cpu_to_le32(tx_flags_vlan);
+ np->tx_ring.ex[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
}
dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n",
static void nv_tx_done(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
- u32 Flags;
+ u32 flags;
unsigned int i;
struct sk_buff *skb;
i = np->nic_tx % np->tx_ring_size;
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
- Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen);
+ flags = le32_to_cpu(np->tx_ring.orig[i].flaglen);
else
- Flags = le32_to_cpu(np->tx_ring.ex[i].FlagLen);
+ flags = le32_to_cpu(np->tx_ring.ex[i].flaglen);
- dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n",
- dev->name, np->nic_tx, Flags);
- if (Flags & NV_TX_VALID)
+ dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, flags 0x%x.\n",
+ dev->name, np->nic_tx, flags);
+ if (flags & NV_TX_VALID)
break;
if (np->desc_ver == DESC_VER_1) {
- if (Flags & NV_TX_LASTPACKET) {
+ if (flags & NV_TX_LASTPACKET) {
skb = np->tx_skbuff[i];
- if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
+ if (flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
NV_TX_UNDERFLOW|NV_TX_ERROR)) {
- if (Flags & NV_TX_UNDERFLOW)
+ if (flags & NV_TX_UNDERFLOW)
np->stats.tx_fifo_errors++;
- if (Flags & NV_TX_CARRIERLOST)
+ if (flags & NV_TX_CARRIERLOST)
np->stats.tx_carrier_errors++;
np->stats.tx_errors++;
} else {
}
}
} else {
- if (Flags & NV_TX2_LASTPACKET) {
+ if (flags & NV_TX2_LASTPACKET) {
skb = np->tx_skbuff[i];
- if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
+ if (flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
- if (Flags & NV_TX2_UNDERFLOW)
+ if (flags & NV_TX2_UNDERFLOW)
np->stats.tx_fifo_errors++;
- if (Flags & NV_TX2_CARRIERLOST)
+ if (flags & NV_TX2_CARRIERLOST)
np->stats.tx_carrier_errors++;
np->stats.tx_errors++;
} else {
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
i,
- le32_to_cpu(np->tx_ring.orig[i].PacketBuffer),
- le32_to_cpu(np->tx_ring.orig[i].FlagLen),
- le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer),
- le32_to_cpu(np->tx_ring.orig[i+1].FlagLen),
- le32_to_cpu(np->tx_ring.orig[i+2].PacketBuffer),
- le32_to_cpu(np->tx_ring.orig[i+2].FlagLen),
- le32_to_cpu(np->tx_ring.orig[i+3].PacketBuffer),
- le32_to_cpu(np->tx_ring.orig[i+3].FlagLen));
+ le32_to_cpu(np->tx_ring.orig[i].buf),
+ le32_to_cpu(np->tx_ring.orig[i].flaglen),
+ le32_to_cpu(np->tx_ring.orig[i+1].buf),
+ le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
+ le32_to_cpu(np->tx_ring.orig[i+2].buf),
+ le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
+ le32_to_cpu(np->tx_ring.orig[i+3].buf),
+ le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
} else {
printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
i,
- le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh),
- le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow),
- le32_to_cpu(np->tx_ring.ex[i].FlagLen),
- le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferHigh),
- le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferLow),
- le32_to_cpu(np->tx_ring.ex[i+1].FlagLen),
- le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferHigh),
- le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferLow),
- le32_to_cpu(np->tx_ring.ex[i+2].FlagLen),
- le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferHigh),
- le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferLow),
- le32_to_cpu(np->tx_ring.ex[i+3].FlagLen));
+ le32_to_cpu(np->tx_ring.ex[i].bufhigh),
+ le32_to_cpu(np->tx_ring.ex[i].buflow),
+ le32_to_cpu(np->tx_ring.ex[i].flaglen),
+ le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
+ le32_to_cpu(np->tx_ring.ex[i+1].buflow),
+ le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
+ le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
+ le32_to_cpu(np->tx_ring.ex[i+2].buflow),
+ le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
+ le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
+ le32_to_cpu(np->tx_ring.ex[i+3].buflow),
+ le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
}
}
}
int protolen; /* length as stored in the proto field */
/* 1) calculate len according to header */
- if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == __constant_htons(ETH_P_8021Q)) {
+ if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
hdrlen = VLAN_HLEN;
} else {
}
}
- static void nv_rx_process(struct net_device *dev)
+ static int nv_rx_process(struct net_device *dev, int limit)
{
struct fe_priv *np = netdev_priv(dev);
- u32 Flags;
+ u32 flags;
u32 vlanflags = 0;
+ int count;
- for (;;) {
+ for (count = 0; count < limit; ++count) {
struct sk_buff *skb;
int len;
int i;
i = np->cur_rx % np->rx_ring_size;
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen);
+ flags = le32_to_cpu(np->rx_ring.orig[i].flaglen);
len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
} else {
- Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen);
+ flags = le32_to_cpu(np->rx_ring.ex[i].flaglen);
len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver);
- vlanflags = le32_to_cpu(np->rx_ring.ex[i].PacketBufferLow);
+ vlanflags = le32_to_cpu(np->rx_ring.ex[i].buflow);
}
- dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n",
- dev->name, np->cur_rx, Flags);
+ dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, flags 0x%x.\n",
+ dev->name, np->cur_rx, flags);
- if (Flags & NV_RX_AVAIL)
+ if (flags & NV_RX_AVAIL)
break; /* still owned by hardware, */
/*
{
int j;
- dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",Flags);
+ dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
for (j=0; j<64; j++) {
if ((j%16) == 0)
dprintk("\n%03x:", j);
}
/* look at what we actually got: */
if (np->desc_ver == DESC_VER_1) {
- if (!(Flags & NV_RX_DESCRIPTORVALID))
+ if (!(flags & NV_RX_DESCRIPTORVALID))
goto next_pkt;
- if (Flags & NV_RX_ERROR) {
- if (Flags & NV_RX_MISSEDFRAME) {
+ if (flags & NV_RX_ERROR) {
+ if (flags & NV_RX_MISSEDFRAME) {
np->stats.rx_missed_errors++;
np->stats.rx_errors++;
goto next_pkt;
}
- if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
+ if (flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
np->stats.rx_errors++;
goto next_pkt;
}
- if (Flags & NV_RX_CRCERR) {
+ if (flags & NV_RX_CRCERR) {
np->stats.rx_crc_errors++;
np->stats.rx_errors++;
goto next_pkt;
}
- if (Flags & NV_RX_OVERFLOW) {
+ if (flags & NV_RX_OVERFLOW) {
np->stats.rx_over_errors++;
np->stats.rx_errors++;
goto next_pkt;
}
- if (Flags & NV_RX_ERROR4) {
+ if (flags & NV_RX_ERROR4) {
len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
if (len < 0) {
np->stats.rx_errors++;
}
}
/* framing errors are soft errors. */
- if (Flags & NV_RX_FRAMINGERR) {
- if (Flags & NV_RX_SUBSTRACT1) {
+ if (flags & NV_RX_FRAMINGERR) {
+ if (flags & NV_RX_SUBSTRACT1) {
len--;
}
}
}
} else {
- if (!(Flags & NV_RX2_DESCRIPTORVALID))
+ if (!(flags & NV_RX2_DESCRIPTORVALID))
goto next_pkt;
- if (Flags & NV_RX2_ERROR) {
- if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
+ if (flags & NV_RX2_ERROR) {
+ if (flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
np->stats.rx_errors++;
goto next_pkt;
}
- if (Flags & NV_RX2_CRCERR) {
+ if (flags & NV_RX2_CRCERR) {
np->stats.rx_crc_errors++;
np->stats.rx_errors++;
goto next_pkt;
}
- if (Flags & NV_RX2_OVERFLOW) {
+ if (flags & NV_RX2_OVERFLOW) {
np->stats.rx_over_errors++;
np->stats.rx_errors++;
goto next_pkt;
}
- if (Flags & NV_RX2_ERROR4) {
+ if (flags & NV_RX2_ERROR4) {
len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
if (len < 0) {
np->stats.rx_errors++;
}
}
/* framing errors are soft errors */
- if (Flags & NV_RX2_FRAMINGERR) {
- if (Flags & NV_RX2_SUBSTRACT1) {
+ if (flags & NV_RX2_FRAMINGERR) {
+ if (flags & NV_RX2_SUBSTRACT1) {
len--;
}
}
}
- if (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) {
- Flags &= NV_RX2_CHECKSUMMASK;
- if (Flags == NV_RX2_CHECKSUMOK1 ||
- Flags == NV_RX2_CHECKSUMOK2 ||
- Flags == NV_RX2_CHECKSUMOK3) {
+ if (np->rx_csum) {
+ flags &= NV_RX2_CHECKSUMMASK;
+ if (flags == NV_RX2_CHECKSUMOK1 ||
+ flags == NV_RX2_CHECKSUMOK2 ||
+ flags == NV_RX2_CHECKSUMOK3) {
dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name);
np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY;
} else {
skb->protocol = eth_type_trans(skb, dev);
dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n",
dev->name, np->cur_rx, len, skb->protocol);
- if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) {
- vlan_hwaccel_rx(skb, np->vlangrp, vlanflags & NV_RX3_VLAN_TAG_MASK);
- } else {
+ #ifdef CONFIG_FORCEDETH_NAPI
+ if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT))
+ vlan_hwaccel_receive_skb(skb, np->vlangrp,
+ vlanflags & NV_RX3_VLAN_TAG_MASK);
+ else
+ netif_receive_skb(skb);
+ #else
+ if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT))
+ vlan_hwaccel_rx(skb, np->vlangrp,
+ vlanflags & NV_RX3_VLAN_TAG_MASK);
+ else
netif_rx(skb);
- }
+ #endif
dev->last_rx = jiffies;
np->stats.rx_packets++;
np->stats.rx_bytes += len;
next_pkt:
np->cur_rx++;
}
+
+ return count;
}
static void set_bufsize(struct net_device *dev)
struct fe_priv *np = netdev_priv(dev);
struct sockaddr *macaddr = (struct sockaddr*)addr;
- if(!is_valid_ether_addr(macaddr->sa_data))
+ if (!is_valid_ether_addr(macaddr->sa_data))
return -EADDRNOTAVAIL;
/* synchronized against open : rtnl_lock() held by caller */
memset(mask, 0, sizeof(mask));
if (dev->flags & IFF_PROMISC) {
- printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
pff |= NVREG_PFF_PROMISC;
} else {
pff |= NVREG_PFF_MYADDR;
lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);
switch (adv_pause) {
- case (ADVERTISE_PAUSE_CAP):
+ case ADVERTISE_PAUSE_CAP:
if (lpa_pause & LPA_PAUSE_CAP) {
pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
}
break;
- case (ADVERTISE_PAUSE_ASYM):
+ case ADVERTISE_PAUSE_ASYM:
if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
{
pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
}
break;
- case (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM):
+ case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM:
if (lpa_pause & LPA_PAUSE_CAP)
{
pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
nv_tx_done(dev);
spin_unlock(&np->lock);
- nv_rx_process(dev);
- if (nv_alloc_rx(dev)) {
- spin_lock(&np->lock);
- if (!np->in_shutdown)
- mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
- spin_unlock(&np->lock);
- }
-
if (events & NVREG_IRQ_LINK) {
spin_lock(&np->lock);
nv_link_irq(dev);
printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
dev->name, events);
}
+ #ifdef CONFIG_FORCEDETH_NAPI
+ if (events & NVREG_IRQ_RX_ALL) {
+ netif_rx_schedule(dev);
+
+ /* Disable further receive irqs */
+ spin_lock(&np->lock);
+ np->irqmask &= ~NVREG_IRQ_RX_ALL;
+
+ if (np->msi_flags & NV_MSI_X_ENABLED)
+ writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
+ else
+ writel(np->irqmask, base + NvRegIrqMask);
+ spin_unlock(&np->lock);
+ }
+ #else
+ nv_rx_process(dev, dev->weight);
+ if (nv_alloc_rx(dev)) {
+ spin_lock(&np->lock);
+ if (!np->in_shutdown)
+ mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+ spin_unlock(&np->lock);
+ }
+ #endif
if (i > max_interrupt_work) {
spin_lock(&np->lock);
/* disable interrupts on the nic */
return IRQ_RETVAL(i);
}
+ #ifdef CONFIG_FORCEDETH_NAPI
+ static int nv_napi_poll(struct net_device *dev, int *budget)
+ {
+ int pkts, limit = min(*budget, dev->quota);
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+
+ pkts = nv_rx_process(dev, limit);
+
+ if (nv_alloc_rx(dev)) {
+ spin_lock_irq(&np->lock);
+ if (!np->in_shutdown)
+ mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+ spin_unlock_irq(&np->lock);
+ }
+
+ if (pkts < limit) {
+ /* all done, no more packets present */
+ netif_rx_complete(dev);
+
+ /* re-enable receive interrupts */
+ spin_lock_irq(&np->lock);
+ np->irqmask |= NVREG_IRQ_RX_ALL;
+ if (np->msi_flags & NV_MSI_X_ENABLED)
+ writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
+ else
+ writel(np->irqmask, base + NvRegIrqMask);
+ spin_unlock_irq(&np->lock);
+ return 0;
+ } else {
+ /* used up our quantum, so reschedule */
+ dev->quota -= pkts;
+ *budget -= pkts;
+ return 1;
+ }
+ }
+ #endif
+
+ #ifdef CONFIG_FORCEDETH_NAPI
+ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
+ {
+ struct net_device *dev = (struct net_device *) data;
+ u8 __iomem *base = get_hwbase(dev);
+ u32 events;
+
+ events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
+ writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
+
+ if (events) {
+ netif_rx_schedule(dev);
+ /* disable receive interrupts on the nic */
+ writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
+ pci_push(base);
+ }
+ return IRQ_HANDLED;
+ }
+ #else
static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
{
struct net_device *dev = (struct net_device *) data;
if (!(events & np->irqmask))
break;
- nv_rx_process(dev);
+ nv_rx_process(dev, dev->weight);
if (nv_alloc_rx(dev)) {
spin_lock_irq(&np->lock);
if (!np->in_shutdown)
spin_unlock_irq(&np->lock);
break;
}
-
}
dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);
return IRQ_RETVAL(i);
}
+ #endif
static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
{
if (netif_running(dev))
printk(KERN_INFO "%s: link down.\n", dev->name);
bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
- bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
- mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
-
+ if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
+ bmcr |= BMCR_ANENABLE;
+ /* reset the phy in order for settings to stick,
+ * and cause autoneg to start */
+ if (phy_reset(dev, bmcr)) {
+ printk(KERN_INFO "%s: phy reset failed\n", dev->name);
+ return -EINVAL;
+ }
+ } else {
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+ }
} else {
int adv, bmcr;
bmcr |= BMCR_FULLDPLX;
if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
bmcr |= BMCR_SPEED100;
- mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
if (np->phy_oui == PHY_OUI_MARVELL) {
- /* reset the phy */
- if (phy_reset(dev)) {
+ /* reset the phy in order for forced mode settings to stick */
+ if (phy_reset(dev, bmcr)) {
printk(KERN_INFO "%s: phy reset failed\n", dev->name);
return -EINVAL;
}
- } else if (netif_running(dev)) {
- /* Wait a bit and then reconfigure the nic. */
- udelay(10);
- nv_linkchange(dev);
+ } else {
+ mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+ if (netif_running(dev)) {
+ /* Wait a bit and then reconfigure the nic. */
+ udelay(10);
+ nv_linkchange(dev);
+ }
}
}
}
bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
- bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
- mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+ if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
+ bmcr |= BMCR_ANENABLE;
+ /* reset the phy in order for settings to stick */
+ if (phy_reset(dev, bmcr)) {
+ printk(KERN_INFO "%s: phy reset failed\n", dev->name);
+ return -EINVAL;
+ }
+ } else {
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+ }
if (netif_running(dev)) {
nv_start_rx(dev);
if (!rxtx_ring || !rx_skbuff || !rx_dma || !tx_skbuff || !tx_dma || !tx_dma_len) {
/* fall back to old rings */
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- if(rxtx_ring)
+ if (rxtx_ring)
pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
rxtx_ring, ring_addr);
} else {
static u32 nv_get_rx_csum(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
- return (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) != 0;
+ return (np->rx_csum) != 0;
}
static int nv_set_rx_csum(struct net_device *dev, u32 data)
int retcode = 0;
if (np->driver_data & DEV_HAS_CHECKSUM) {
-
- if (((np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && data) ||
- (!(np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && !data)) {
- /* already set or unset */
- return 0;
- }
-
if (data) {
+ np->rx_csum = 1;
np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
- } else if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE)) {
- np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
} else {
- printk(KERN_INFO "Can not disable rx checksum if vlan is enabled\n");
- return -EINVAL;
+ np->rx_csum = 0;
+ /* vlan is dependent on rx checksum offload */
+ if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
+ np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
}
-
if (netif_running(dev)) {
spin_lock_irq(&np->lock);
writel(np->txrxctl_bits, base + NvRegTxRxControl);
struct fe_priv *np = netdev_priv(dev);
if (np->driver_data & DEV_HAS_STATISTICS)
- return (sizeof(struct nv_ethtool_stats)/sizeof(u64));
+ return sizeof(struct nv_ethtool_stats)/sizeof(u64);
else
return 0;
}
struct sk_buff *tx_skb, *rx_skb;
dma_addr_t test_dma_addr;
u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
- u32 Flags;
+ u32 flags;
int len, i, pkt_len;
u8 *pkt_data;
u32 filter_flags = 0;
tx_skb->end-tx_skb->data, PCI_DMA_FROMDEVICE);
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- np->tx_ring.orig[0].PacketBuffer = cpu_to_le32(test_dma_addr);
- np->tx_ring.orig[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
+ np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
+ np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
} else {
- np->tx_ring.ex[0].PacketBufferHigh = cpu_to_le64(test_dma_addr) >> 32;
- np->tx_ring.ex[0].PacketBufferLow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF;
- np->tx_ring.ex[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
+ np->tx_ring.ex[0].bufhigh = cpu_to_le64(test_dma_addr) >> 32;
+ np->tx_ring.ex[0].buflow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF;
+ np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
}
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
pci_push(get_hwbase(dev));
/* check for rx of the packet */
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- Flags = le32_to_cpu(np->rx_ring.orig[0].FlagLen);
+ flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
} else {
- Flags = le32_to_cpu(np->rx_ring.ex[0].FlagLen);
+ flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
}
- if (Flags & NV_RX_AVAIL) {
+ if (flags & NV_RX_AVAIL) {
ret = 0;
} else if (np->desc_ver == DESC_VER_1) {
- if (Flags & NV_RX_ERROR)
+ if (flags & NV_RX_ERROR)
ret = 0;
} else {
- if (Flags & NV_RX2_ERROR) {
+ if (flags & NV_RX2_ERROR) {
ret = 0;
}
}
if (test->flags & ETH_TEST_FL_OFFLINE) {
if (netif_running(dev)) {
netif_stop_queue(dev);
+ netif_poll_disable(dev);
netif_tx_lock_bh(dev);
spin_lock_irq(&np->lock);
nv_disable_hw_interrupts(dev, np->irqmask);
nv_start_rx(dev);
nv_start_tx(dev);
netif_start_queue(dev);
+ netif_poll_enable(dev);
nv_enable_hw_interrupts(dev, np->irqmask);
}
}
}
}
- static struct ethtool_ops ops = {
+ static const struct ethtool_ops ops = {
.get_drvinfo = nv_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_wol = nv_get_wol,
dprintk(KERN_DEBUG "nv_open: begin\n");
- /* 1) erase previous misconfiguration */
+ /* erase previous misconfiguration */
if (np->driver_data & DEV_HAS_POWER_CNTRL)
nv_mac_reset(dev);
- /* 4.1-1: stop adapter: ignored, 4.3 seems to be overkill */
writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
writel(0, base + NvRegMulticastAddrB);
writel(0, base + NvRegMulticastMaskA);
if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
- /* 2) initialize descriptor rings */
+ /* initialize descriptor rings */
set_bufsize(dev);
oom = nv_init_ring(dev);
writel(0, base + NvRegLinkSpeed);
- writel(0, base + NvRegUnknownTransmitterReg);
+ writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
nv_txrx_reset(dev);
writel(0, base + NvRegUnknownSetupReg6);
np->in_shutdown = 0;
- /* 3) set mac address */
- nv_copy_mac_to_hw(dev);
-
- /* 4) give hw rings */
+ /* give hw rings */
setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
base + NvRegRingSizes);
- /* 5) continue setup */
writel(np->linkspeed, base + NvRegLinkSpeed);
if (np->desc_ver == DESC_VER_1)
writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
- /* 6) continue setup */
writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
nv_start_rx(dev);
nv_start_tx(dev);
netif_start_queue(dev);
+ netif_poll_enable(dev);
+
if (ret) {
netif_carrier_on(dev);
} else {
spin_lock_irq(&np->lock);
np->in_shutdown = 1;
spin_unlock_irq(&np->lock);
+ netif_poll_disable(dev);
synchronize_irq(dev->irq);
del_timer_sync(&np->oom_kick);
if (np->wolenabled)
nv_start_rx(dev);
- /* special op: write back the misordered MAC address - otherwise
- * the next nv_probe would see a wrong address.
- */
- writel(np->orig_mac[0], base + NvRegMacAddrA);
- writel(np->orig_mac[1], base + NvRegMacAddrB);
-
/* FIXME: power down nic */
return 0;
unsigned long addr;
u8 __iomem *base;
int err, i;
- u32 powerstate;
+ u32 powerstate, txreg;
dev = alloc_etherdev(sizeof(struct fe_priv));
err = -ENOMEM;
np->pkt_limit = NV_PKTLIMIT_2;
if (id->driver_data & DEV_HAS_CHECKSUM) {
+ np->rx_csum = 1;
np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
#ifdef NETIF_F_TSO
dev->set_multicast_list = nv_set_multicast;
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = nv_poll_controller;
+ #endif
+ dev->weight = 64;
+ #ifdef CONFIG_FORCEDETH_NAPI
+ dev->poll = nv_napi_poll;
#endif
SET_ETHTOOL_OPS(dev, &ops);
dev->tx_timeout = nv_tx_timeout;
np->orig_mac[0] = readl(base + NvRegMacAddrA);
np->orig_mac[1] = readl(base + NvRegMacAddrB);
- dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
- dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
- dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
- dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
- dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
- dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
+ /* check the workaround bit for correct mac address order */
+ txreg = readl(base + NvRegTransmitPoll);
+ if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
+ /* mac address is already in correct order */
+ dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
+ dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
+ dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
+ dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
+ dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
+ dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
+ } else {
+ /* need to reverse mac address to correct order */
+ dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
+ dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
+ dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
+ dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
+ dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
+ dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
+ /* set permanent address to be correct as well */
+ np->orig_mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
+ (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
+ np->orig_mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
+ writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
+ }
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
if (!is_valid_ether_addr(dev->perm_addr)) {
dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+ /* set mac address */
+ nv_copy_mac_to_hw(dev);
+
/* disable WOL */
writel(0, base + NvRegWakeUpFlags);
np->wolenabled = 0;
if (id2 < 0 || id2 == 0xffff)
continue;
+ np->phy_model = id2 & PHYID2_MODEL_MASK;
id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
static void __devexit nv_remove(struct pci_dev *pci_dev)
{
struct net_device *dev = pci_get_drvdata(pci_dev);
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
unregister_netdev(dev);
+ /* special op: write back the misordered MAC address - otherwise
+ * the next nv_probe would see a wrong address.
+ */
+ writel(np->orig_mac[0], base + NvRegMacAddrA);
+ writel(np->orig_mac[1], base + NvRegMacAddrB);
+
/* free all structures */
free_rings(dev);
iounmap(get_hwbase(dev));
static int __init init_nic(void)
{
printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION);
- return pci_module_init(&driver);
+ return pci_register_driver(&driver);
}
static void __exit exit_nic(void)
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
- extern struct ethtool_ops gfar_ethtool_ops;
+ extern const struct ethtool_ops gfar_ethtool_ops;
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
/* Set up checksumming */
if (likely((dev->features & NETIF_F_IP_CSUM)
- && (CHECKSUM_HW == skb->ip_summed))) {
+ && (CHECKSUM_PARTIAL == skb->ip_summed))) {
fcb = gfar_add_fcb(skb, txbdp);
status |= TXBD_TOE;
gfar_tx_checksum(skb, fcb);
tempval |= TCTRL_VLINS;
gfar_write(&priv->regs->tctrl, tempval);
-
+
/* Enable VLAN tag extraction */
tempval = gfar_read(&priv->regs->rctrl);
tempval |= RCTRL_VLEX;
u32 tempval;
if(dev->flags & IFF_PROMISC) {
- if (netif_msg_drv(priv))
- printk(KERN_INFO "%s: Entering promiscuous mode.\n",
- dev->name);
/* Set RCTRL to PROM */
tempval = gfar_read(&regs->rctrl);
tempval |= RCTRL_PROM;
tempval &= ~(RCTRL_PROM);
gfar_write(&regs->rctrl, tempval);
}
-
+
if(dev->flags & IFF_ALLMULTI) {
/* Set the hash to rx all multicast frames */
gfar_write(&regs->igaddr0, 0xffffffff);
if (err)
gfar_mdio_exit();
-
+
return err;
}
Written 1998-2000 by Donald Becker.
Updates 2000 by Keith Underwood.
- This software may be used and distributed according to the terms of
+ This software may be used and distributed according to the terms of
the GNU General Public License (GPL), incorporated herein by reference.
Drivers based on or derived from this code fall under the GPL and must
retain the authorship, copyright and license notice. This file is not
*/
#define DRV_NAME "hamachi"
- #define DRV_VERSION "2.0"
- #define DRV_RELDATE "June 27, 2006"
+ #define DRV_VERSION "2.1"
+ #define DRV_RELDATE "Sept 11, 2006"
/* A few user-configurable values. */
static int max_rx_latency = 0x11;
static int max_rx_gap = 0x05;
static int min_rx_pkt = 0x18;
- static int max_tx_latency = 0x00;
+ static int max_tx_latency = 0x00;
static int max_tx_gap = 0x00;
static int min_tx_pkt = 0x30;
- The next bit can be used to force half-duplex. This is a bad
idea since no known implementations implement half-duplex, and,
in general, half-duplex for gigabit ethernet is a bad idea.
- 0x00000080 : Force half-duplex
+ 0x00000080 : Force half-duplex
Default is full-duplex.
- In the original driver, the ninth bit could be used to force
full-duplex. Maintain that for compatibility
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
/* The Hamachi chipset supports 3 parameters each for Rx and Tx
* interrupt management. Parameters will be loaded as specified into
- * the TxIntControl and RxIntControl registers.
+ * the TxIntControl and RxIntControl registers.
*
* The registers are arranged as follows:
* 23 - 16 15 - 8 7 - 0
* | min_pkt | max_gap | max_latency |
* ---------------------------------
* min_pkt : The minimum number of packets processed between
- * interrupts.
+ * interrupts.
* max_gap : The maximum inter-packet gap in units of 8.192 us
* max_latency : The absolute time between interrupts in units of 8.192 us
- *
+ *
*/
static int rx_params[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int tx_params[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
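As an aside, here is a minimal sketch of how the three mitigation fields described above pack into a single TxIntrCtrl/RxIntrCtrl value; it mirrors the (min_pkt << 16 | max_gap << 8 | max_latency) expression used later in this file when rx_params/tx_params are left at -1. The helper name is hypothetical and not part of the driver:

/* illustration only: bits 23-16 min_pkt, bits 15-8 max_gap, bits 7-0 max_latency */
static u32 pack_intr_ctrl(u32 min_pkt, u32 max_gap, u32 max_latency)
{
	return (min_pkt << 16) | (max_gap << 8) | max_latency;
}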
other linux headers causing many compiler warnings.
*/
#ifndef IP_MF
- #define IP_MF 0x2000 /* IP more frags from <netinet/ip.h> */
+ #define IP_MF 0x2000 /* IP more frags from <netinet/ip.h> */
#endif
/* Define IP_OFFSET to be IPOPT_OFFSET */
/* Condensed bus+endian portability operations. */
#if ADDRLEN == 64
#define cpu_to_leXX(addr) cpu_to_le64(addr)
- #else
+ #else
#define cpu_to_leXX(addr) cpu_to_le32(addr)
- #endif
+ #endif
/*
IVc. Errata
- None noted.
+ None noted.
V. Recent Changes
- 01/15/1999 EPK Enlargement of the TX and RX ring sizes. This appears
+ 01/15/1999 EPK Enlargement of the TX and RX ring sizes. This appears
to help avoid some stall conditions -- this needs further research.
- 01/15/1999 EPK Creation of the hamachi_tx function. This function cleans
+ 01/15/1999 EPK Creation of the hamachi_tx function. This function cleans
the Tx ring and is called from hamachi_start_xmit (this used to be
called from hamachi_interrupt but it tends to delay execution of the
interrupt handler and thus reduce bandwidth by reducing the latency
- between hamachi_rx()'s). Notably, some modification has been made so
- that the cleaning loop checks only to make sure that the DescOwn bit
- isn't set in the status flag since the card is not required
+ between hamachi_rx()'s). Notably, some modification has been made so
+ that the cleaning loop checks only to make sure that the DescOwn bit
+ isn't set in the status flag since the card is not required
to set the entire flag to zero after processing.
- 01/15/1999 EPK In the hamachi_start_tx function, the Tx ring full flag is
+ 01/15/1999 EPK In the hamachi_start_tx function, the Tx ring full flag is
checked before attempting to add a buffer to the ring. If the ring is full
an attempt is made to free any dirty buffers and thus find space for
the new buffer or the function returns non-zero which should cause the
scheduler to reschedule the buffer later.
- 01/15/1999 EPK Some adjustments were made to the chip initialization.
- End-to-end flow control should now be fully active and the interrupt
+ 01/15/1999 EPK Some adjustments were made to the chip initialization.
+ End-to-end flow control should now be fully active and the interrupt
algorithm vars have been changed. These could probably use further tuning.
01/15/1999 EPK Added the max_{rx,tx}_latency options. These are used to
problems with network stalls, try setting these to higher values.
Valid values are 0x00 through 0xff.
- 01/15/1999 EPK In general, the overall bandwidth has increased and
+ 01/15/1999 EPK In general, the overall bandwidth has increased and
latencies are better (sometimes by a factor of 2). Stalls are rare at
this point, however there still appears to be a bug somewhere between the
hardware and driver. TCP checksum errors under load also appear to be
rings was typically getting set correctly, but the Tx ring wasn't getting
the DescEndRing bit set during initialization. ??? Does this mean the
hamachi card is using the DescEndRing in processing even if a particular
- slot isn't in use -- hypothetically, the card might be searching the
+ slot isn't in use -- hypothetically, the card might be searching the
entire Tx ring for slots with the DescOwn bit set and then processing
them. If the DescEndRing bit isn't set, then it might just wander off
through memory until it hits a chunk of data with that bit set
and then looping back.
- 02/09/1999 EPK Added Michel Mueller's TxDMA Interrupt and Tx-timeout
+ 02/09/1999 EPK Added Michel Mueller's TxDMA Interrupt and Tx-timeout
problem (TxCmd and RxCmd need only to be set when idle or stopped.
02/09/1999 EPK Added code to check/reset dev->tbusy in hamachi_interrupt.
- (Michel Mueller pointed out the ``permanently busy'' potential
+ (Michel Mueller pointed out the ``permanently busy'' potential
problem here).
- 02/22/1999 EPK Added Pete Wyckoff's ioctl to control the Tx/Rx latencies.
+ 02/22/1999 EPK Added Pete Wyckoff's ioctl to control the Tx/Rx latencies.
02/23/1999 EPK Verified that the interrupt status field bits for Tx were
incorrectly defined and corrected (as per Michel Mueller).
02/20/2000 KDU Some of the control was just plain odd. Cleaned up the
hamachi_start_xmit() and hamachi_interrupt() code. There is still some
- re-structuring I would like to do.
+ re-structuring I would like to do.
03/01/2000 KDU Experimenting with a WIDE range of interrupt mitigation
parameters on a dual P3-450 setup yielded the new default interrupt
Eric's scheme. Rx should be more often...
03/13/2000 KDU Added a patch to make the Rx Checksum code interact
- nicely with non-linux machines.
+ nicely with non-linux machines.
- 03/13/2000 KDU Experimented with some of the configuration values:
+ 03/13/2000 KDU Experimented with some of the configuration values:
-It seems that enabling PCI performance commands for descriptors
- (changing RxDMACtrl and TxDMACtrl lower nibble from 5 to D) has minimal
- performance impact for any of my tests. (ttcp, netpipe, netperf) I will
+ (changing RxDMACtrl and TxDMACtrl lower nibble from 5 to D) has minimal
+ performance impact for any of my tests. (ttcp, netpipe, netperf) I will
leave them that way until I hear further feedback.
- -Increasing the PCI_LATENCY_TIMER to 130
+ -Increasing the PCI_LATENCY_TIMER to 130
(2 + (burst size of 128 * (0 wait states + 1))) seems to slightly
degrade performance. Leaving default at 64 pending further information.
- 03/14/2000 KDU Further tuning:
+ 03/14/2000 KDU Further tuning:
-adjusted boguscnt in hamachi_rx() to depend on interrupt
mitigation parameters chosen.
- -Selected a set of interrupt parameters based on some extensive testing.
+ -Selected a set of interrupt parameters based on some extensive testing.
These may change with more testing.
TO DO:
PCI_COMMAND_INVALIDATE. Set maximum burst size to cache line size in
that case.
- -fix the reset procedure. It doesn't quite work.
+ -fix the reset procedure. It doesn't quite work.
*/
/* A few values that may be tweaked. */
/* Size of each temporary Rx buffer, calculated as:
* 1518 bytes (ethernet packet) + 2 bytes (to get 8 byte alignment for
* the card) + 8 bytes of status info + 8 bytes for the Rx Checksum +
- * 2 more because we use skb_reserve.
+ * 2 more because we use skb_reserve.
*/
#define PKT_BUF_SZ 1538
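That works out to 1518 + 2 + 8 + 8 + 2 = 1538 bytes, matching the define above.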
/* The Hamachi Rx and Tx buffer descriptors. */
struct hamachi_desc {
- u32 status_n_length;
+ u32 status_n_length;
#if ADDRLEN == 64
u32 pad;
u64 addr;
/* Bits in hamachi_desc.status_n_length */
enum desc_status_bits {
- DescOwn=0x80000000, DescEndPacket=0x40000000, DescEndRing=0x20000000,
+ DescOwn=0x80000000, DescEndPacket=0x40000000, DescEndRing=0x20000000,
DescIntr=0x10000000,
};
MODULE_PARM_DESC(options, "GNIC-II Bits 0-3: media type, bits 4-6: as force32, bit 7: half duplex, bit 9 full duplex");
MODULE_PARM_DESC(full_duplex, "GNIC-II full duplex setting(s) (1)");
MODULE_PARM_DESC(force32, "GNIC-II: Bit 0: 32 bit PCI, bit 1: disable parity, bit 2: 64 bit PCI (all boards)");
-
+
static int read_eeprom(void __iomem *ioaddr, int location);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int hamachi_close(struct net_device *dev);
static struct net_device_stats *hamachi_get_stats(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
- static struct ethtool_ops ethtool_ops;
- static struct ethtool_ops ethtool_ops_no_mii;
+ static const struct ethtool_ops ethtool_ops;
+ static const struct ethtool_ops ethtool_ops_no_mii;
static int __devinit hamachi_init_one (struct pci_dev *pdev,
const struct pci_device_id *ent)
option = dev->mem_start;
/* If the bus size is misidentified, do the following. */
- force32 = force32 ? force32 :
+ force32 = force32 ? force32 :
((option >= 0) ? ((option & 0x00000070) >> 4) : 0 );
if (force32)
writeb(force32, ioaddr + VirtualJumpers);
* be valid for a moment. Wait for a little while until it is. If
* it takes more than 10ms, forget it.
*/
- udelay(10);
+ udelay(10);
i = readb(ioaddr + PCIClkMeas);
for (boguscnt = 0; (!(i & 0x080)) && boguscnt < 1000; boguscnt++){
- udelay(10);
- i = readb(ioaddr + PCIClkMeas);
+ udelay(10);
+ i = readb(ioaddr + PCIClkMeas);
}
hmp->base = ioaddr;
rx_int_var = card_idx < MAX_UNITS ? rx_params[card_idx] : -1;
tx_int_var = card_idx < MAX_UNITS ? tx_params[card_idx] : -1;
- hmp->rx_int_var = rx_int_var >= 0 ? rx_int_var :
+ hmp->rx_int_var = rx_int_var >= 0 ? rx_int_var :
(min_rx_pkt << 16 | max_rx_gap << 8 | max_rx_latency);
- hmp->tx_int_var = tx_int_var >= 0 ? tx_int_var :
+ hmp->tx_int_var = tx_int_var >= 0 ? tx_int_var :
(min_tx_pkt << 16 | max_tx_gap << 8 | max_tx_latency);
return 0;
err_out_unmap_rx:
- pci_free_consistent(pdev, RX_TOTAL_SIZE, hmp->rx_ring,
+ pci_free_consistent(pdev, RX_TOTAL_SIZE, hmp->rx_ring,
hmp->rx_ring_dma);
err_out_unmap_tx:
- pci_free_consistent(pdev, TX_TOTAL_SIZE, hmp->tx_ring,
+ pci_free_consistent(pdev, TX_TOTAL_SIZE, hmp->tx_ring,
hmp->tx_ring_dma);
err_out_cleardev:
free_netdev (dev);
return;
}
- \f
+
static int hamachi_open(struct net_device *dev)
{
struct hamachi_private *hmp = netdev_priv(dev);
writel(cpu_to_le32(hmp->tx_ring_dma), ioaddr + TxPtr);
#endif
- /* TODO: It would make sense to organize this as words since the card
+ /* TODO: It would make sense to organize this as words since the card
* documentation does. -KDU
*/
for (i = 0; i < 6; i++)
/* Configure the FIFO */
fifo_info = (readw(ioaddr + GPIO) & 0x00C0) >> 6;
switch (fifo_info){
- case 0 :
+ case 0 :
/* No FIFO */
writew(0x0000, ioaddr + FIFOcfg);
break;
- case 1 :
+ case 1 :
/* Configure the FIFO for 512K external, 16K used for Tx. */
writew(0x0028, ioaddr + FIFOcfg);
break;
- case 2 :
+ case 2 :
/* Configure the FIFO for 1024 external, 32K used for Tx. */
writew(0x004C, ioaddr + FIFOcfg);
break;
- case 3 :
+ case 3 :
/* Configure the FIFO for 2048 external, 32K used for Tx. */
writew(0x006C, ioaddr + FIFOcfg);
break;
- default :
+ default :
printk(KERN_WARNING "%s: Unsupported external memory config!\n",
dev->name);
/* Default to no FIFO */
writew(0x0000, ioaddr + FIFOcfg);
break;
}
-
+
if (dev->if_port == 0)
dev->if_port = hmp->default_port;
/* Setting the Rx mode will start the Rx process. */
- /* If someone didn't choose a duplex, default to full-duplex */
+ /* If someone didn't choose a duplex, default to full-duplex */
if (hmp->duplex_lock != 1)
hmp->mii_if.full_duplex = 1;
#endif
writew(0x8000, ioaddr + MACCnfg); /* Soft reset the MAC */
writew(0x215F, ioaddr + MACCnfg);
- writew(0x000C, ioaddr + FrameGap0);
+ writew(0x000C, ioaddr + FrameGap0);
/* WHAT?!?!? Why isn't this documented somewhere? -KDU */
writew(0x1018, ioaddr + FrameGap1);
/* Why do we enable receives/transmits here? -KDU */
if (hamachi_debug > 1) {
printk("max_tx_latency: %d, max_tx_gap: %d, min_tx_pkt: %d\n",
- tx_int_var & 0x00ff, (tx_int_var & 0x00ff00) >> 8,
+ tx_int_var & 0x00ff, (tx_int_var & 0x00ff00) >> 8,
(tx_int_var & 0x00ff0000) >> 16);
printk("max_rx_latency: %d, max_rx_gap: %d, min_rx_pkt: %d\n",
- rx_int_var & 0x00ff, (rx_int_var & 0x00ff00) >> 8,
+ rx_int_var & 0x00ff, (rx_int_var & 0x00ff00) >> 8,
(rx_int_var & 0x00ff0000) >> 16);
printk("rx_int_var: %x, tx_int_var: %x\n", rx_int_var, tx_int_var);
}
- writel(tx_int_var, ioaddr + TxIntrCtrl);
- writel(rx_int_var, ioaddr + RxIntrCtrl);
+ writel(tx_int_var, ioaddr + TxIntrCtrl);
+ writel(rx_int_var, ioaddr + RxIntrCtrl);
set_rx_mode(dev);
int entry = hmp->dirty_tx % TX_RING_SIZE;
struct sk_buff *skb;
- if (hmp->tx_ring[entry].status_n_length & cpu_to_le32(DescOwn))
+ if (hmp->tx_ring[entry].status_n_length & cpu_to_le32(DescOwn))
break;
/* Free the original skb. */
skb = hmp->tx_skbuff[entry];
if (skb != 0) {
- pci_unmap_single(hmp->pci_dev,
- hmp->tx_ring[entry].addr, skb->len,
+ pci_unmap_single(hmp->pci_dev,
+ hmp->tx_ring[entry].addr, skb->len,
PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
hmp->tx_skbuff[entry] = NULL;
}
hmp->tx_ring[entry].status_n_length = 0;
- if (entry >= TX_RING_SIZE-1)
+ if (entry >= TX_RING_SIZE-1)
hmp->tx_ring[TX_RING_SIZE-1].status_n_length |=
- cpu_to_le32(DescEndRing);
+ cpu_to_le32(DescEndRing);
hmp->stats.tx_packets++;
}
printk("\n");
}
- /* Reinit the hardware and make sure the Rx and Tx processes
+ /* Reinit the hardware and make sure the Rx and Tx processes
are up and running.
*/
dev->if_port = 0;
* -Turn off MAC receiver
* -Issue Reset
*/
-
+
for (i = 0; i < RX_RING_SIZE; i++)
hmp->rx_ring[i].status_n_length &= cpu_to_le32(~DescOwn);
hmp->tx_ring[i].status_n_length = cpu_to_le32(
DescEndRing |
(hmp->tx_ring[i].status_n_length & 0x0000FFFF));
- else
+ else
hmp->tx_ring[i].status_n_length &= 0x0000ffff;
skb = hmp->tx_skbuff[i];
if (skb){
- pci_unmap_single(hmp->pci_dev, hmp->tx_ring[i].addr,
+ pci_unmap_single(hmp->pci_dev, hmp->tx_ring[i].addr,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
hmp->tx_skbuff[i] = NULL;
udelay(60); /* Sleep 60 us just for safety sake */
writew(0x0002, ioaddr + RxCmd); /* STOP Rx */
-
- writeb(0x01, ioaddr + ChipReset); /* Reinit the hardware */
+
+ writeb(0x01, ioaddr + ChipReset); /* Reinit the hardware */
hmp->tx_full = 0;
hmp->cur_rx = hmp->cur_tx = 0;
hmp->dirty_rx = hmp->dirty_tx = 0;
/* Rx packets are also presumed lost; however, we need to make sure a
* ring of buffers is intact. -KDU
- */
+ */
for (i = 0; i < RX_RING_SIZE; i++){
struct sk_buff *skb = hmp->rx_skbuff[i];
if (skb){
- pci_unmap_single(hmp->pci_dev, hmp->rx_ring[i].addr,
+ pci_unmap_single(hmp->pci_dev, hmp->rx_ring[i].addr,
hmp->rx_buf_sz, PCI_DMA_FROMDEVICE);
dev_kfree_skb(skb);
hmp->rx_skbuff[i] = NULL;
break;
skb->dev = dev; /* Mark as being used by this device. */
skb_reserve(skb, 2); /* 16 byte align the IP header. */
- hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
+ hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
- hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn |
+ hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn |
DescEndPacket | DescIntr | (hmp->rx_buf_sz - 2));
}
hmp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
#endif
/* My attempt at a reasonable correction */
/* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
- * card needs room to do 8 byte alignment, +2 so we can reserve
- * the first 2 bytes, and +16 gets room for the status word from the
+ * card needs room to do 8 byte alignment, +2 so we can reserve
+ * the first 2 bytes, and +16 gets room for the status word from the
* card. -KDU
*/
- hmp->rx_buf_sz = (dev->mtu <= 1492 ? PKT_BUF_SZ :
+ hmp->rx_buf_sz = (dev->mtu <= 1492 ? PKT_BUF_SZ :
(((dev->mtu+26+7) & ~7) + 2 + 16));
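As a worked example (the MTU value is chosen purely for illustration): a jumbo MTU of 9000 gives ((9000 + 26 + 7) & ~7) + 2 + 16 = 9032 + 18 = 9050 bytes per receive buffer, while any MTU of 1492 or below simply uses PKT_BUF_SZ (1538).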
/* Initialize all Rx descriptors. */
break;
skb->dev = dev; /* Mark as being used by this device. */
skb_reserve(skb, 2); /* 16 byte align the IP header. */
- hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
+ hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
/* -2 because it doesn't REALLY have that first 2 bytes -KDU */
- hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn |
+ hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn |
DescEndPacket | DescIntr | (hmp->rx_buf_sz -2));
}
hmp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
unsigned entry;
u16 status;
- /* Ok, now make sure that the queue has space before trying to
+ /* Ok, now make sure that the queue has space before trying to
add another skbuff. if we return non-zero the scheduler
should interpret this as a queue full and requeue the buffer
for later.
if( !(status & 0x0001) || (status & 0x0002))
writew(0x0001, hmp->base + TxCmd);
return 1;
- }
+ }
/* Caution: the write order is important here, set the field
with the "ownership" bits last. */
}
#endif
- hmp->tx_ring[entry].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
+ hmp->tx_ring[entry].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
skb->data, skb->len, PCI_DMA_TODEVICE));
-
+
/* Hmmmm, could probably put a DescIntr on these, but the way
the driver is currently coded makes Tx interrupts unnecessary
since the clearing of the Tx ring is handled by the start_xmit
routine. This organization helps mitigate the interrupts a
bit and probably renders the max_tx_latency param useless.
-
+
Update: Putting a DescIntr bit on all of the descriptors and
mitigating interrupt frequency with the tx_min_pkt parameter. -KDU
*/
* hence, any packet that got put off because we were in the transmit
* routine should IMMEDIATELY get a chance to be re-queued. -KDU
*/
- if ((hmp->cur_tx - hmp->dirty_tx) < (TX_RING_SIZE - 4))
+ if ((hmp->cur_tx - hmp->dirty_tx) < (TX_RING_SIZE - 4))
netif_wake_queue(dev); /* Typical path */
else {
hmp->tx_full = 1;
/* This code should RARELY need to execute. After all, this is
* a gigabit link, it should consume packets as fast as we put
* them in AND we clear the Tx ring in hamachi_start_xmit().
- */
+ */
if (hmp->tx_full){
for (; hmp->cur_tx - hmp->dirty_tx > 0; hmp->dirty_tx++){
int entry = hmp->dirty_tx % TX_RING_SIZE;
struct sk_buff *skb;
- if (hmp->tx_ring[entry].status_n_length & cpu_to_le32(DescOwn))
+ if (hmp->tx_ring[entry].status_n_length & cpu_to_le32(DescOwn))
break;
skb = hmp->tx_skbuff[entry];
/* Free the original skb. */
if (skb){
- pci_unmap_single(hmp->pci_dev,
- hmp->tx_ring[entry].addr,
+ pci_unmap_single(hmp->pci_dev,
+ hmp->tx_ring[entry].addr,
skb->len,
PCI_DMA_TODEVICE);
dev_kfree_skb_irq(skb);
hmp->tx_skbuff[entry] = NULL;
}
hmp->tx_ring[entry].status_n_length = 0;
- if (entry >= TX_RING_SIZE-1)
- hmp->tx_ring[TX_RING_SIZE-1].status_n_length |=
+ if (entry >= TX_RING_SIZE-1)
+ hmp->tx_ring[TX_RING_SIZE-1].status_n_length |=
cpu_to_le32(DescEndRing);
hmp->stats.tx_packets++;
}
struct hamachi_desc *desc = &(hmp->rx_ring[entry]);
u32 desc_status = le32_to_cpu(desc->status_n_length);
u16 data_size = desc_status; /* Implicit truncate */
- u8 *buf_addr;
+ u8 *buf_addr;
s32 frame_status;
-
+
if (desc_status & DescOwn)
break;
pci_dma_sync_single_for_cpu(hmp->pci_dev,
} else {
struct sk_buff *skb;
/* Omit CRC */
- u16 pkt_len = (frame_status & 0x07ff) - 4;
+ u16 pkt_len = (frame_status & 0x07ff) - 4;
#ifdef RX_CHECKSUM
u32 pfck = *(u32 *) &buf_addr[data_size - 8];
#endif
PCI_DMA_FROMDEVICE);
/* Call copy + cksum if available. */
#if 1 || USE_IP_COPYSUM
- eth_copy_and_sum(skb,
+ eth_copy_and_sum(skb,
hmp->rx_skbuff[entry]->data, pkt_len, 0);
skb_put(skb, pkt_len);
#else
hmp->rx_buf_sz,
PCI_DMA_FROMDEVICE);
} else {
- pci_unmap_single(hmp->pci_dev,
+ pci_unmap_single(hmp->pci_dev,
hmp->rx_ring[entry].addr,
hmp->rx_buf_sz, PCI_DMA_FROMDEVICE);
skb_put(skb = hmp->rx_skbuff[entry], pkt_len);
p_r = *p;
p_r1 = *(p-1);
switch (inv) {
- case 0:
+ case 0:
crc = (p_r & 0xffff) + (p_r >> 16);
break;
- case 1:
+ case 1:
crc = (p_r >> 16) + (p_r & 0xffff)
- + (p_r1 >> 16 & 0xff00);
+ + (p_r1 >> 16 & 0xff00);
break;
- case 2:
- crc = p_r + (p_r1 >> 16);
+ case 2:
+ crc = p_r + (p_r1 >> 16);
break;
- case 3:
- crc = p_r + (p_r1 & 0xff00) + (p_r1 >> 16);
+ case 3:
+ crc = p_r + (p_r1 & 0xff00) + (p_r1 >> 16);
break;
default: /*NOTREACHED*/ crc = 0;
}
* could do the pseudo myself and return
* CHECKSUM_UNNECESSARY
*/
- skb->ip_summed = CHECKSUM_HW;
+ skb->ip_summed = CHECKSUM_COMPLETE;
}
- }
+ }
}
#endif /* RX_CHECKSUM */
break; /* Better luck next round. */
skb->dev = dev; /* Mark as being used by this device. */
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
- desc->addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
+ desc->addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
}
desc->status_n_length = cpu_to_le32(hmp->rx_buf_sz);
if (entry >= RX_RING_SIZE-1)
- desc->status_n_length |= cpu_to_le32(DescOwn |
+ desc->status_n_length |= cpu_to_le32(DescOwn |
DescEndPacket | DescEndRing | DescIntr);
else
- desc->status_n_length |= cpu_to_le32(DescOwn |
+ desc->status_n_length |= cpu_to_le32(DescOwn |
DescEndPacket | DescIntr);
}
hmp->rx_ring[i].status_n_length = 0;
hmp->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
if (skb) {
- pci_unmap_single(hmp->pci_dev,
- hmp->rx_ring[i].addr, hmp->rx_buf_sz,
+ pci_unmap_single(hmp->pci_dev,
+ hmp->rx_ring[i].addr, hmp->rx_buf_sz,
PCI_DMA_FROMDEVICE);
dev_kfree_skb(skb);
hmp->rx_skbuff[i] = NULL;
for (i = 0; i < TX_RING_SIZE; i++) {
skb = hmp->tx_skbuff[i];
if (skb) {
- pci_unmap_single(hmp->pci_dev,
- hmp->tx_ring[i].addr, skb->len,
+ pci_unmap_single(hmp->pci_dev,
+ hmp->tx_ring[i].addr, skb->len,
PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
hmp->tx_skbuff[i] = NULL;
according to ifconfig. It does get incremented in hamachi_tx(),
so I think I'll comment it out here and see if better things
happen.
- */
+ */
/* hmp->stats.tx_packets = readl(ioaddr + 0x000); */
hmp->stats.rx_bytes = readl(ioaddr + 0x330); /* Total Uni+Brd+Multi */
void __iomem *ioaddr = hmp->base;
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
- /* Unconditionally log net taps. */
- printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
writew(0x000F, ioaddr + AddrMode);
} else if ((dev->mc_count > 63) || (dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
return mii_link_ok(&np->mii_if);
}
- static struct ethtool_ops ethtool_ops = {
+ static const struct ethtool_ops ethtool_ops = {
.begin = check_if_running,
.get_drvinfo = hamachi_get_drvinfo,
.get_settings = hamachi_get_settings,
.get_link = hamachi_get_link,
};
- static struct ethtool_ops ethtool_ops_no_mii = {
+ static const struct ethtool_ops ethtool_ops_no_mii = {
.begin = check_if_running,
.get_drvinfo = hamachi_get_drvinfo,
};
if (dev) {
struct hamachi_private *hmp = netdev_priv(dev);
- pci_free_consistent(pdev, RX_TOTAL_SIZE, hmp->rx_ring,
+ pci_free_consistent(pdev, RX_TOTAL_SIZE, hmp->rx_ring,
hmp->rx_ring_dma);
- pci_free_consistent(pdev, TX_TOTAL_SIZE, hmp->tx_ring,
+ pci_free_consistent(pdev, TX_TOTAL_SIZE, hmp->tx_ring,
hmp->tx_ring_dma);
unregister_netdev(dev);
iounmap(hmp->base);
struct sk_buff *skb)
{
#if defined(CONFIG_IBM_EMAC_TAH)
- if (skb->ip_summed == CHECKSUM_HW) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
++dev->stats.tx_packets_csum;
return EMAC_TX_CTRL_TAH_CSUM;
}
info->regdump_len = emac_ethtool_get_regs_len(ndev);
}
- static struct ethtool_ops emac_ethtool_ops = {
+ static const struct ethtool_ops emac_ethtool_ops = {
.get_settings = emac_ethtool_get_settings,
.set_settings = emac_ethtool_set_settings,
.get_drvinfo = emac_ethtool_get_drvinfo,
*/
#define IOC3_NAME "ioc3-eth"
- #define IOC3_VERSION "2.6.3-3"
+ #define IOC3_VERSION "2.6.3-4"
#include <linux/init.h>
#include <linux/delay.h>
static void ioc3_init(struct net_device *dev);
static const char ioc3_str[] = "IOC3 Ethernet";
- static struct ethtool_ops ioc3_ethtool_ops;
+ static const struct ethtool_ops ioc3_ethtool_ops;
/* We use this to acquire receive skb's that we can DMA directly into. */
* MAC header which should not be summed and the TCP/UDP pseudo headers
* manually.
*/
- if (skb->ip_summed == CHECKSUM_HW) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
int proto = ntohs(skb->nh.iph->protocol);
unsigned int csoff;
struct iphdr *ih = skb->nh.iph;
return rc;
}
- static struct ethtool_ops ioc3_ethtool_ops = {
+ static const struct ethtool_ops ioc3_ethtool_ops = {
.get_drvinfo = ioc3_get_drvinfo,
.get_settings = ioc3_get_settings,
.set_settings = ioc3_set_settings,
netif_stop_queue(dev); /* Lock out others. */
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
- /* Unconditionally log net taps. */
- printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
ip->emcr |= EMCR_PROMISC;
ioc3_w_emcr(ip->emcr);
(void) ioc3_r_emcr();
********************************************************************/
#include <linux/module.h>
- #include <linux/config.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/skbuff.h>
IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );
- for (i=0; (io[i] < 2000) && (i < 4); i++) {
+ for (i=0; (io[i] < 2000) && (i < ARRAY_SIZE(dev_self)); i++) {
if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
return 0;
}
IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
- for (i=0; i < 4; i++) {
+ for (i=0; i < ARRAY_SIZE(dev_self); i++) {
if (dev_self[i])
w83977af_close(dev_self[i]);
}
#else
#define DRIVERNAPI "-NAPI"
#endif
- #define DRV_VERSION "1.0.109-k2"DRIVERNAPI
+ #define DRV_VERSION "1.0.112-k2"DRIVERNAPI
char ixgb_driver_version[] = DRV_VERSION;
static char ixgb_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
static void ixgb_netpoll(struct net_device *dev);
#endif
- /* Exported from other modules */
+ static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
+ enum pci_channel_state state);
+ static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev);
+ static void ixgb_io_resume (struct pci_dev *pdev);
+ /* Exported from other modules */
extern void ixgb_check_options(struct ixgb_adapter *adapter);
+ static struct pci_error_handlers ixgb_err_handler = {
+ .error_detected = ixgb_io_error_detected,
+ .slot_reset = ixgb_io_slot_reset,
+ .resume = ixgb_io_resume,
+ };
+
static struct pci_driver ixgb_driver = {
.name = ixgb_driver_name,
.id_table = ixgb_pci_tbl,
.probe = ixgb_probe,
.remove = __devexit_p(ixgb_remove),
+ .err_handler = &ixgb_err_handler
};
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/* some defines for controlling descriptor fetches in h/w */
- #define RXDCTL_WTHRESH_DEFAULT 16 /* chip writes back at this many or RXT0 */
- #define RXDCTL_PTHRESH_DEFAULT 0 /* chip considers prefech below
- * this */
- #define RXDCTL_HTHRESH_DEFAULT 0 /* chip will only prefetch if tail
- * is pushed this many descriptors
- * from head */
+ #define RXDCTL_WTHRESH_DEFAULT 15 /* chip writes back at this many or RXT0 */
+ #define RXDCTL_PTHRESH_DEFAULT 0 /* chip considers prefech below
+ * this */
+ #define RXDCTL_HTHRESH_DEFAULT 0 /* chip will only prefetch if tail
+ * is pushed this many descriptors
+ * from head */
/**
* ixgb_init_module - Driver Registration Routine
printk(KERN_INFO "%s\n", ixgb_copyright);
- return pci_module_init(&ixgb_driver);
+ return pci_register_driver(&ixgb_driver);
}
module_init(ixgb_init_module);
int err;
if (likely(skb_is_gso(skb))) {
+ struct ixgb_buffer *buffer_info;
if (skb_header_cloned(skb)) {
err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
if (err)
i = adapter->tx_ring.next_to_use;
context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
+ buffer_info = &adapter->tx_ring.buffer_info[i];
+ WARN_ON(buffer_info->dma != 0);
context_desc->ipcss = ipcss;
context_desc->ipcso = ipcso;
unsigned int i;
uint8_t css, cso;
- if(likely(skb->ip_summed == CHECKSUM_HW)) {
+ if(likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ struct ixgb_buffer *buffer_info;
css = skb->h.raw - skb->data;
cso = (skb->h.raw + skb->csum) - skb->data;
i = adapter->tx_ring.next_to_use;
context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
+ buffer_info = &adapter->tx_ring.buffer_info[i];
+ WARN_ON(buffer_info->dma != 0);
context_desc->tucss = css;
context_desc->tucso = cso;
buffer_info = &tx_ring->buffer_info[i];
size = min(len, IXGB_MAX_DATA_PER_TXD);
buffer_info->length = size;
+ WARN_ON(buffer_info->dma != 0);
buffer_info->dma =
pci_map_single(adapter->pdev,
skb->data + offset,
ixgb_update_stats(struct ixgb_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+
+ /* Prevent stats update while adapter is being reset */
+ if (pdev->error_state && pdev->error_state != pci_channel_io_normal)
+ return;
if((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
(netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
if (unlikely(netif_queue_stopped(netdev))) {
spin_lock(&adapter->tx_lock);
if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev) &&
- (IXGB_DESC_UNUSED(tx_ring) > IXGB_TX_QUEUE_WAKE))
+ (IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED))
netif_wake_queue(netdev);
spin_unlock(&adapter->tx_lock);
}
#define IXGB_CB_LENGTH 256
if (length < IXGB_CB_LENGTH) {
struct sk_buff *new_skb =
- dev_alloc_skb(length + NET_IP_ALIGN);
+ netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
if (new_skb) {
skb_reserve(new_skb, NET_IP_ALIGN);
- new_skb->dev = netdev;
memcpy(new_skb->data - NET_IP_ALIGN,
skb->data - NET_IP_ALIGN,
length + NET_IP_ALIGN);
/* leave three descriptors unused */
while(--cleancount > 2) {
/* recycle! it's good for you */
- if (!(skb = buffer_info->skb))
- skb = dev_alloc_skb(adapter->rx_buffer_len
- + NET_IP_ALIGN);
- else {
+ skb = buffer_info->skb;
+ if (skb) {
skb_trim(skb, 0);
goto map_skb;
}
+ skb = netdev_alloc_skb(netdev, adapter->rx_buffer_len
+ + NET_IP_ALIGN);
if (unlikely(!skb)) {
/* Better luck next round */
adapter->alloc_rx_buff_failed++;
*/
skb_reserve(skb, NET_IP_ALIGN);
- skb->dev = netdev;
-
buffer_info->skb = skb;
buffer_info->length = adapter->rx_buffer_len;
map_skb:
static void ixgb_netpoll(struct net_device *dev)
{
- struct ixgb_adapter *adapter = dev->priv;
+ struct ixgb_adapter *adapter = netdev_priv(dev);
disable_irq(adapter->pdev->irq);
ixgb_intr(adapter->pdev->irq, dev, NULL);
}
#endif
+ /**
+ * ixgb_io_error_detected() - called when a PCI error is detected
+ * @pdev pointer to pci device with error
+ * @state pci channel state after error
+ *
+ * This callback is called by the PCI subsystem whenever
+ * a PCI bus error is detected.
+ */
+ static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
+ enum pci_channel_state state)
+ {
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgb_adapter *adapter = netdev->priv;
+
+ if(netif_running(netdev))
+ ixgb_down(adapter, TRUE);
+
+ pci_disable_device(pdev);
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+ }
+
+ /**
+ * ixgb_io_slot_reset - called after the pci bus has been reset.
+ * @pdev pointer to pci device with error
+ *
+ * This callback is called after the PCI bus has been reset.
+ * Basically, this tries to restart the card from scratch.
+ * This is a shortened version of the device probe/discovery code;
+ * it resembles the first half of the ixgb_probe() routine.
+ */
+ static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev)
+ {
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgb_adapter *adapter = netdev->priv;
+
+ if(pci_enable_device(pdev)) {
+ DPRINTK(PROBE, ERR, "Cannot re-enable PCI device after reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ /* Perform card reset only on one instance of the card */
+ if (0 != PCI_FUNC (pdev->devfn))
+ return PCI_ERS_RESULT_RECOVERED;
+
+ pci_set_master(pdev);
+
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ ixgb_reset(adapter);
+
+ /* Make sure the EEPROM is good */
+ if(!ixgb_validate_eeprom_checksum(&adapter->hw)) {
+ DPRINTK(PROBE, ERR, "After reset, the EEPROM checksum is not valid.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
+ memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
+
+ if(!is_valid_ether_addr(netdev->perm_addr)) {
+ DPRINTK(PROBE, ERR, "After reset, invalid MAC address.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_RECOVERED;
+ }
+
+ /**
+ * ixgb_io_resume - called when it's OK to resume normal operations
+ * @pdev pointer to pci device with error
+ *
+ * The error recovery driver tells us that it's OK to resume
+ * normal operation. The implementation resembles the second half
+ * of the ixgb_probe() routine.
+ */
+ static void ixgb_io_resume (struct pci_dev *pdev)
+ {
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgb_adapter *adapter = netdev->priv;
+
+ pci_set_master(pdev);
+
+ if(netif_running(netdev)) {
+ if(ixgb_up(adapter)) {
+ printk ("ixgb: can't bring device back up after reset\n");
+ return;
+ }
+ }
+
+ netif_device_attach(netdev);
+ mod_timer(&adapter->watchdog_timer, jiffies);
+ }
+
/* ixgb_main.c */
static int mv643xx_mdio_read(struct net_device *dev, int phy_id, int location);
static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location, int val);
static int mv643xx_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
- static struct ethtool_ops mv643xx_ethtool_ops;
+ static const struct ethtool_ops mv643xx_ethtool_ops;
static char mv643xx_driver_name[] = "mv643xx_eth";
static char mv643xx_driver_version[] = "1.0";
desc->byte_cnt = length;
desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
- if (skb->ip_summed == CHECKSUM_HW) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
BUG_ON(skb->protocol != ETH_P_IP);
cmd_sts |= ETH_GEN_TCP_UDP_CHECKSUM |
eth_update_mib_counters(mp);
for (i = 0; i < MV643XX_STATS_LEN; i++) {
- char *p = (char *)mp+mv643xx_gstrings_stats[i].stat_offset;
+ char *p = (char *)mp+mv643xx_gstrings_stats[i].stat_offset;
data[i] = (mv643xx_gstrings_stats[i].sizeof_stat ==
sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
}
return generic_mii_ioctl(&mp->mii, if_mii(ifr), cmd, NULL);
}
- static struct ethtool_ops mv643xx_ethtool_ops = {
+ static const struct ethtool_ops mv643xx_ethtool_ops = {
.get_settings = mv643xx_get_settings,
.set_settings = mv643xx_set_settings,
.get_drvinfo = mv643xx_get_drvinfo,
u8 mac_addr[6]; /* eeprom mac address */
unsigned long serial_number;
int vendor_specific_offset;
+ int fw_multicast_support;
u32 devctl;
u16 msi_flags;
u32 read_dma;
u32 write_dma;
u32 read_write_dma;
+ u32 link_changes;
+ u32 msg_enable;
};
static char *myri10ge_fw_unaligned = "myri10ge_ethp_z8e.dat";
MODULE_PARM_DESC(myri10ge_max_irq_loops,
"Set stuck legacy IRQ detection threshold\n");
+ #define MYRI10GE_MSG_DEFAULT NETIF_MSG_LINK
+
+ static int myri10ge_debug = -1; /* defaults above */
+ module_param(myri10ge_debug, int, 0);
+ MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)");
+
#define MYRI10GE_FW_OFFSET 1024*1024
#define MYRI10GE_HIGHPART_TO_U32(X) \
(sizeof (X) == 8) ? ((u32)((u64)(X) >> 32)) : (0)
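/* Firmware commands and DMA test buffers take 64-bit bus addresses split
 * into two 32-bit halves; MYRI10GE_HIGHPART_TO_U32 yields the upper half
 * (or 0 when dma_addr_t is only 32 bits wide), and the matching
 * MYRI10GE_LOWPART_TO_U32 macro used below supplies the lower half. */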
struct mcp_cmd *buf;
char buf_bytes[sizeof(*buf) + 8];
struct mcp_cmd_response *response = mgp->cmd;
- char __iomem *cmd_addr = mgp->sram + MXGEFW_CMD_OFFSET;
+ char __iomem *cmd_addr = mgp->sram + MXGEFW_ETH_CMD;
u32 dma_low, dma_high, result, value;
int sleep_total = 0;
if (result == 0) {
data->data0 = value;
return 0;
+ } else if (result == MXGEFW_CMD_UNKNOWN) {
+ return -ENOSYS;
} else {
dev_err(&mgp->pdev->dev,
"command %d failed, result = %d\n",
buf[4] = htonl(dma_low); /* dummy addr LSW */
buf[5] = htonl(enable); /* enable? */
- submit = mgp->sram + 0xfc01c0;
+ submit = mgp->sram + MXGEFW_BOOT_DUMMY_RDMA;
myri10ge_pio_copy(submit, &buf, sizeof(buf));
for (i = 0; mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 20; i++)
buf[5] = htonl(8); /* where to copy to */
buf[6] = htonl(0); /* where to jump to */
- submit = mgp->sram + 0xfc0000;
+ submit = mgp->sram + MXGEFW_BOOT_HANDOFF;
myri10ge_pio_copy(submit, &buf, sizeof(buf));
mb();
mgp->rx_small.cnt = 0;
mgp->rx_done.idx = 0;
mgp->rx_done.cnt = 0;
+ mgp->link_changes = 0;
status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr);
myri10ge_change_promisc(mgp, 0, 0);
myri10ge_change_pause(mgp, mgp->pause);
* pages directly and building a fraglist in the near future.
*/
- static inline struct sk_buff *myri10ge_alloc_big(int bytes)
+ static inline struct sk_buff *myri10ge_alloc_big(struct net_device *dev,
+ int bytes)
{
struct sk_buff *skb;
unsigned long data, roundup;
- skb = dev_alloc_skb(bytes + 4096 + MXGEFW_PAD);
+ skb = netdev_alloc_skb(dev, bytes + 4096 + MXGEFW_PAD);
if (skb == NULL)
return NULL;
/* Allocate 2x as much space as required and use whichever portion
* does not cross a 4KB boundary */
- static inline struct sk_buff *myri10ge_alloc_small_safe(unsigned int bytes)
+ static inline struct sk_buff *myri10ge_alloc_small_safe(struct net_device *dev,
+ unsigned int bytes)
{
struct sk_buff *skb;
unsigned long data, boundary;
- skb = dev_alloc_skb(2 * (bytes + MXGEFW_PAD) - 1);
+ skb = netdev_alloc_skb(dev, 2 * (bytes + MXGEFW_PAD) - 1);
if (unlikely(skb == NULL))
return NULL;
/* Allocate just enough space, and verify that the allocated
* space does not cross a 4KB boundary */
- static inline struct sk_buff *myri10ge_alloc_small(int bytes)
+ static inline struct sk_buff *myri10ge_alloc_small(struct net_device *dev,
+ int bytes)
{
struct sk_buff *skb;
unsigned long roundup, data, end;
- skb = dev_alloc_skb(bytes + 16 + MXGEFW_PAD);
+ skb = netdev_alloc_skb(dev, bytes + 16 + MXGEFW_PAD);
if (unlikely(skb == NULL))
return NULL;
"myri10ge_alloc_small: small skb crossed 4KB boundary\n");
myri10ge_skb_cross_4k = 1;
dev_kfree_skb_any(skb);
- skb = myri10ge_alloc_small_safe(bytes);
+ skb = myri10ge_alloc_small_safe(dev, bytes);
}
return skb;
}
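/* Receive buffer strategy: big frames get a 4KB-padded skb, small frames
 * normally use a tight allocation, and if a small skb is ever seen to cross
 * a 4KB boundary the driver falls back (via the myri10ge_skb_cross_4k flag)
 * to the "safe" allocator that over-allocates 2x and picks a window that
 * does not straddle a 4KB page. */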
static inline int
- myri10ge_getbuf(struct myri10ge_rx_buf *rx, struct pci_dev *pdev, int bytes,
- int idx)
+ myri10ge_getbuf(struct myri10ge_rx_buf *rx, struct myri10ge_priv *mgp,
+ int bytes, int idx)
{
+ struct net_device *dev = mgp->dev;
+ struct pci_dev *pdev = mgp->pdev;
struct sk_buff *skb;
dma_addr_t bus;
int len, retval = 0;
bytes += VLAN_HLEN; /* account for 802.1q vlan tag */
if ((bytes + MXGEFW_PAD) > (4096 - 16) /* linux overhead */ )
- skb = myri10ge_alloc_big(bytes);
+ skb = myri10ge_alloc_big(dev, bytes);
else if (myri10ge_skb_cross_4k)
- skb = myri10ge_alloc_small_safe(bytes);
+ skb = myri10ge_alloc_small_safe(dev, bytes);
else
- skb = myri10ge_alloc_small(bytes);
+ skb = myri10ge_alloc_small(dev, bytes);
if (unlikely(skb == NULL)) {
rx->alloc_fail++;
(vh->h_vlan_encapsulated_proto == htons(ETH_P_IP) ||
vh->h_vlan_encapsulated_proto == htons(ETH_P_IPV6))) {
skb->csum = hw_csum;
- skb->ip_summed = CHECKSUM_HW;
+ skb->ip_summed = CHECKSUM_COMPLETE;
}
}
unmap_len = pci_unmap_len(&rx->info[idx], len);
/* try to replace the received skb */
- if (myri10ge_getbuf(rx, mgp->pdev, bytes, idx)) {
+ if (myri10ge_getbuf(rx, mgp, bytes, idx)) {
/* drop the frame -- the old skbuf is re-cycled */
mgp->stats.rx_dropped += 1;
return 0;
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, mgp->dev);
- skb->dev = mgp->dev;
if (mgp->csum_flag) {
if ((skb->protocol == ntohs(ETH_P_IP)) ||
(skb->protocol == ntohs(ETH_P_IPV6))) {
skb->csum = ntohs((u16) csum);
- skb->ip_summed = CHECKSUM_HW;
+ skb->ip_summed = CHECKSUM_COMPLETE;
} else
myri10ge_vlan_ip_csum(skb, ntohs((u16) csum));
}
if (mgp->link_state != stats->link_up) {
mgp->link_state = stats->link_up;
if (mgp->link_state) {
- printk(KERN_INFO "myri10ge: %s: link up\n",
- mgp->dev->name);
+ if (netif_msg_link(mgp))
+ printk(KERN_INFO
+ "myri10ge: %s: link up\n",
+ mgp->dev->name);
netif_carrier_on(mgp->dev);
+ mgp->link_changes++;
} else {
- printk(KERN_INFO "myri10ge: %s: link down\n",
- mgp->dev->name);
+ if (netif_msg_link(mgp))
+ printk(KERN_INFO
+ "myri10ge: %s: link down\n",
+ mgp->dev->name);
netif_carrier_off(mgp->dev);
+ mgp->link_changes++;
}
}
if (mgp->rdma_tags_available !=
"serial_number", "tx_pkt_start", "tx_pkt_done",
"tx_req", "tx_done", "rx_small_cnt", "rx_big_cnt",
"wake_queue", "stop_queue", "watchdog_resets", "tx_linearized",
- "link_up", "dropped_link_overflow", "dropped_link_error_or_filtered",
+ "link_changes", "link_up", "dropped_link_overflow",
+ "dropped_link_error_or_filtered", "dropped_multicast_filtered",
"dropped_runt", "dropped_overrun", "dropped_no_small_buffer",
"dropped_no_big_buffer"
};
data[i++] = (unsigned int)mgp->stop_queue;
data[i++] = (unsigned int)mgp->watchdog_resets;
data[i++] = (unsigned int)mgp->tx_linearized;
+ data[i++] = (unsigned int)mgp->link_changes;
data[i++] = (unsigned int)ntohl(mgp->fw_stats->link_up);
data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_link_overflow);
data[i++] =
(unsigned int)ntohl(mgp->fw_stats->dropped_link_error_or_filtered);
+ data[i++] =
+ (unsigned int)ntohl(mgp->fw_stats->dropped_multicast_filtered);
data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_runt);
data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_overrun);
data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_small_buffer);
data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_big_buffer);
}
- static struct ethtool_ops myri10ge_ethtool_ops = {
+ static void myri10ge_set_msglevel(struct net_device *netdev, u32 value)
+ {
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+ mgp->msg_enable = value;
+ }
+
+ static u32 myri10ge_get_msglevel(struct net_device *netdev)
+ {
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+ return mgp->msg_enable;
+ }
+
+ static const struct ethtool_ops myri10ge_ethtool_ops = {
.get_settings = myri10ge_get_settings,
.get_drvinfo = myri10ge_get_drvinfo,
.get_coalesce = myri10ge_get_coalesce,
#endif
.get_strings = myri10ge_get_strings,
.get_stats_count = myri10ge_get_stats_count,
- .get_ethtool_stats = myri10ge_get_ethtool_stats
+ .get_ethtool_stats = myri10ge_get_ethtool_stats,
+ .set_msglevel = myri10ge_set_msglevel,
+ .get_msglevel = myri10ge_get_msglevel
};
static int myri10ge_allocate_rings(struct net_device *dev)
/* Fill the receive rings */
for (i = 0; i <= mgp->rx_small.mask; i++) {
- status = myri10ge_getbuf(&mgp->rx_small, mgp->pdev,
+ status = myri10ge_getbuf(&mgp->rx_small, mgp,
mgp->small_bytes, i);
if (status) {
printk(KERN_ERR
for (i = 0; i <= mgp->rx_big.mask; i++) {
status =
- myri10ge_getbuf(&mgp->rx_big, mgp->pdev,
- dev->mtu + ETH_HLEN, i);
+ myri10ge_getbuf(&mgp->rx_big, mgp, dev->mtu + ETH_HLEN, i);
if (status) {
printk(KERN_ERR
"myri10ge: %s: alloced only %d big bufs\n",
}
if (mgp->mtrr >= 0) {
- mgp->tx.wc_fifo = (u8 __iomem *) mgp->sram + 0x200000;
- mgp->rx_small.wc_fifo = (u8 __iomem *) mgp->sram + 0x300000;
- mgp->rx_big.wc_fifo = (u8 __iomem *) mgp->sram + 0x340000;
+ mgp->tx.wc_fifo = (u8 __iomem *) mgp->sram + MXGEFW_ETH_SEND_4;
+ mgp->rx_small.wc_fifo =
+ (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_SMALL;
+ mgp->rx_big.wc_fifo =
+ (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_BIG;
} else {
mgp->tx.wc_fifo = NULL;
mgp->rx_small.wc_fifo = NULL;
cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->fw_stats_bus);
cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->fw_stats_bus);
- status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA, &cmd, 0);
+ cmd.data2 = sizeof(struct mcp_irq_data);
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0);
+ if (status == -ENOSYS) {
+ dma_addr_t bus = mgp->fw_stats_bus;
+ bus += offsetof(struct mcp_irq_data, send_done_count);
+ cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus);
+ cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus);
+ status = myri10ge_send_cmd(mgp,
+ MXGEFW_CMD_SET_STATS_DMA_OBSOLETE,
+ &cmd, 0);
+ /* Firmware cannot support multicast without STATS_DMA_V2 */
+ mgp->fw_multicast_support = 0;
+ } else {
+ mgp->fw_multicast_support = 1;
+ }
if (status) {
printk(KERN_ERR "myri10ge: %s: Couldn't set stats DMA\n",
dev->name);
if (cnt > 0) {
/* pad it to 64 bytes. The src is 64 bytes bigger than it
* needs to be so that we don't overrun it */
- myri10ge_pio_copy(tx->wc_fifo + (cnt << 18), src, 64);
+ myri10ge_pio_copy(tx->wc_fifo + MXGEFW_ETH_SEND_OFFSET(cnt),
+ src, 64);
mb();
}
}
pseudo_hdr_offset = 0;
odd_flag = 0;
flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST);
- if (likely(skb->ip_summed == CHECKSUM_HW)) {
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
cksum_offset = (skb->h.raw - skb->data);
pseudo_hdr_offset = (skb->h.raw + skb->csum) - skb->data;
/* If the headers are excessively large, then we must
* fall back to a software checksum */
if (unlikely(cksum_offset > 255 || pseudo_hdr_offset > 127)) {
- if (skb_checksum_help(skb, 0))
+ if (skb_checksum_help(skb))
goto drop;
cksum_offset = 0;
pseudo_hdr_offset = 0;
static void myri10ge_set_multicast_list(struct net_device *dev)
{
+ struct myri10ge_cmd cmd;
+ struct myri10ge_priv *mgp;
+ struct dev_mc_list *mc_list;
+ int err;
+
+ mgp = netdev_priv(dev);
/* can be called from atomic contexts,
* pass 1 to force atomicity in myri10ge_send_cmd() */
- myri10ge_change_promisc(netdev_priv(dev), dev->flags & IFF_PROMISC, 1);
+ myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1);
+
+ /* This firmware is known to not support multicast */
+ if (!mgp->fw_multicast_support)
+ return;
+
+ /* Disable multicast filtering */
+
+ err = myri10ge_send_cmd(mgp, MXGEFW_ENABLE_ALLMULTI, &cmd, 1);
+ if (err != 0) {
+ printk(KERN_ERR "myri10ge: %s: Failed MXGEFW_ENABLE_ALLMULTI,"
+ " error status: %d\n", dev->name, err);
+ goto abort;
+ }
+
+ if (dev->flags & IFF_ALLMULTI) {
+ /* request to disable multicast filtering, so quit here */
+ return;
+ }
+
+ /* Flush the filters */
+
+ err = myri10ge_send_cmd(mgp, MXGEFW_LEAVE_ALL_MULTICAST_GROUPS,
+ &cmd, 1);
+ if (err != 0) {
+ printk(KERN_ERR
+ "myri10ge: %s: Failed MXGEFW_LEAVE_ALL_MULTICAST_GROUPS"
+ ", error status: %d\n", dev->name, err);
+ goto abort;
+ }
+
+ /* Walk the multicast list, and add each address */
+ for (mc_list = dev->mc_list; mc_list != NULL; mc_list = mc_list->next) {
+ memcpy(&cmd.data0, &mc_list->dmi_addr, 4);
+ memcpy(&cmd.data1, ((char *)&mc_list->dmi_addr) + 4, 2);
+ cmd.data0 = htonl(cmd.data0);
+ cmd.data1 = htonl(cmd.data1);
+ err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP,
+ &cmd, 1);
+
+ if (err != 0) {
+ printk(KERN_ERR "myri10ge: %s: Failed "
+ "MXGEFW_JOIN_MULTICAST_GROUP, error status:"
+ "%d\t", dev->name, err);
+ printk(KERN_ERR "MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+ ((unsigned char *)&mc_list->dmi_addr)[0],
+ ((unsigned char *)&mc_list->dmi_addr)[1],
+ ((unsigned char *)&mc_list->dmi_addr)[2],
+ ((unsigned char *)&mc_list->dmi_addr)[3],
+ ((unsigned char *)&mc_list->dmi_addr)[4],
+ ((unsigned char *)&mc_list->dmi_addr)[5]
+ );
+ goto abort;
+ }
+ }
+ /* Enable multicast filtering */
+ err = myri10ge_send_cmd(mgp, MXGEFW_DISABLE_ALLMULTI, &cmd, 1);
+ if (err != 0) {
+ printk(KERN_ERR "myri10ge: %s: Failed MXGEFW_DISABLE_ALLMULTI,"
+ "error status: %d\n", dev->name, err);
+ goto abort;
+ }
+
+ return;
+
+ abort:
+ return;
}
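/* Multicast programming sequence for the firmware: temporarily enable
 * ALLMULTI, flush the old filter list, join each address on dev->mc_list,
 * then disable ALLMULTI again so hardware filtering takes effect. A failure
 * part-way through bails out with ALLMULTI still enabled, which keeps
 * multicast traffic flowing at the cost of filtering. */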
static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
*/
#define PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE 0x0132
+ #define PCI_DEVICE_ID_INTEL_E5000_PCIE23 0x25f7
+ #define PCI_DEVICE_ID_INTEL_E5000_PCIE47 0x25fa
static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
{
mgp->fw_name = myri10ge_fw_unaligned;
if (myri10ge_force_firmware == 0) {
+ int link_width, exp_cap;
+ u16 lnk;
+
+ exp_cap = pci_find_capability(mgp->pdev, PCI_CAP_ID_EXP);
+ pci_read_config_word(mgp->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
+ link_width = (lnk >> 4) & 0x3f;
+
myri10ge_enable_ecrc(mgp);
- /* Check to see if the upstream bridge is known to
- * provide aligned completions */
- if (bridge
- /* ServerWorks HT2000/HT1000 */
- && bridge->vendor == PCI_VENDOR_ID_SERVERWORKS
- && bridge->device ==
- PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE) {
+ /* Check to see if Link is less than 8 or if the
+ * upstream bridge is known to provide aligned
+ * completions */
+ if (link_width < 8) {
+ dev_info(&mgp->pdev->dev, "PCIE x%d Link\n",
+ link_width);
+ mgp->tx.boundary = 4096;
+ mgp->fw_name = myri10ge_fw_aligned;
+ } else if (bridge &&
+ /* ServerWorks HT2000/HT1000 */
+ ((bridge->vendor == PCI_VENDOR_ID_SERVERWORKS
+ && bridge->device ==
+ PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE)
+ /* All Intel E5000 PCIE ports */
+ || (bridge->vendor == PCI_VENDOR_ID_INTEL
+ && bridge->device >=
+ PCI_DEVICE_ID_INTEL_E5000_PCIE23
+ && bridge->device <=
+ PCI_DEVICE_ID_INTEL_E5000_PCIE47))) {
dev_info(&mgp->pdev->dev,
"Assuming aligned completions (0x%x:0x%x)\n",
bridge->vendor, bridge->device);
mgp->csum_flag = MXGEFW_FLAGS_CKSUM;
mgp->pause = myri10ge_flow_control;
mgp->intr_coal_delay = myri10ge_intr_coal_delay;
+ mgp->msg_enable = netif_msg_init(myri10ge_debug, MYRI10GE_MSG_DEFAULT);
init_waitqueue_head(&mgp->down_wq);
if (pci_enable_device(pdev)) {
* 0.20 - fix stupid RFEN thinko. i am such a smurf.
* 20040828 0.21 - add hardware vlan acceleration
- * 20050406 0.22 - improved DAC ifdefs from Andi Kleen
+ * 20050406 0.22 - improved DAC ifdefs from Andi Kleen
* - removal of dead code from Adrian Bunk
* - fix half duplex collision behaviour
* Driver Overview
#define LINK_DOWN 0x02
#define LINK_UP 0x04
- #define HW_ADDR_LEN sizeof(dma_addr_t)
+ #define HW_ADDR_LEN sizeof(dma_addr_t)
#define desc_addr_set(desc, addr) \
do { \
((desc)[0] = cpu_to_le32(addr)); \
(((NR_TX_DESC-2 + dev->tx_done_idx - dev->tx_free_idx) % NR_TX_DESC) > MIN_TX_DESC_FREE)
- #ifdef NS83820_VLAN_ACCEL_SUPPORT
+ #ifdef NS83820_VLAN_ACCEL_SUPPORT
static void ns83820_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
{
struct ns83820 *dev = PRIV(ndev);
}
/* rx_irq
- *
+ *
*/
static void FASTCALL(rx_irq(struct net_device *ndev));
static void fastcall rx_irq(struct net_device *ndev)
* that are 64 bytes with a vlan header appended
* like arp frames, or pings, are flagged as Runts
* when the tag is stripped by hardware. This
- * also means that the OK bit in the descriptor
+ * also means that the OK bit in the descriptor
* is cleared when the frame comes in so we have
* to do a specific length check here to make sure
* the frame would have been ok, had we not stripped
* the tag.
- */
+ */
if (likely((CMDSTS_OK & cmdsts) ||
- ((cmdsts & CMDSTS_RUNT) && len >= 56))) {
+ ((cmdsts & CMDSTS_RUNT) && len >= 56))) {
#else
if (likely(CMDSTS_OK & cmdsts)) {
#endif
skb->ip_summed = CHECKSUM_NONE;
}
skb->protocol = eth_type_trans(skb, ndev);
- #ifdef NS83820_VLAN_ACCEL_SUPPORT
+ #ifdef NS83820_VLAN_ACCEL_SUPPORT
if(extsts & EXTSTS_VPKT) {
unsigned short tag;
tag = ntohs(extsts & EXTSTS_VTG_MASK);
dev_kfree_skb_irq(skb);
atomic_dec(&dev->nr_tx_skbs);
} else
- pci_unmap_page(dev->pci_dev,
+ pci_unmap_page(dev->pci_dev,
addr,
len,
PCI_DMA_TODEVICE);
if (!nr_frags)
frag = NULL;
extsts = 0;
- if (skb->ip_summed == CHECKSUM_HW) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
extsts |= EXTSTS_IPPKT;
if (IPPROTO_TCP == skb->nh.iph->protocol)
extsts |= EXTSTS_TCPPKT;
return cfg & CFG_LNKSTS ? 1 : 0;
}
- static struct ethtool_ops ops = {
+ static const struct ethtool_ops ops = {
.get_drvinfo = ns83820_get_drvinfo,
.get_link = ns83820_get_link
};
dev->tx_idx = 0;
}
/* There may have been a race between a pci originated read
- * and the descriptor update from the cpu. Just in case,
- * kick the transmitter if the hardware thinks it is on a
+ * and the descriptor update from the cpu. Just in case,
+ * kick the transmitter if the hardware thinks it is on a
* different descriptor than we are.
*/
if (dev->tx_idx != dev->tx_free_idx)
/* The TxIdle interrupt can come in before the transmit has
* completed. Normally we reap packets off of the combination
- * of TxDesc and TxIdle and leave TxOk disabled (since it
- * occurs on every packet), but when no further irqs of this
+ * of TxDesc and TxIdle and leave TxOk disabled (since it
+ * occurs on every packet), but when no further irqs of this
* nature are expected, we must enable TxOk.
*/
if ((ISR_TXIDLE & isr) && (dev->tx_done_idx != dev->tx_free_idx)) {
/* When compiled with 64 bit addressing, we must always enable
* the 64 bit descriptor format.
*/
- if (sizeof(dma_addr_t) == 8)
+ if (sizeof(dma_addr_t) == 8)
dev->CFG_cache |= CFG_M64ADDR;
if (using_dac)
dev->CFG_cache |= CFG_T64ADDR;
writel(dev->CFG_cache, dev->base + CFG);
}
- #if 0 /* Huh? This sets the PCI latency register. Should be done via
+ #if 0 /* Huh? This sets the PCI latency register. Should be done via
* the PCI layer. FIXME.
*/
if (readl(dev->base + SRR))
* can be transmitted is 8192 - FLTH - burst size.
* If only the transmit fifo was larger...
*/
- /* Ramit : 1024 DMA is not a good idea, it ends up banging
+ /* Ramit : 1024 DMA is not a good idea, it ends up banging
* some DELL and COMPAQ SMP systems */
writel(TXCFG_CSI | TXCFG_HBI | TXCFG_ATP | TXCFG_MXDMA512
| ((1600 / 32) * 0x100),
/* Set Rx to full duplex, don't accept runt, errored, long or length
* range errored packets. Use 512 byte DMA.
*/
- /* Ramit : 1024 DMA is not a good idea, it ends up banging
- * some DELL and COMPAQ SMP systems
+ /* Ramit : 1024 DMA is not a good idea, it ends up banging
+ * some DELL and COMPAQ SMP systems
* Turn on ALP, only we are accepting Jumbo Packets */
writel(RXCFG_AEP | RXCFG_ARP | RXCFG_AIRL | RXCFG_RX_FD
| RXCFG_STRIPCRC
* also turn on tag stripping if hardware acceleration is enabled
*/
#ifdef NS83820_VLAN_ACCEL_SUPPORT
- #define VRCR_INIT_VALUE (VRCR_IPEN|VRCR_VTDEN|VRCR_VTREN)
+ #define VRCR_INIT_VALUE (VRCR_IPEN|VRCR_VTDEN|VRCR_VTREN)
#else
#define VRCR_INIT_VALUE (VRCR_IPEN|VRCR_VTDEN)
#endif
static int __init ns83820_init(void)
{
printk(KERN_INFO "ns83820.c: National Semiconductor DP83820 10/100/1000 driver.\n");
- return pci_module_init(&driver);
+ return pci_register_driver(&driver);
}
static void __exit ns83820_exit(void)
History:
May 20 2002 - Add link status force-mode and TBI mode support.
- 2004 - Massive updates. See kernel SCM system for details.
+ 2004 - Massive updates. See kernel SCM system for details.
=========================================================================
1. [DEPRECATED: use ethtool instead] The media can be forced in 5 modes.
Command: 'insmod r8169 media = SET_MEDIA'
Ex: 'insmod r8169 media = 0x04' will force the PHY to operate in 100Mbps Half-duplex.
-
+
SET_MEDIA can be:
_10_Half = 0x01
_10_Full = 0x02
_100_Half = 0x04
_100_Full = 0x08
_1000_Full = 0x10
-
+
2. Support TBI mode.
=========================================================================
VERSION 1.1 <2002/10/4>
Bits 4:0 of MII register 4 are called the "selector field", and have to be
00001b to indicate support of IEEE std 802.3 during the NWay process of
- exchanging Link Code Word (FLP).
+ exchanging Link Code Word (FLP).
VERSION 1.2 <2002/11/30>
#ifdef RTL8169_DEBUG
#define assert(expr) \
- if(!(expr)) { \
- printk( "Assertion failed! %s,%s,%s,line=%d\n", \
- #expr,__FILE__,__FUNCTION__,__LINE__); \
- }
+ if (!(expr)) { \
+ printk( "Assertion failed! %s,%s,%s,line=%d\n", \
+ #expr,__FILE__,__FUNCTION__,__LINE__); \
+ }
#define dprintk(fmt, args...) do { printk(PFX fmt, ## args); } while (0)
#else
#define assert(expr) do {} while (0)
#define RTL_R32(reg) ((unsigned long) readl (ioaddr + (reg)))
enum mac_version {
- RTL_GIGA_MAC_VER_B = 0x00,
- /* RTL_GIGA_MAC_VER_C = 0x03, */
- RTL_GIGA_MAC_VER_D = 0x01,
- RTL_GIGA_MAC_VER_E = 0x02,
- RTL_GIGA_MAC_VER_X = 0x04 /* Greater than RTL_GIGA_MAC_VER_E */
+ RTL_GIGA_MAC_VER_01 = 0x00,
+ RTL_GIGA_MAC_VER_02 = 0x01,
+ RTL_GIGA_MAC_VER_03 = 0x02,
+ RTL_GIGA_MAC_VER_04 = 0x03,
+ RTL_GIGA_MAC_VER_05 = 0x04,
+ RTL_GIGA_MAC_VER_11 = 0x0b,
+ RTL_GIGA_MAC_VER_12 = 0x0c,
+ RTL_GIGA_MAC_VER_13 = 0x0d,
+ RTL_GIGA_MAC_VER_14 = 0x0e,
+ RTL_GIGA_MAC_VER_15 = 0x0f
};
enum phy_version {
RTL_GIGA_PHY_VER_H = 0x08, /* PHY Reg 0x03 bit0-3 == 0x0003 */
};
-
#define _R(NAME,MAC,MASK) \
{ .name = NAME, .mac_version = MAC, .RxConfigMask = MASK }
u8 mac_version;
u32 RxConfigMask; /* Clears the bits supported by this chip */
} rtl_chip_info[] = {
- _R("RTL8169", RTL_GIGA_MAC_VER_B, 0xff7e1880),
- _R("RTL8169s/8110s", RTL_GIGA_MAC_VER_D, 0xff7e1880),
- _R("RTL8169s/8110s", RTL_GIGA_MAC_VER_E, 0xff7e1880),
- _R("RTL8169s/8110s", RTL_GIGA_MAC_VER_X, 0xff7e1880),
+ _R("RTL8169", RTL_GIGA_MAC_VER_01, 0xff7e1880),
+ _R("RTL8169s/8110s", RTL_GIGA_MAC_VER_02, 0xff7e1880),
+ _R("RTL8169s/8110s", RTL_GIGA_MAC_VER_03, 0xff7e1880),
+ _R("RTL8169sb/8110sb", RTL_GIGA_MAC_VER_04, 0xff7e1880),
+ _R("RTL8169sc/8110sc", RTL_GIGA_MAC_VER_05, 0xff7e1880),
+ _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_11, 0xff7e1880), // PCI-E
+ _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_12, 0xff7e1880), // PCI-E
+ _R("RTL8101e", RTL_GIGA_MAC_VER_13, 0xff7e1880), // PCI-E 8139
+ _R("RTL8100e", RTL_GIGA_MAC_VER_14, 0xff7e1880), // PCI-E 8139
+ _R("RTL8100e", RTL_GIGA_MAC_VER_15, 0xff7e1880) // PCI-E 8139
};
#undef _R
+ enum cfg_version {
+ RTL_CFG_0 = 0x00,
+ RTL_CFG_1,
+ RTL_CFG_2
+ };
+
+ static const struct {
+ unsigned int region;
+ unsigned int align;
+ } rtl_cfg_info[] = {
+ [RTL_CFG_0] = { 1, NET_IP_ALIGN },
+ [RTL_CFG_1] = { 2, NET_IP_ALIGN },
+ [RTL_CFG_2] = { 2, 8 }
+ };
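/* Per-device configuration: 'region' selects which PCI BAR holds the MMIO
 * registers and 'align' the receive buffer alignment; entries are looked up
 * through the driver_data field of the PCI device table below. */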
+
static struct pci_device_id rtl8169_pci_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), },
- { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), },
- { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), },
- { PCI_DEVICE(0x16ec, 0x0116), },
- { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0024, },
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_2 },
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
+ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
+ { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
+ { PCI_VENDOR_ID_LINKSYS, 0x1032,
+ PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
{0,},
};
RxOK = 0x01,
/* RxStatusDesc */
- RxRES = 0x00200000,
- RxCRC = 0x00080000,
- RxRUNT = 0x00100000,
- RxRWT = 0x00400000,
+ RxFOVF = (1 << 23),
+ RxRWT = (1 << 22),
+ RxRES = (1 << 21),
+ RxRUNT = (1 << 20),
+ RxCRC = (1 << 19),
/* ChipCmdBits */
CmdReset = 0x10,
LinkStatus = 0x02,
FullDup = 0x01,
- /* GIGABIT_PHY_registers */
- PHY_CTRL_REG = 0,
- PHY_STAT_REG = 1,
- PHY_AUTO_NEGO_REG = 4,
- PHY_1000_CTRL_REG = 9,
-
- /* GIGABIT_PHY_REG_BIT */
- PHY_Restart_Auto_Nego = 0x0200,
- PHY_Enable_Auto_Nego = 0x1000,
-
- /* PHY_STAT_REG = 1 */
- PHY_Auto_Neco_Comp = 0x0020,
-
- /* PHY_AUTO_NEGO_REG = 4 */
- PHY_Cap_10_Half = 0x0020,
- PHY_Cap_10_Full = 0x0040,
- PHY_Cap_100_Half = 0x0080,
- PHY_Cap_100_Full = 0x0100,
-
- /* PHY_1000_CTRL_REG = 9 */
- PHY_Cap_1000_Full = 0x0200,
-
- PHY_Cap_Null = 0x0,
-
/* _MediaType */
_10_Half = 0x01,
_10_Full = 0x02,
dma_addr_t RxPhyAddr;
struct sk_buff *Rx_skbuff[NUM_RX_DESC]; /* Rx data buffers */
struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
+ unsigned align;
unsigned rx_buf_sz;
struct timer_list timer;
u16 cp_cmd;
static const u16 rtl8169_napi_event =
RxOK | RxOverflow | RxFIFOOver | TxOK | TxErr;
static const unsigned int rtl8169_rx_config =
- (RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift);
-
- #define PHY_Cap_10_Half_Or_Less PHY_Cap_10_Half
- #define PHY_Cap_10_Full_Or_Less PHY_Cap_10_Full | PHY_Cap_10_Half_Or_Less
- #define PHY_Cap_100_Half_Or_Less PHY_Cap_100_Half | PHY_Cap_10_Full_Or_Less
- #define PHY_Cap_100_Full_Or_Less PHY_Cap_100_Full | PHY_Cap_100_Half_Or_Less
+ (RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift);
static void mdio_write(void __iomem *ioaddr, int RegAddr, int value)
{
for (i = 20; i > 0; i--) {
/* Check if the RTL8169 has completed writing to the specified MII register */
- if (!(RTL_R32(PHYAR) & 0x80000000))
+ if (!(RTL_R32(PHYAR) & 0x80000000))
break;
udelay(25);
}
static unsigned int rtl8169_xmii_reset_pending(void __iomem *ioaddr)
{
- return mdio_read(ioaddr, 0) & 0x8000;
+ return mdio_read(ioaddr, MII_BMCR) & BMCR_RESET;
}
static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
{
unsigned int val;
- val = (mdio_read(ioaddr, PHY_CTRL_REG) | 0x8000) & 0xffff;
- mdio_write(ioaddr, PHY_CTRL_REG, val);
+ val = (mdio_read(ioaddr, MII_BMCR) | BMCR_RESET) & 0xffff;
+ mdio_write(ioaddr, MII_BMCR, val);
}
static void rtl8169_check_link_status(struct net_device *dev,
{ SPEED_1000, DUPLEX_FULL, AUTONEG_ENABLE, 0xff }
}, *p;
unsigned char option;
-
+
option = ((idx < MAX_UNITS) && (idx >= 0)) ? media[idx] : 0xff;
if ((option != 0xff) && !idx && netif_msg_drv(&debug))
if (options & UWF)
wol->wolopts |= WAKE_UCAST;
if (options & BWF)
- wol->wolopts |= WAKE_BCAST;
+ wol->wolopts |= WAKE_BCAST;
if (options & MWF)
- wol->wolopts |= WAKE_MCAST;
+ wol->wolopts |= WAKE_MCAST;
out_unlock:
spin_unlock_irq(&tp->lock);
void __iomem *ioaddr = tp->mmio_addr;
int auto_nego, giga_ctrl;
- auto_nego = mdio_read(ioaddr, PHY_AUTO_NEGO_REG);
- auto_nego &= ~(PHY_Cap_10_Half | PHY_Cap_10_Full |
- PHY_Cap_100_Half | PHY_Cap_100_Full);
- giga_ctrl = mdio_read(ioaddr, PHY_1000_CTRL_REG);
- giga_ctrl &= ~(PHY_Cap_1000_Full | PHY_Cap_Null);
+ auto_nego = mdio_read(ioaddr, MII_ADVERTISE);
+ auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
+ ADVERTISE_100HALF | ADVERTISE_100FULL);
+ giga_ctrl = mdio_read(ioaddr, MII_CTRL1000);
+ giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
if (autoneg == AUTONEG_ENABLE) {
- auto_nego |= (PHY_Cap_10_Half | PHY_Cap_10_Full |
- PHY_Cap_100_Half | PHY_Cap_100_Full);
- giga_ctrl |= PHY_Cap_1000_Full;
+ auto_nego |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
+ ADVERTISE_100HALF | ADVERTISE_100FULL);
+ giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
} else {
if (speed == SPEED_10)
- auto_nego |= PHY_Cap_10_Half | PHY_Cap_10_Full;
+ auto_nego |= ADVERTISE_10HALF | ADVERTISE_10FULL;
else if (speed == SPEED_100)
- auto_nego |= PHY_Cap_100_Half | PHY_Cap_100_Full;
+ auto_nego |= ADVERTISE_100HALF | ADVERTISE_100FULL;
else if (speed == SPEED_1000)
- giga_ctrl |= PHY_Cap_1000_Full;
+ giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
if (duplex == DUPLEX_HALF)
- auto_nego &= ~(PHY_Cap_10_Full | PHY_Cap_100_Full);
+ auto_nego &= ~(ADVERTISE_10FULL | ADVERTISE_100FULL);
if (duplex == DUPLEX_FULL)
- auto_nego &= ~(PHY_Cap_10_Half | PHY_Cap_100_Half);
+ auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_100HALF);
+
+ /* This tweak comes straight from Realtek's driver. */
+ if ((speed == SPEED_100) && (duplex == DUPLEX_HALF) &&
+ (tp->mac_version == RTL_GIGA_MAC_VER_13)) {
+ auto_nego = ADVERTISE_100HALF | ADVERTISE_CSMA;
+ }
+ }
+
+ /* The 8100e/8101e do Fast Ethernet only. */
+ if ((tp->mac_version == RTL_GIGA_MAC_VER_13) ||
+ (tp->mac_version == RTL_GIGA_MAC_VER_14) ||
+ (tp->mac_version == RTL_GIGA_MAC_VER_15)) {
+ if ((giga_ctrl & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)) &&
+ netif_msg_link(tp)) {
+ printk(KERN_INFO "%s: PHY does not support 1000Mbps.\n",
+ dev->name);
+ }
+ giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
}
+ auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+
tp->phy_auto_nego_reg = auto_nego;
tp->phy_1000_ctrl_reg = giga_ctrl;
- mdio_write(ioaddr, PHY_AUTO_NEGO_REG, auto_nego);
- mdio_write(ioaddr, PHY_1000_CTRL_REG, giga_ctrl);
- mdio_write(ioaddr, PHY_CTRL_REG, PHY_Enable_Auto_Nego |
- PHY_Restart_Auto_Nego);
+ mdio_write(ioaddr, MII_ADVERTISE, auto_nego);
+ mdio_write(ioaddr, MII_CTRL1000, giga_ctrl);
+ mdio_write(ioaddr, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART);
return 0;
}
ret = tp->set_speed(dev, autoneg, speed, duplex);
- if (netif_running(dev) && (tp->phy_1000_ctrl_reg & PHY_Cap_1000_Full))
+ if (netif_running(dev) && (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL))
mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
return ret;
spin_lock_irqsave(&tp->lock, flags);
ret = rtl8169_set_speed(dev, cmd->autoneg, cmd->speed, cmd->duplex);
spin_unlock_irqrestore(&tp->lock, flags);
-
+
return ret;
}
SUPPORTED_100baseT_Full |
SUPPORTED_1000baseT_Full |
SUPPORTED_Autoneg |
- SUPPORTED_TP;
+ SUPPORTED_TP;
cmd->autoneg = 1;
cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
- if (tp->phy_auto_nego_reg & PHY_Cap_10_Half)
+ if (tp->phy_auto_nego_reg & ADVERTISE_10HALF)
cmd->advertising |= ADVERTISED_10baseT_Half;
- if (tp->phy_auto_nego_reg & PHY_Cap_10_Full)
+ if (tp->phy_auto_nego_reg & ADVERTISE_10FULL)
cmd->advertising |= ADVERTISED_10baseT_Full;
- if (tp->phy_auto_nego_reg & PHY_Cap_100_Half)
+ if (tp->phy_auto_nego_reg & ADVERTISE_100HALF)
cmd->advertising |= ADVERTISED_100baseT_Half;
- if (tp->phy_auto_nego_reg & PHY_Cap_100_Full)
+ if (tp->phy_auto_nego_reg & ADVERTISE_100FULL)
cmd->advertising |= ADVERTISED_100baseT_Full;
- if (tp->phy_1000_ctrl_reg & PHY_Cap_1000_Full)
+ if (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL)
cmd->advertising |= ADVERTISED_1000baseT_Full;
status = RTL_R8(PHYstatus);
else if (status & _10bps)
cmd->speed = SPEED_10;
+ if (status & TxFlowCtrl)
+ cmd->advertising |= ADVERTISED_Asym_Pause;
+ if (status & RxFlowCtrl)
+ cmd->advertising |= ADVERTISED_Pause;
+
cmd->duplex = ((status & _1000bpsF) || (status & FullDup)) ?
DUPLEX_FULL : DUPLEX_HALF;
}
static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *p)
{
- struct rtl8169_private *tp = netdev_priv(dev);
- unsigned long flags;
+ struct rtl8169_private *tp = netdev_priv(dev);
+ unsigned long flags;
- if (regs->len > R8169_REGS_SIZE)
- regs->len = R8169_REGS_SIZE;
+ if (regs->len > R8169_REGS_SIZE)
+ regs->len = R8169_REGS_SIZE;
- spin_lock_irqsave(&tp->lock, flags);
- memcpy_fromio(p, tp->mmio_addr, regs->len);
- spin_unlock_irqrestore(&tp->lock, flags);
+ spin_lock_irqsave(&tp->lock, flags);
+ memcpy_fromio(p, tp->mmio_addr, regs->len);
+ spin_unlock_irqrestore(&tp->lock, flags);
}
static u32 rtl8169_get_msglevel(struct net_device *dev)
RTL_W32(CounterAddrLow, 0);
RTL_W32(CounterAddrHigh, 0);
- data[0] = le64_to_cpu(counters->tx_packets);
+ data[0] = le64_to_cpu(counters->tx_packets);
data[1] = le64_to_cpu(counters->rx_packets);
data[2] = le64_to_cpu(counters->tx_errors);
data[3] = le32_to_cpu(counters->rx_errors);
}
- static struct ethtool_ops rtl8169_ethtool_ops = {
+ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_drvinfo = rtl8169_get_drvinfo,
.get_regs_len = rtl8169_get_regs_len,
.get_link = ethtool_op_get_link,
val = mdio_read(ioaddr, reg);
val = (bitval == 1) ?
val | (bitval << bitnum) : val & ~(0x0001 << bitnum);
- mdio_write(ioaddr, reg, val & 0xffff);
+ mdio_write(ioaddr, reg, val & 0xffff);
}
static void rtl8169_get_mac_version(struct rtl8169_private *tp, void __iomem *ioaddr)
u32 mask;
int mac_version;
} mac_info[] = {
- { 0x1 << 28, RTL_GIGA_MAC_VER_X },
- { 0x1 << 26, RTL_GIGA_MAC_VER_E },
- { 0x1 << 23, RTL_GIGA_MAC_VER_D },
- { 0x00000000, RTL_GIGA_MAC_VER_B } /* Catch-all */
+ { 0x38800000, RTL_GIGA_MAC_VER_15 },
+ { 0x38000000, RTL_GIGA_MAC_VER_12 },
+ { 0x34000000, RTL_GIGA_MAC_VER_13 },
+ { 0x30800000, RTL_GIGA_MAC_VER_14 },
+ { 0x30000000, RTL_GIGA_MAC_VER_11 },
+ { 0x18000000, RTL_GIGA_MAC_VER_05 },
+ { 0x10000000, RTL_GIGA_MAC_VER_04 },
+ { 0x04000000, RTL_GIGA_MAC_VER_03 },
+ { 0x00800000, RTL_GIGA_MAC_VER_02 },
+ { 0x00000000, RTL_GIGA_MAC_VER_01 } /* Catch-all */
}, *p = mac_info;
u32 reg;
static void rtl8169_print_mac_version(struct rtl8169_private *tp)
{
- struct {
- int version;
- char *msg;
- } mac_print[] = {
- { RTL_GIGA_MAC_VER_E, "RTL_GIGA_MAC_VER_E" },
- { RTL_GIGA_MAC_VER_D, "RTL_GIGA_MAC_VER_D" },
- { RTL_GIGA_MAC_VER_B, "RTL_GIGA_MAC_VER_B" },
- { 0, NULL }
- }, *p;
-
- for (p = mac_print; p->msg; p++) {
- if (tp->mac_version == p->version) {
- dprintk("mac_version == %s (%04d)\n", p->msg,
- p->version);
- return;
- }
- }
- dprintk("mac_version == Unknown\n");
+ dprintk("mac_version = 0x%02x\n", tp->mac_version);
}
static void rtl8169_get_phy_version(struct rtl8169_private *tp, void __iomem *ioaddr)
}, *p = phy_info;
u16 reg;
- reg = mdio_read(ioaddr, 3) & 0xffff;
+ reg = mdio_read(ioaddr, MII_PHYSID2) & 0xffff;
while ((reg & p->mask) != p->set)
p++;
tp->phy_version = p->phy_version;
rtl8169_print_mac_version(tp);
rtl8169_print_phy_version(tp);
- if (tp->mac_version <= RTL_GIGA_MAC_VER_B)
+ if (tp->mac_version <= RTL_GIGA_MAC_VER_01)
return;
if (tp->phy_version >= RTL_GIGA_PHY_VER_H)
return;
/* Shazam ! */
- if (tp->mac_version == RTL_GIGA_MAC_VER_X) {
+ if (tp->mac_version == RTL_GIGA_MAC_VER_04) {
mdio_write(ioaddr, 31, 0x0001);
mdio_write(ioaddr, 9, 0x273a);
mdio_write(ioaddr, 14, 0x7bfb);
void __iomem *ioaddr = tp->mmio_addr;
unsigned long timeout = RTL8169_PHY_TIMEOUT;
- assert(tp->mac_version > RTL_GIGA_MAC_VER_B);
+ assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
assert(tp->phy_version < RTL_GIGA_PHY_VER_H);
- if (!(tp->phy_1000_ctrl_reg & PHY_Cap_1000_Full))
+ if (!(tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL))
return;
spin_lock_irq(&tp->lock);
if (tp->phy_reset_pending(ioaddr)) {
- /*
+ /*
* A busy loop could burn quite a few cycles on today's CPUs.
* Let's delay the execution of the timer for a few ticks.
*/
struct rtl8169_private *tp = netdev_priv(dev);
struct timer_list *timer = &tp->timer;
- if ((tp->mac_version <= RTL_GIGA_MAC_VER_B) ||
+ if ((tp->mac_version <= RTL_GIGA_MAC_VER_01) ||
(tp->phy_version >= RTL_GIGA_PHY_VER_H))
return;
struct rtl8169_private *tp = netdev_priv(dev);
struct timer_list *timer = &tp->timer;
- if ((tp->mac_version <= RTL_GIGA_MAC_VER_B) ||
+ if ((tp->mac_version <= RTL_GIGA_MAC_VER_01) ||
(tp->phy_version >= RTL_GIGA_PHY_VER_H))
return;
}
#endif
+ static void __rtl8169_set_mac_addr(struct net_device *dev, void __iomem *ioaddr)
+ {
+ unsigned int i, j;
+
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
+ for (i = 0; i < 2; i++) {
+ __le32 l = 0;
+
+ for (j = 0; j < 4; j++) {
+ l <<= 8;
+ l |= dev->dev_addr[4*i + j];
+ }
+ RTL_W32(MAC0 + 4*i, cpu_to_be32(l));
+ }
+ RTL_W8(Cfg9346, Cfg9346_Lock);
+ }
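/* The station address is written as two 32-bit words to MAC0 and MAC0+4,
 * with the Cfg9346 register unlocked around the update so the MAC registers
 * accept the writes. */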
+
+ static int rtl8169_set_mac_addr(struct net_device *dev, void *p)
+ {
+ struct rtl8169_private *tp = netdev_priv(dev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ if (netif_running(dev)) {
+ spin_lock_irq(&tp->lock);
+ __rtl8169_set_mac_addr(dev, tp->mmio_addr);
+ spin_unlock_irq(&tp->lock);
+ }
+ return 0;
+ }
+
static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
void __iomem *ioaddr)
{
free_netdev(dev);
}
+ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
+ {
+ void __iomem *ioaddr = tp->mmio_addr;
+ static int board_idx = -1;
+ u8 autoneg, duplex;
+ u16 speed;
+
+ board_idx++;
+
+ rtl8169_hw_phy_config(dev);
+
+ dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
+ RTL_W8(0x82, 0x01);
+
+ if (tp->mac_version < RTL_GIGA_MAC_VER_03) {
+ dprintk("Set PCI Latency=0x40\n");
+ pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
+ }
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
+ dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
+ RTL_W8(0x82, 0x01);
+ dprintk("Set PHY Reg 0x0bh = 0x00h\n");
+ mdio_write(ioaddr, 0x0b, 0x0000); //w 0x0b 15 0 0
+ }
+
+ rtl8169_link_option(board_idx, &autoneg, &speed, &duplex);
+
+ rtl8169_set_speed(dev, autoneg, speed, duplex);
+
+ if ((RTL_R8(PHYstatus) & TBI_Enable) && netif_msg_link(tp))
+ printk(KERN_INFO PFX "%s: TBI auto-negotiating\n", dev->name);
+ }
+
+ static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ {
+ struct rtl8169_private *tp = netdev_priv(dev);
+ struct mii_ioctl_data *data = if_mii(ifr);
+
+ if (!netif_running(dev))
+ return -ENODEV;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = 32; /* Internal PHY */
+ return 0;
+
+ case SIOCGMIIREG:
+ data->val_out = mdio_read(tp->mmio_addr, data->reg_num & 0x1f);
+ return 0;
+
+ case SIOCSMIIREG:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ mdio_write(tp->mmio_addr, data->reg_num & 0x1f, data->val_in);
+ return 0;
+ }
+ return -EOPNOTSUPP;
+ }
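/* Standard MII ioctl plumbing: SIOCGMIIPHY reports the internal PHY at
 * address 32, and SIOCGMIIREG/SIOCSMIIREG read and write PHY registers via
 * mdio_read()/mdio_write(). Only the non-TBI (xmii) path installs this as
 * dev->do_ioctl, as seen in rtl8169_init_one() below. */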
+
static int __devinit
- rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
- void __iomem **ioaddr_out)
+ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- void __iomem *ioaddr;
- struct net_device *dev;
+ const unsigned int region = rtl_cfg_info[ent->driver_data].region;
struct rtl8169_private *tp;
- int rc = -ENOMEM, i, acpi_idle_state = 0, pm_cap;
+ struct net_device *dev;
+ void __iomem *ioaddr;
+ unsigned int i, pm_cap;
+ int rc;
- assert(ioaddr_out != NULL);
+ if (netif_msg_drv(&debug)) {
+ printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
+ MODULENAME, RTL8169_VERSION);
+ }
- /* dev zeroed in alloc_etherdev */
dev = alloc_etherdev(sizeof (*tp));
- if (dev == NULL) {
+ if (!dev) {
if (netif_msg_drv(&debug))
dev_err(&pdev->dev, "unable to alloc new ethernet\n");
- goto err_out;
+ rc = -ENOMEM;
+ goto out;
}
SET_MODULE_OWNER(dev);
if (rc < 0) {
if (netif_msg_probe(tp))
dev_err(&pdev->dev, "enable failure\n");
- goto err_out_free_dev;
+ goto err_out_free_dev_1;
}
rc = pci_set_mwi(pdev);
if (rc < 0)
- goto err_out_disable;
+ goto err_out_disable_2;
/* save power state before pci_enable_device overwrites it */
pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
if (pm_cap) {
- u16 pwr_command;
+ u16 pwr_command, acpi_idle_state;
pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &pwr_command);
acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
} else {
- if (netif_msg_probe(tp))
+ if (netif_msg_probe(tp)) {
dev_err(&pdev->dev,
- "PowerManagement capability not found.\n");
+ "PowerManagement capability not found.\n");
+ }
}
/* make sure PCI base addr 1 is MMIO */
- if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
- if (netif_msg_probe(tp))
+ if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
+ if (netif_msg_probe(tp)) {
dev_err(&pdev->dev,
- "region #1 not an MMIO resource, aborting\n");
+ "region #%d not an MMIO resource, aborting\n",
+ region);
+ }
rc = -ENODEV;
- goto err_out_mwi;
+ goto err_out_mwi_3;
}
+
/* check for weird/broken PCI region reporting */
- if (pci_resource_len(pdev, 1) < R8169_REGS_SIZE) {
- if (netif_msg_probe(tp))
+ if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
+ if (netif_msg_probe(tp)) {
dev_err(&pdev->dev,
- "Invalid PCI region size(s), aborting\n");
+ "Invalid PCI region size(s), aborting\n");
+ }
rc = -ENODEV;
- goto err_out_mwi;
+ goto err_out_mwi_3;
}
rc = pci_request_regions(pdev, MODULENAME);
if (rc < 0) {
if (netif_msg_probe(tp))
dev_err(&pdev->dev, "could not request regions.\n");
- goto err_out_mwi;
+ goto err_out_mwi_3;
}
tp->cp_cmd = PCIMulRW | RxChkSum;
} else {
rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
if (rc < 0) {
- if (netif_msg_probe(tp))
+ if (netif_msg_probe(tp)) {
dev_err(&pdev->dev,
- "DMA configuration failed.\n");
- goto err_out_free_res;
+ "DMA configuration failed.\n");
+ }
+ goto err_out_free_res_4;
}
}
pci_set_master(pdev);
/* ioremap MMIO region */
- ioaddr = ioremap(pci_resource_start(pdev, 1), R8169_REGS_SIZE);
- if (ioaddr == NULL) {
+ ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
+ if (!ioaddr) {
if (netif_msg_probe(tp))
dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
rc = -EIO;
- goto err_out_free_res;
+ goto err_out_free_res_4;
}
/* Unneeded ? Don't mess with Mrs. Murphy. */
RTL_W8(ChipCmd, CmdReset);
/* Check that the chip has finished the reset. */
- for (i = 1000; i > 0; i--) {
+ for (i = 100; i > 0; i--) {
if ((RTL_R8(ChipCmd) & CmdReset) == 0)
break;
- udelay(10);
+ msleep_interruptible(1);
}
/* Identify chip attached to board */
/* Unknown chip: assume array element #0, original RTL-8169 */
if (netif_msg_probe(tp)) {
dev_printk(KERN_DEBUG, &pdev->dev,
- "unknown chip version, assuming %s\n",
- rtl_chip_info[0].name);
+ "unknown chip version, assuming %s\n",
+ rtl_chip_info[0].name);
}
i++;
}
RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
RTL_W8(Cfg9346, Cfg9346_Lock);
- *ioaddr_out = ioaddr;
- *dev_out = dev;
- out:
- return rc;
-
- err_out_free_res:
- pci_release_regions(pdev);
-
- err_out_mwi:
- pci_clear_mwi(pdev);
-
- err_out_disable:
- pci_disable_device(pdev);
-
- err_out_free_dev:
- free_netdev(dev);
- err_out:
- *ioaddr_out = NULL;
- *dev_out = NULL;
- goto out;
- }
-
- static int __devinit
- rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
- {
- struct net_device *dev = NULL;
- struct rtl8169_private *tp;
- void __iomem *ioaddr = NULL;
- static int board_idx = -1;
- u8 autoneg, duplex;
- u16 speed;
- int i, rc;
-
- assert(pdev != NULL);
- assert(ent != NULL);
-
- board_idx++;
-
- if (netif_msg_drv(&debug)) {
- printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
- MODULENAME, RTL8169_VERSION);
- }
-
- rc = rtl8169_init_board(pdev, &dev, &ioaddr);
- if (rc)
- return rc;
-
- tp = netdev_priv(dev);
- assert(ioaddr != NULL);
-
if (RTL_R8(PHYstatus) & TBI_Enable) {
tp->set_speed = rtl8169_set_speed_tbi;
tp->get_settings = rtl8169_gset_tbi;
tp->phy_reset_pending = rtl8169_tbi_reset_pending;
tp->link_ok = rtl8169_tbi_link_ok;
- tp->phy_1000_ctrl_reg = PHY_Cap_1000_Full; /* Implied by TBI */
+ tp->phy_1000_ctrl_reg = ADVERTISE_1000FULL; /* Implied by TBI */
} else {
tp->set_speed = rtl8169_set_speed_xmii;
tp->get_settings = rtl8169_gset_xmii;
tp->phy_reset_enable = rtl8169_xmii_reset_enable;
tp->phy_reset_pending = rtl8169_xmii_reset_pending;
tp->link_ok = rtl8169_xmii_link_ok;
+
+ dev->do_ioctl = rtl8169_ioctl;
}
/* Get MAC address. FIXME: read EEPROM */
dev->stop = rtl8169_close;
dev->tx_timeout = rtl8169_tx_timeout;
dev->set_multicast_list = rtl8169_set_rx_mode;
+ dev->set_mac_address = rtl8169_set_mac_addr;
dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
dev->irq = pdev->irq;
dev->base_addr = (unsigned long) ioaddr;
tp->intr_mask = 0xffff;
tp->pci_dev = pdev;
tp->mmio_addr = ioaddr;
+ tp->align = rtl_cfg_info[ent->driver_data].align;
spin_lock_init(&tp->lock);
rc = register_netdev(dev);
- if (rc) {
- rtl8169_release_board(pdev, dev, ioaddr);
- return rc;
- }
-
- if (netif_msg_probe(tp)) {
- printk(KERN_DEBUG "%s: Identified chip type is '%s'.\n",
- dev->name, rtl_chip_info[tp->chipset].name);
- }
+ if (rc < 0)
+ goto err_out_unmap_5;
pci_set_drvdata(pdev, dev);
"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, "
"IRQ %d\n",
dev->name,
- rtl_chip_info[ent->driver_data].name,
+ rtl_chip_info[tp->chipset].name,
dev->base_addr,
dev->dev_addr[0], dev->dev_addr[1],
dev->dev_addr[2], dev->dev_addr[3],
dev->dev_addr[4], dev->dev_addr[5], dev->irq);
}
- rtl8169_hw_phy_config(dev);
-
- dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
- RTL_W8(0x82, 0x01);
+ rtl8169_init_phy(dev, tp);
- if (tp->mac_version < RTL_GIGA_MAC_VER_E) {
- dprintk("Set PCI Latency=0x40\n");
- pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40);
- }
-
- if (tp->mac_version == RTL_GIGA_MAC_VER_D) {
- dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
- RTL_W8(0x82, 0x01);
- dprintk("Set PHY Reg 0x0bh = 0x00h\n");
- mdio_write(ioaddr, 0x0b, 0x0000); //w 0x0b 15 0 0
- }
-
- rtl8169_link_option(board_idx, &autoneg, &speed, &duplex);
-
- rtl8169_set_speed(dev, autoneg, speed, duplex);
-
- if ((RTL_R8(PHYstatus) & TBI_Enable) && netif_msg_link(tp))
- printk(KERN_INFO PFX "%s: TBI auto-negotiating\n", dev->name);
+ out:
+ return rc;
- return 0;
+ err_out_unmap_5:
+ iounmap(ioaddr);
+ err_out_free_res_4:
+ pci_release_regions(pdev);
+ err_out_mwi_3:
+ pci_clear_mwi(pdev);
+ err_out_disable_2:
+ pci_disable_device(pdev);
+ err_out_free_dev_1:
+ free_netdev(dev);
+ goto out;
}
static void __devexit
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
+ struct pci_dev *pdev = tp->pci_dev;
u32 i;
/* Soft reset the chip. */
RTL_W8(ChipCmd, CmdReset);
/* Check that the chip has finished the reset. */
- for (i = 1000; i > 0; i--) {
+ for (i = 100; i > 0; i--) {
if ((RTL_R8(ChipCmd) & CmdReset) == 0)
break;
- udelay(10);
+ msleep_interruptible(1);
+ }
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_13) {
+ pci_write_config_word(pdev, 0x68, 0x00);
+ pci_write_config_word(pdev, 0x69, 0x08);
+ }
+
+ /* Undocumented stuff. */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
+ u16 cmd;
+
+ /* Realtek's r1000_n.c driver uses '&& 0x01' here. Well... */
+ if ((RTL_R8(Config2) & 0x07) & 0x01)
+ RTL_W32(0x7c, 0x0007ffff);
+
+ RTL_W32(0x7c, 0x0007ff00);
+
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ cmd = cmd & 0xef;
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
}
+
RTL_W8(Cfg9346, Cfg9346_Unlock);
- RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
RTL_W8(EarlyTxThres, EarlyTxThld);
/* Low hurts. Let's disable the filtering. */
RTL_W32(RxConfig, i);
/* Set DMA burst size and Interframe Gap Time */
- RTL_W32(TxConfig,
- (TX_DMA_BURST << TxDMAShift) | (InterFrameGap <<
- TxInterFrameGapShift));
- tp->cp_cmd |= RTL_R16(CPlusCmd);
- RTL_W16(CPlusCmd, tp->cp_cmd);
+ RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
+ (InterFrameGap << TxInterFrameGapShift));
+
+ tp->cp_cmd |= RTL_R16(CPlusCmd) | PCIMulRW;
- if ((tp->mac_version == RTL_GIGA_MAC_VER_D) ||
- (tp->mac_version == RTL_GIGA_MAC_VER_E)) {
+ if ((tp->mac_version == RTL_GIGA_MAC_VER_02) ||
+ (tp->mac_version == RTL_GIGA_MAC_VER_03)) {
dprintk(KERN_INFO PFX "Set MAC Reg C+CR Offset 0xE0. "
"Bit-3 and bit-14 MUST be 1\n");
- tp->cp_cmd |= (1 << 14) | PCIMulRW;
- RTL_W16(CPlusCmd, tp->cp_cmd);
+ tp->cp_cmd |= (1 << 14);
}
+ RTL_W16(CPlusCmd, tp->cp_cmd);
+
/*
* Undocumented corner. Supposedly:
* (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
*/
RTL_W16(IntrMitigate, 0x0000);
- RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr & DMA_32BIT_MASK));
+ /*
+ * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
+ * register to be written before TxDescAddrLow to work.
+ * Switching from MMIO to I/O access fixes the issue as well.
+ */
RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr >> 32));
- RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr & DMA_32BIT_MASK));
+ RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr & DMA_32BIT_MASK));
RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr >> 32));
+ RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr & DMA_32BIT_MASK));
+ RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
RTL_W8(Cfg9346, Cfg9346_Lock);
- udelay(10);
+
+ /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
+ RTL_R8(IntrMask);
RTL_W32(RxMissed, 0);
/* Enable all known interrupts by setting the interrupt mask. */
RTL_W16(IntrMask, rtl8169_intr_mask);
+ __rtl8169_set_mac_addr(dev, ioaddr);
+
netif_start_queue(dev);
}
}
static int rtl8169_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
- struct RxDesc *desc, int rx_buf_sz)
+ struct RxDesc *desc, int rx_buf_sz,
+ unsigned int align)
{
struct sk_buff *skb;
dma_addr_t mapping;
int ret = 0;
- skb = dev_alloc_skb(rx_buf_sz + NET_IP_ALIGN);
+ skb = dev_alloc_skb(rx_buf_sz + align);
if (!skb)
goto err_out;
- skb_reserve(skb, NET_IP_ALIGN);
+ skb_reserve(skb, align);
*sk_buff = skb;
mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
u32 start, u32 end)
{
u32 cur;
-
+
for (cur = start; end - cur > 0; cur++) {
int ret, i = cur % NUM_RX_DESC;
if (tp->Rx_skbuff[i])
continue;
-
+
ret = rtl8169_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
- tp->RxDescArray + i, tp->rx_buf_sz);
+ tp->RxDescArray + i, tp->rx_buf_sz, tp->align);
if (ret < 0)
break;
}
if (mss)
return LargeSend | ((mss & MSSMask) << MSSShift);
}
- if (skb->ip_summed == CHECKSUM_HW) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
const struct iphdr *ip = skb->nh.iph;
if (ip->protocol == IPPROTO_TCP)
dma_addr_t mapping;
u32 status, len;
u32 opts1;
- int ret = 0;
-
+ int ret = NETDEV_TX_OK;
+
if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
if (netif_msg_drv(tp)) {
printk(KERN_ERR
err_stop:
netif_stop_queue(dev);
- ret = 1;
+ ret = NETDEV_TX_BUSY;
err_update_stats:
tp->stats.tx_dropped++;
goto out;
}
static inline int rtl8169_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
- struct RxDesc *desc, int rx_buf_sz)
+ struct RxDesc *desc, int rx_buf_sz,
+ unsigned int align)
{
int ret = -1;
if (pkt_size < rx_copybreak) {
struct sk_buff *skb;
- skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
+ skb = dev_alloc_skb(pkt_size + align);
if (skb) {
- skb_reserve(skb, NET_IP_ALIGN);
+ skb_reserve(skb, align);
eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
*sk_buff = skb;
rtl8169_mark_to_asic(desc, rx_buf_sz);
tp->stats.rx_length_errors++;
if (status & RxCRC)
tp->stats.rx_crc_errors++;
+ if (status & RxFOVF) {
+ rtl8169_schedule_work(dev, rtl8169_reset_task);
+ tp->stats.rx_fifo_errors++;
+ }
rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
} else {
struct sk_buff *skb = tp->Rx_skbuff[entry];
}
rtl8169_rx_csum(skb, desc);
-
+
pci_dma_sync_single_for_cpu(tp->pci_dev,
le64_to_cpu(desc->addr), tp->rx_buf_sz,
PCI_DMA_FROMDEVICE);
if (rtl8169_try_rx_copy(&skb, pkt_size, desc,
- tp->rx_buf_sz)) {
+ tp->rx_buf_sz, tp->align)) {
pci_action = pci_unmap_single;
tp->Rx_skbuff[entry] = NULL;
}
__netif_rx_schedule(dev);
else if (netif_msg_intr(tp)) {
printk(KERN_INFO "%s: interrupt %04x taken in poll\n",
- dev->name, status);
+ dev->name, status);
}
break;
#else
tmp = rtl8169_rx_config | rx_mode |
(RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
+ if ((tp->mac_version == RTL_GIGA_MAC_VER_11) ||
+ (tp->mac_version == RTL_GIGA_MAC_VER_12) ||
+ (tp->mac_version == RTL_GIGA_MAC_VER_13) ||
+ (tp->mac_version == RTL_GIGA_MAC_VER_14) ||
+ (tp->mac_version == RTL_GIGA_MAC_VER_15)) {
+ mc_filter[0] = 0xffffffff;
+ mc_filter[1] = 0xffffffff;
+ }
+
RTL_W32(RxConfig, tmp);
RTL_W32(MAR0 + 0, mc_filter[0]);
RTL_W32(MAR0 + 4, mc_filter[1]);
RTL_W32(RxMissed, 0);
spin_unlock_irqrestore(&tp->lock, flags);
}
-
+
return &tp->stats;
}
static int __init
rtl8169_init_module(void)
{
- return pci_module_init(&rtl8169_pci_driver);
+ return pci_register_driver(&rtl8169_pci_driver);
}
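pci_module_init() was a thin wrapper around pci_register_driver(), and these hunks simply call the latter directly. For context, a hedged sketch of the surrounding module boilerplate, with placeholder names and an illustrative device ID:

	#include <linux/module.h>
	#include <linux/init.h>
	#include <linux/pci.h>

	static int __devinit example_probe(struct pci_dev *pdev,
					   const struct pci_device_id *ent)
	{
		return pci_enable_device(pdev);
	}

	static void __devexit example_remove(struct pci_dev *pdev)
	{
		pci_disable_device(pdev);
	}

	static struct pci_device_id example_pci_tbl[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169) },	/* illustrative ID */
		{ 0, }
	};
	MODULE_DEVICE_TABLE(pci, example_pci_tbl);

	static struct pci_driver example_pci_driver = {
		.name		= "example",
		.id_table	= example_pci_tbl,
		.probe		= example_probe,
		.remove		= __devexit_p(example_remove),
	};

	static int __init example_init_module(void)
	{
		return pci_register_driver(&example_pci_driver);
	}

	static void __exit example_cleanup_module(void)
	{
		pci_unregister_driver(&example_pci_driver);
	}

	module_init(example_init_module);
	module_exit(example_cleanup_module);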
static void __exit
*/
if (!tmp_p) {
mac_control->zerodma_virt_addr = tmp_v;
- DBG_PRINT(INIT_DBG,
+ DBG_PRINT(INIT_DBG,
"%s: Zero DMA address for TxDL. ", dev->name);
- DBG_PRINT(INIT_DBG,
+ DBG_PRINT(INIT_DBG,
"Virtual address %p\n", tmp_v);
tmp_v = pci_alloc_consistent(nic->pdev,
PAGE_SIZE, &tmp_p);
for (j = 0; j < page_num; j++) {
int mem_blks = (j * lst_per_page);
if (!mac_control->fifos[i].list_info)
- return;
+ return;
if (!mac_control->fifos[i].list_info[mem_blks].
list_virt_addr)
break;
pci_free_consistent(nic->pdev, PAGE_SIZE,
mac_control->zerodma_virt_addr,
(dma_addr_t)0);
- DBG_PRINT(INIT_DBG,
+ DBG_PRINT(INIT_DBG,
"%s: Freeing TxDL with zero DMA addr. ",
dev->name);
DBG_PRINT(INIT_DBG, "Virtual address %p\n",
static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
{
struct pci_dev *tdev = NULL;
- while ((tdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
- if ((tdev->vendor == NEC_VENID) && (tdev->device == NEC_DEVID)){
+ while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
+ if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
if (tdev->bus == s2io_pdev->bus->parent)
+ pci_dev_put(tdev);
return 1;
}
}
writeq(val64, &bar0->rx_w_round_robin_1);
val64 = 0x0200010000010203ULL;
writeq(val64, &bar0->rx_w_round_robin_2);
- val64 = 0x0001020001000001ULL;
+ val64 = 0x0001020001000001ULL;
writeq(val64, &bar0->rx_w_round_robin_3);
val64 = 0x0203000100000000ULL;
writeq(val64, &bar0->rx_w_round_robin_4);
skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
if (!txds->Buffer_Pointer)
break;
- pci_unmap_page(nic->pdev, (dma_addr_t)
+ pci_unmap_page(nic->pdev, (dma_addr_t)
txds->Buffer_Pointer,
frag->size, PCI_DMA_TODEVICE);
}
/* Two buffer mode */
/*
- * Buffer2 will have L3/L4 header plus
+ * Buffer2 will have L3/L4 header plus
* L4 payload
*/
((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single
/* Buffer-1 will be dummy buffer. Not used */
if (!(((RxD3_t*)rxdp)->Buffer1_ptr)) {
((RxD3_t*)rxdp)->Buffer1_ptr =
- pci_map_single(nic->pdev,
+ pci_map_single(nic->pdev,
ba->ba_1, BUF1_LEN,
PCI_DMA_FROMDEVICE);
}
((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
PCI_DMA_FROMDEVICE);
pci_unmap_single(sp->pdev, (dma_addr_t)
- ((RxD3_t*)rxdp)->Buffer1_ptr,
+ ((RxD3_t*)rxdp)->Buffer1_ptr,
l3l4hdr_size + 4,
PCI_DMA_FROMDEVICE);
pci_unmap_single(sp->pdev, (dma_addr_t)
writeq(val64, &bar0->rx_traffic_int);
writeq(val64, &bar0->tx_traffic_int);
- /* we need to free up the transmitted skbufs or else netpoll will
+ /* we need to free up the transmitted skbufs or else netpoll will
* run out of skbs and will fail and eventually netpoll application such
* as netdump will fail.
*/
if (val64 & SERR_SOURCE_ANY) {
nic->mac_control.stats_info->sw_stat.serious_err_cnt++;
DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
- DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
+ DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
(unsigned long long)val64);
netif_stop_queue(dev);
schedule_work(&nic->rst_timer_task);
txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
}
#endif
- if (skb->ip_summed == CHECKSUM_HW) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
txdp->Control_2 |=
(TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
TXD_TX_CKO_UDP_EN);
if (sp->device_type == XFRAME_II_DEVICE) {
val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
- SPI_CONTROL_BYTECNT(0x3) |
+ SPI_CONTROL_BYTECNT(0x3) |
SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
val64 |= SPI_CONTROL_REQ;
writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);
val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
- SPI_CONTROL_BYTECNT(write_cnt) |
+ SPI_CONTROL_BYTECNT(write_cnt) |
SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
val64 |= SPI_CONTROL_REQ;
if (stat_info->sw_stat.num_aggregations) {
u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
int count = 0;
- /*
+ /*
* Since 64-bit divide does not work on all platforms,
* do repeated subtraction.
*/
return 0;
}
- static struct ethtool_ops netdev_ethtool_ops = {
+ static const struct ethtool_ops netdev_ethtool_ops = {
.get_settings = s2io_ethtool_gset,
.set_settings = s2io_ethtool_sset,
.get_drvinfo = s2io_ethtool_gdrvinfo,
} else {
send_up:
queue_rx_frame(skb);
- }
+ }
dev->last_rx = jiffies;
aggregate:
atomic_dec(&sp->rx_bufs_left[ring_no]);
if ((*dev_intr_type == MSI_X) &&
((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
(pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
- DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
+ DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
"Defaulting to INTA\n");
*dev_intr_type = INTA;
}
sp->device_type = XFRAME_I_DEVICE;
sp->lro = lro;
-
+
/* Initialize some PCI/PCI-X fields of the NIC. */
s2io_init_pci(sp);
int __init s2io_starter(void)
{
- return pci_module_init(&s2io_driver);
+ return pci_register_driver(&s2io_driver);
}
/**
module_init(s2io_starter);
module_exit(s2io_closer);
- static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
+ static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
struct tcphdr **tcp, RxD_t *rxdp)
{
int ip_off;
lro->sg_num = 1;
lro->total_len = ntohs(ip->tot_len);
lro->frags_len = 0;
- /*
+ /*
* check if we saw TCP timestamp. Other consistency checks have
* already been done.
*/
/* Update ack seq no. and window ad(from this pkt) in LRO object */
lro->tcp_ack = tcp->ack_seq;
lro->window = tcp->window;
-
+
if (lro->saw_ts) {
u32 *ptr;
/* Update tsecr and tsval from this packet */
ptr = (u32 *) (tcp + 1);
- lro->cur_tsval = *(ptr + 1);
+ lro->cur_tsval = *(ptr + 1);
lro->cur_tsecr = *(ptr + 2);
}
}
return -1;
}
- /*
+ /*
* Allow only one TCP timestamp option. Don't aggregate if
* any other options are detected.
*/
return -1;
if (tcp->doff == 8) {
- ptr = (u8 *)(tcp + 1);
+ ptr = (u8 *)(tcp + 1);
while (*ptr == TCPOPT_NOP)
ptr++;
if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
return -1;
/* timestamp echo reply should be non-zero */
- if (*((u32 *)(ptr+6)) == 0)
+ if (*((u32 *)(ptr+6)) == 0)
return -1;
}
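The check above only aggregates segments whose sole TCP option is the 10-byte timestamp, normally padded with two NOPs so the header length becomes 8 words. A hedged, self-contained sketch of the same test (helper name invented):

	#include <net/tcp.h>

	/*
	 * Return 1 if the header carries only (NOP-padded) timestamps, the
	 * layout LRO expects; 0 for a bare header; -1 for anything else.
	 */
	static int example_tcp_has_only_timestamp(const struct tcphdr *tcp)
	{
		const u8 *ptr;

		if (tcp->doff == 5)		/* no options at all */
			return 0;
		if (tcp->doff != 8)		/* 20 + 12 bytes: NOP NOP TIMESTAMP */
			return -1;

		ptr = (const u8 *)(tcp + 1);
		while (*ptr == TCPOPT_NOP)	/* skip leading padding */
			ptr++;
		if (ptr[0] != TCPOPT_TIMESTAMP || ptr[1] != TCPOLEN_TIMESTAMP)
			return -1;
		return 1;
	}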
/* global variables *********************************************************/
static SK_BOOL DoPrintInterfaceChange = SK_TRUE;
- extern struct ethtool_ops SkGeEthtoolOps;
+ extern const struct ethtool_ops SkGeEthtoolOps;
/* local variables **********************************************************/
static uintptr_t TxQueueAddr[SK_MAX_MACS][2] = {{0x680, 0x600},{0x780, 0x700}};
pTxd->VDataHigh = (SK_U32) (PhysAddr >> 32);
pTxd->pMBuf = pMessage;
- if (pMessage->ip_summed == CHECKSUM_HW) {
+ if (pMessage->ip_summed == CHECKSUM_PARTIAL) {
u16 hdrlen = pMessage->h.raw - pMessage->data;
u16 offset = hdrlen + pMessage->csum;
/*
** Does the HW need to evaluate checksum for TCP or UDP packets?
*/
- if (pMessage->ip_summed == CHECKSUM_HW) {
+ if (pMessage->ip_summed == CHECKSUM_PARTIAL) {
u16 hdrlen = pMessage->h.raw - pMessage->data;
u16 offset = hdrlen + pMessage->csum;
#ifdef USE_SK_RX_CHECKSUM
pMsg->csum = pRxd->TcpSums & 0xffff;
- pMsg->ip_summed = CHECKSUM_HW;
+ pMsg->ip_summed = CHECKSUM_COMPLETE;
#else
pMsg->ip_summed = CHECKSUM_NONE;
#endif
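These CHECKSUM_HW conversions follow a kernel-wide rename that splits the old value by direction: on transmit, CHECKSUM_PARTIAL means the stack left a pseudo-header sum behind and skb->csum gives the offset (from skb->h.raw) at which the device must store the final checksum; on receive, CHECKSUM_COMPLETE means skb->csum carries the device's raw sum over the whole packet. A hedged sketch using 2.6.18-era sk_buff fields (the helper names are invented):

	#include <linux/skbuff.h>

	/* TX side: tell the hardware where to start summing and where to store it. */
	static void example_tx_csum(struct sk_buff *skb, u16 *start, u16 *stuff)
	{
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			*start = skb->h.raw - skb->data;	/* checksum start */
			*stuff = *start + skb->csum;		/* where to write it */
		}
	}

	/* RX side: hand the hardware's full-packet sum back to the stack. */
	static void example_rx_csum(struct sk_buff *skb, u16 hw_csum)
	{
		skb->csum = hw_csum;
		skb->ip_summed = CHECKSUM_COMPLETE;	/* stack folds and verifies it */
	}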
static int __init skge_init(void)
{
- return pci_module_init(&skge_driver);
+ return pci_register_driver(&skge_driver);
}
static void __exit skge_exit(void)
#include "skge.h"
#define DRV_NAME "skge"
- #define DRV_VERSION "1.6"
+ #define DRV_VERSION "1.8"
#define PFX DRV_NAME " "
#define DEFAULT_TX_RING_SIZE 128
static int skge_up(struct net_device *dev);
static int skge_down(struct net_device *dev);
static void skge_phy_reset(struct skge_port *skge);
- static void skge_tx_clean(struct skge_port *skge);
+ static void skge_tx_clean(struct net_device *dev);
static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
static void genesis_get_stats(struct skge_port *skge, u64 *data);
static const int rxqaddr[] = { Q_R1, Q_R2 };
static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
+ static const u32 irqmask[] = { IS_R1_F|IS_XA1_F, IS_R2_F|IS_XA2_F };
static int skge_get_regs_len(struct net_device *dev)
{
return 0;
}
- static struct ethtool_ops skge_ethtool_ops = {
+ static const struct ethtool_ops skge_ethtool_ops = {
.get_settings = skge_get_settings,
.set_settings = skge_set_settings,
.get_drvinfo = skge_get_drvinfo,
/* Allocate buffers for receive ring
* For receive: to_clean is next received frame.
*/
- static int skge_rx_fill(struct skge_port *skge)
+ static int skge_rx_fill(struct net_device *dev)
{
+ struct skge_port *skge = netdev_priv(dev);
struct skge_ring *ring = &skge->rx_ring;
struct skge_element *e;
do {
struct sk_buff *skb;
- skb = alloc_skb(skge->rx_buf_size + NET_IP_ALIGN, GFP_KERNEL);
+ skb = __netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN,
+ GFP_KERNEL);
if (!skb)
return -ENOMEM;
if (err)
goto free_pci_mem;
- err = skge_rx_fill(skge);
+ err = skge_rx_fill(dev);
if (err)
goto free_rx_ring;
skge_led(skge, LED_MODE_OFF);
netif_poll_disable(dev);
- skge_tx_clean(skge);
+ skge_tx_clean(dev);
skge_rx_clean(skge);
kfree(skge->rx_ring.start);
int i;
u32 control, len;
u64 map;
- unsigned long flags;
if (skb_padto(skb, ETH_ZLEN))
return NETDEV_TX_OK;
- if (!spin_trylock_irqsave(&skge->tx_lock, flags))
- /* Collision - tell upper layer to requeue */
- return NETDEV_TX_LOCKED;
-
- if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1)) {
- if (!netif_queue_stopped(dev)) {
- netif_stop_queue(dev);
-
- printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
- dev->name);
- }
- spin_unlock_irqrestore(&skge->tx_lock, flags);
+ if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1))
return NETDEV_TX_BUSY;
- }
e = skge->tx_ring.to_use;
td = e->desc;
td->dma_lo = map;
td->dma_hi = map >> 32;
- if (skb->ip_summed == CHECKSUM_HW) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
int offset = skb->h.raw - skb->data;
/* This seems backwards, but it is what the sk98lin
netif_stop_queue(dev);
}
- spin_unlock_irqrestore(&skge->tx_lock, flags);
-
dev->trans_start = jiffies;
return NETDEV_TX_OK;
printk(KERN_DEBUG PFX "%s: tx done slot %td\n",
skge->netdev->name, e - skge->tx_ring.start);
- dev_kfree_skb_any(e->skb);
+ dev_kfree_skb(e->skb);
}
e->skb = NULL;
}
/* Free all buffers in transmit ring */
- static void skge_tx_clean(struct skge_port *skge)
+ static void skge_tx_clean(struct net_device *dev)
{
+ struct skge_port *skge = netdev_priv(dev);
struct skge_element *e;
- unsigned long flags;
- spin_lock_irqsave(&skge->tx_lock, flags);
+ netif_tx_lock_bh(dev);
for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
struct skge_tx_desc *td = e->desc;
skge_tx_free(skge, e, td->control);
}
skge->tx_ring.to_clean = e;
- netif_wake_queue(skge->netdev);
- spin_unlock_irqrestore(&skge->tx_lock, flags);
+ netif_wake_queue(dev);
+ netif_tx_unlock_bh(dev);
}
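With the driver's private tx_lock and the NETIF_F_LLTX flag removed elsewhere in this patch, the core's per-device transmit lock now serializes hard_start_xmit, so ring cleanup simply takes that same lock. A small hedged sketch of the pattern:

	#include <linux/netdevice.h>

	static void example_drain_tx_ring(struct net_device *dev)
	{
		netif_tx_lock_bh(dev);		/* excludes hard_start_xmit, softirq-safe */
		/* ... walk the tx ring and free queued buffers here ... */
		netif_wake_queue(dev);
		netif_tx_unlock_bh(dev);
	}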
static void skge_tx_timeout(struct net_device *dev)
printk(KERN_DEBUG PFX "%s: tx timeout\n", dev->name);
skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP);
- skge_tx_clean(skge);
+ skge_tx_clean(dev);
}
static int skge_change_mtu(struct net_device *dev, int new_mtu)
/* Get receive buffer from descriptor.
* Handles copy of small buffers and reallocation failures
*/
- static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
- struct skge_element *e,
- u32 control, u32 status, u16 csum)
+ static struct sk_buff *skge_rx_get(struct net_device *dev,
+ struct skge_element *e,
+ u32 control, u32 status, u16 csum)
{
+ struct skge_port *skge = netdev_priv(dev);
struct sk_buff *skb;
u16 len = control & BMU_BBC;
if (unlikely(netif_msg_rx_status(skge)))
printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
- skge->netdev->name, e - skge->rx_ring.start,
+ dev->name, e - skge->rx_ring.start,
status, len);
if (len > skge->rx_buf_size)
goto error;
if (len < RX_COPY_THRESHOLD) {
- skb = alloc_skb(len + 2, GFP_ATOMIC);
+ skb = netdev_alloc_skb(dev, len + 2);
if (!skb)
goto resubmit;
skge_rx_reuse(e, skge->rx_buf_size);
} else {
struct sk_buff *nskb;
- nskb = alloc_skb(skge->rx_buf_size + NET_IP_ALIGN, GFP_ATOMIC);
+ nskb = netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN);
if (!nskb)
goto resubmit;
}
skb_put(skb, len);
- skb->dev = skge->netdev;
if (skge->rx_csum) {
skb->csum = csum;
- skb->ip_summed = CHECKSUM_HW;
+ skb->ip_summed = CHECKSUM_COMPLETE;
}
- skb->protocol = eth_type_trans(skb, skge->netdev);
+ skb->protocol = eth_type_trans(skb, dev);
return skb;
error:
if (netif_msg_rx_err(skge))
printk(KERN_DEBUG PFX "%s: rx err, slot %td control 0x%x status 0x%x\n",
- skge->netdev->name, e - skge->rx_ring.start,
+ dev->name, e - skge->rx_ring.start,
control, status);
if (skge->hw->chip_id == CHIP_ID_GENESIS) {
}
/* Free all buffers in Tx ring which are no longer owned by device */
- static void skge_txirq(struct net_device *dev)
+ static void skge_tx_done(struct net_device *dev)
{
struct skge_port *skge = netdev_priv(dev);
struct skge_ring *ring = &skge->tx_ring;
struct skge_element *e;
- rmb();
+ skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
- spin_lock(&skge->tx_lock);
+ netif_tx_lock(dev);
for (e = ring->to_clean; e != ring->to_use; e = e->next) {
struct skge_tx_desc *td = e->desc;
}
skge->tx_ring.to_clean = e;
- if (netif_queue_stopped(skge->netdev)
- && skge_avail(&skge->tx_ring) > TX_LOW_WATER)
- netif_wake_queue(skge->netdev);
+ if (skge_avail(&skge->tx_ring) > TX_LOW_WATER)
+ netif_wake_queue(dev);
- spin_unlock(&skge->tx_lock);
+ netif_tx_unlock(dev);
}
static int skge_poll(struct net_device *dev, int *budget)
int to_do = min(dev->quota, *budget);
int work_done = 0;
+ skge_tx_done(dev);
+
+ skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
+
for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) {
struct skge_rx_desc *rd = e->desc;
struct sk_buff *skb;
if (control & BMU_OWN)
break;
- skb = skge_rx_get(skge, e, control, rd->status, rd->csum2);
+ skb = skge_rx_get(dev, e, control, rd->status, rd->csum2);
if (likely(skb)) {
dev->last_rx = jiffies;
netif_receive_skb(skb);
if (work_done >= to_do)
return 1; /* not done */
- netif_rx_complete(dev);
-
spin_lock_irq(&hw->hw_lock);
- hw->intr_mask |= rxirqmask[skge->port];
+ __netif_rx_complete(dev);
+ hw->intr_mask |= irqmask[skge->port];
skge_write32(hw, B0_IMSK, hw->intr_mask);
- mmiowb();
+ skge_read32(hw, B0_IMSK);
spin_unlock_irq(&hw->hw_lock);
return 0;
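The reworked tail of skge_poll() shows the usual NAPI completion sequence: finish polling under the same lock the interrupt handler takes, re-enable the device's interrupt sources, and read the mask back so the posted write reaches the chip. A hedged sketch with an invented hardware structure standing in for the real one:

	#include <linux/netdevice.h>
	#include <linux/spinlock.h>
	#include <linux/io.h>

	struct example_hw {
		spinlock_t	lock;		/* shared with the irq handler */
		void __iomem	*imask_reg;	/* interrupt mask register */
		u32		intr_mask;
	};

	/* Tail of a ->poll() handler once the ring has been drained. */
	static int example_poll_done(struct net_device *dev, struct example_hw *hw)
	{
		spin_lock_irq(&hw->lock);
		__netif_rx_complete(dev);		/* leave polling mode */
		writel(hw->intr_mask, hw->imask_reg);	/* unmask rx/tx irqs */
		readl(hw->imask_reg);			/* flush the posted write */
		spin_unlock_irq(&hw->lock);
		return 0;				/* tell the core we are done */
	}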
spin_lock_irq(&hw->hw_lock);
hw->intr_mask |= IS_EXT_REG;
skge_write32(hw, B0_IMSK, hw->intr_mask);
+ skge_read32(hw, B0_IMSK);
spin_unlock_irq(&hw->hw_lock);
}
{
struct skge_hw *hw = dev_id;
u32 status;
+ int handled = 0;
+ spin_lock(&hw->hw_lock);
/* Reading this register masks IRQ */
status = skge_read32(hw, B0_SP_ISRC);
- if (status == 0)
- return IRQ_NONE;
+ if (status == 0 || status == ~0)
+ goto out;
- spin_lock(&hw->hw_lock);
+ handled = 1;
status &= hw->intr_mask;
if (status & IS_EXT_REG) {
hw->intr_mask &= ~IS_EXT_REG;
schedule_work(&hw->phy_work);
}
- if (status & IS_XA1_F) {
- skge_write8(hw, Q_ADDR(Q_XA1, Q_CSR), CSR_IRQ_CL_F);
- skge_txirq(hw->dev[0]);
- }
-
- if (status & IS_R1_F) {
- skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
- hw->intr_mask &= ~IS_R1_F;
+ if (status & (IS_XA1_F|IS_R1_F)) {
+ hw->intr_mask &= ~(IS_XA1_F|IS_R1_F);
netif_rx_schedule(hw->dev[0]);
}
skge_mac_intr(hw, 0);
if (hw->dev[1]) {
- if (status & IS_XA2_F) {
- skge_write8(hw, Q_ADDR(Q_XA2, Q_CSR), CSR_IRQ_CL_F);
- skge_txirq(hw->dev[1]);
- }
-
- if (status & IS_R2_F) {
- skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
- hw->intr_mask &= ~IS_R2_F;
+ if (status & (IS_XA2_F|IS_R2_F)) {
+ hw->intr_mask &= ~(IS_XA2_F|IS_R2_F);
netif_rx_schedule(hw->dev[1]);
}
skge_error_irq(hw);
skge_write32(hw, B0_IMSK, hw->intr_mask);
+ skge_read32(hw, B0_IMSK);
+ out:
spin_unlock(&hw->hw_lock);
- return IRQ_HANDLED;
+ return IRQ_RETVAL(handled);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
else
hw->ram_size = t8 * 4096;
- spin_lock_init(&hw->hw_lock);
hw->intr_mask = IS_HW_ERR | IS_EXT_REG | IS_PORT_1;
if (hw->ports > 1)
hw->intr_mask |= IS_PORT_2;
dev->poll_controller = skge_netpoll;
#endif
dev->irq = hw->pdev->irq;
- dev->features = NETIF_F_LLTX;
+
if (highmem)
dev->features |= NETIF_F_HIGHDMA;
skge->port = port;
- spin_lock_init(&skge->tx_lock);
-
if (hw->chip_id != CHIP_ID_GENESIS) {
dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
skge->rx_csum = 1;
hw->pdev = pdev;
mutex_init(&hw->phy_mutex);
INIT_WORK(&hw->phy_work, skge_extirq, hw);
+ spin_lock_init(&hw->hw_lock);
hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
if (!hw->regs) {
goto err_out_free_hw;
}
- err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, DRV_NAME, hw);
- if (err) {
- printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
- pci_name(pdev), pdev->irq);
- goto err_out_iounmap;
- }
- pci_set_drvdata(pdev, hw);
-
err = skge_reset(hw);
if (err)
- goto err_out_free_irq;
+ goto err_out_iounmap;
printk(KERN_INFO PFX DRV_VERSION " addr 0x%llx irq %d chip %s rev %d\n",
(unsigned long long)pci_resource_start(pdev, 0), pdev->irq,
skge_board_name(hw), hw->chip_rev);
- if ((dev = skge_devinit(hw, 0, using_dac)) == NULL)
+ dev = skge_devinit(hw, 0, using_dac);
+ if (!dev)
goto err_out_led_off;
if (!is_valid_ether_addr(dev->dev_addr)) {
goto err_out_free_netdev;
}
-
err = register_netdev(dev);
if (err) {
printk(KERN_ERR PFX "%s: cannot register net device\n",
goto err_out_free_netdev;
}
+ err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, dev->name, hw);
+ if (err) {
+ printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
+ dev->name, pdev->irq);
+ goto err_out_unregister;
+ }
skge_show_addr(dev);
if (hw->ports > 1 && (dev1 = skge_devinit(hw, 1, using_dac))) {
free_netdev(dev1);
}
}
+ pci_set_drvdata(pdev, hw);
return 0;
+ err_out_unregister:
+ unregister_netdev(dev);
err_out_free_netdev:
free_netdev(dev);
err_out_led_off:
skge_write16(hw, B0_LED, LED_STAT_OFF);
- err_out_free_irq:
- free_irq(pdev->irq, hw);
err_out_iounmap:
iounmap(hw->regs);
err_out_free_hw:
spin_lock_irq(&hw->hw_lock);
hw->intr_mask = 0;
skge_write32(hw, B0_IMSK, 0);
+ skge_read32(hw, B0_IMSK);
spin_unlock_irq(&hw->hw_lock);
skge_write16(hw, B0_LED, LED_STAT_OFF);
struct skge_hw *hw = pci_get_drvdata(pdev);
int i, wol = 0;
- for (i = 0; i < 2; i++) {
+ pci_save_state(pdev);
+ for (i = 0; i < hw->ports; i++) {
struct net_device *dev = hw->dev[i];
- if (dev) {
+ if (netif_running(dev)) {
struct skge_port *skge = netdev_priv(dev);
- if (netif_running(dev)) {
- netif_carrier_off(dev);
- if (skge->wol)
- netif_stop_queue(dev);
- else
- skge_down(dev);
- }
- netif_device_detach(dev);
+
+ netif_carrier_off(dev);
+ if (skge->wol)
+ netif_stop_queue(dev);
+ else
+ skge_down(dev);
wol |= skge->wol;
}
+ netif_device_detach(dev);
}
- pci_save_state(pdev);
+ skge_write32(hw, B0_IMSK, 0);
pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
- pci_disable_device(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
static int skge_resume(struct pci_dev *pdev)
{
struct skge_hw *hw = pci_get_drvdata(pdev);
- int i;
+ int i, err;
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
pci_enable_wake(pdev, PCI_D0, 0);
- skge_reset(hw);
+ err = skge_reset(hw);
+ if (err)
+ goto out;
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < hw->ports; i++) {
struct net_device *dev = hw->dev[i];
- if (dev) {
- netif_device_attach(dev);
- if (netif_running(dev) && skge_up(dev))
+
+ netif_device_attach(dev);
+ if (netif_running(dev)) {
+ err = skge_up(dev);
+
+ if (err) {
+ printk(KERN_ERR PFX "%s: could not up: %d\n",
+ dev->name, err);
dev_close(dev);
+ goto out;
+ }
}
}
- return 0;
+ out:
+ return err;
}
#endif
static int __init skge_init_module(void)
{
- return pci_module_init(&skge_driver);
+ return pci_register_driver(&skge_driver);
}
static void __exit skge_cleanup_module(void)
#include "sky2.h"
#define DRV_NAME "sky2"
- #define DRV_VERSION "1.5"
+ #define DRV_VERSION "1.7"
#define PFX DRV_NAME " "
/*
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) },
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) },
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, /* DGE-560T */
+ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, /* DGE-550SX */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4350) },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4353) },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4365) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4369) },
{ 0 }
};
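The new entries above use the PCI_DEVICE() helper, which fills in wildcard subsystem IDs. Spelled out, the DGE-550SX line added above is roughly equivalent to this sketch:

	#include <linux/pci.h>

	static const struct pci_device_id example_id = {
		.vendor    = PCI_VENDOR_ID_DLINK,
		.device    = 0x4001,		/* DGE-550SX */
		.subvendor = PCI_ANY_ID,	/* match any subsystem vendor */
		.subdevice = PCI_ANY_ID,	/* match any subsystem device */
	};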
static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
{
u16 power_control;
- u32 reg1;
int vaux;
pr_debug("sky2_set_power_state %d\n", state);
else
sky2_write8(hw, B2_Y2_CLK_GATE, 0);
- /* Turn off phy power saving */
- reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
- reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
-
- /* looks like this XL is back asswards .. */
- if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) {
- reg1 |= PCI_Y2_PHY1_COMA;
- if (hw->ports > 1)
- reg1 |= PCI_Y2_PHY2_COMA;
- }
- sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
- udelay(100);
-
if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
+ u32 reg1;
+
sky2_pci_write32(hw, PCI_DEV_REG3, 0);
reg1 = sky2_pci_read32(hw, PCI_DEV_REG4);
reg1 &= P_ASPM_CONTROL_MSK;
case PCI_D3hot:
case PCI_D3cold:
- /* Turn on phy power saving */
- reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
- if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
- reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
- else
- reg1 |= (PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
- sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
- udelay(100);
-
if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
sky2_write8(hw, B2_Y2_CLK_GATE, 0);
else
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
}
- static void sky2_phy_reset(struct sky2_hw *hw, unsigned port)
+ static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port)
{
u16 reg;
static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
{
struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
- u16 ctrl, ct1000, adv, pg, ledctrl, ledover;
+ u16 ctrl, ct1000, adv, pg, ledctrl, ledover, reg;
if (sky2->autoneg == AUTONEG_ENABLE &&
!(hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) {
}
ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
- if (hw->copper) {
+ if (sky2_is_copper(hw)) {
if (hw->chip_id == CHIP_ID_YUKON_FE) {
/* enable automatic crossover */
ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1;
ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
}
}
- gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
} else {
/* workaround for deviation #4.88 (CRC errors) */
/* disable Automatic Crossover */
ctrl &= ~PHY_M_PC_MDIX_MSK;
- gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
+ }
- if (hw->chip_id == CHIP_ID_YUKON_XL) {
- /* Fiber: select 1000BASE-X only mode MAC Specific Ctrl Reg. */
- gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
- ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
- ctrl &= ~PHY_M_MAC_MD_MSK;
- ctrl |= PHY_M_MAC_MODE_SEL(PHY_M_MAC_MD_1000BX);
- gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
+
+ /* special setup for PHY 88E1112 Fiber */
+ if (hw->chip_id == CHIP_ID_YUKON_XL && !sky2_is_copper(hw)) {
+ pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
+ /* Fiber: select 1000BASE-X only mode MAC Specific Ctrl Reg. */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
+ ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
+ ctrl &= ~PHY_M_MAC_MD_MSK;
+ ctrl |= PHY_M_MAC_MODE_SEL(PHY_M_MAC_MD_1000BX);
+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
+
+ if (hw->pmd_type == 'P') {
/* select page 1 to access Fiber registers */
gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 1);
+
+ /* for SFP-module set SIGDET polarity to low */
+ ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
+ ctrl |= PHY_M_FIB_SIGD_POL;
+ gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
}
+
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
}
ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
ctrl = 0;
ct1000 = 0;
adv = PHY_AN_CSMA;
+ reg = 0;
if (sky2->autoneg == AUTONEG_ENABLE) {
- if (hw->copper) {
+ if (sky2_is_copper(hw)) {
if (sky2->advertising & ADVERTISED_1000baseT_Full)
ct1000 |= PHY_M_1000C_AFD;
if (sky2->advertising & ADVERTISED_1000baseT_Half)
adv |= PHY_M_AN_10_FD;
if (sky2->advertising & ADVERTISED_10baseT_Half)
adv |= PHY_M_AN_10_HD;
- } else /* special defines for FIBER (88E1011S only) */
- adv |= PHY_M_AN_1000X_AHD | PHY_M_AN_1000X_AFD;
+ } else { /* special defines for FIBER (88E1040S only) */
+ if (sky2->advertising & ADVERTISED_1000baseT_Full)
+ adv |= PHY_M_AN_1000X_AFD;
+ if (sky2->advertising & ADVERTISED_1000baseT_Half)
+ adv |= PHY_M_AN_1000X_AHD;
+ }
/* Set Flow-control capabilities */
if (sky2->tx_pause && sky2->rx_pause)
/* forced speed/duplex settings */
ct1000 = PHY_M_1000C_MSE;
- if (sky2->duplex == DUPLEX_FULL)
- ctrl |= PHY_CT_DUP_MD;
+ /* Disable auto update for duplex flow control and speed */
+ reg |= GM_GPCR_AU_ALL_DIS;
switch (sky2->speed) {
case SPEED_1000:
ctrl |= PHY_CT_SP1000;
+ reg |= GM_GPCR_SPEED_1000;
break;
case SPEED_100:
ctrl |= PHY_CT_SP100;
+ reg |= GM_GPCR_SPEED_100;
break;
}
+ if (sky2->duplex == DUPLEX_FULL) {
+ reg |= GM_GPCR_DUP_FULL;
+ ctrl |= PHY_CT_DUP_MD;
+ } else if (sky2->speed != SPEED_1000 && hw->chip_id != CHIP_ID_YUKON_EC_U) {
+ /* Turn off flow control for 10/100mbps */
+ sky2->rx_pause = 0;
+ sky2->tx_pause = 0;
+ }
+
+ if (!sky2->rx_pause)
+ reg |= GM_GPCR_FC_RX_DIS;
+
+ if (!sky2->tx_pause)
+ reg |= GM_GPCR_FC_TX_DIS;
+
+ /* Forward pause packets to GMAC? */
+ if (sky2->tx_pause || sky2->rx_pause)
+ sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
+ else
+ sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
+
ctrl |= PHY_CT_RESET;
}
+ gma_write16(hw, port, GM_GP_CTRL, reg);
+
if (hw->chip_id != CHIP_ID_YUKON_FE)
gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);
gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
}
+
/* Enable phy interrupt on auto-negotiation complete (or link up) */
if (sky2->autoneg == AUTONEG_ENABLE)
gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL);
gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
}
+ static void sky2_phy_power(struct sky2_hw *hw, unsigned port, int onoff)
+ {
+ u32 reg1;
+ static const u32 phy_power[]
+ = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD };
+
+ /* looks like this XL is back asswards .. */
+ if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
+ onoff = !onoff;
+
+ reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
+
+ if (onoff)
+ /* Turn off phy power saving */
+ reg1 &= ~phy_power[port];
+ else
+ reg1 |= phy_power[port];
+
+ sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
+ sky2_pci_read32(hw, PCI_DEV_REG1);
+ udelay(100);
+ }
+
/* Force a renegotiation */
static void sky2_phy_reinit(struct sky2_port *sky2)
{
gm_phy_read(hw, 1, PHY_MARV_INT_MASK) != 0);
}
- if (sky2->autoneg == AUTONEG_DISABLE) {
- reg = gma_read16(hw, port, GM_GP_CTRL);
- reg |= GM_GPCR_AU_ALL_DIS;
- gma_write16(hw, port, GM_GP_CTRL, reg);
- gma_read16(hw, port, GM_GP_CTRL);
-
- switch (sky2->speed) {
- case SPEED_1000:
- reg &= ~GM_GPCR_SPEED_100;
- reg |= GM_GPCR_SPEED_1000;
- break;
- case SPEED_100:
- reg &= ~GM_GPCR_SPEED_1000;
- reg |= GM_GPCR_SPEED_100;
- break;
- case SPEED_10:
- reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100);
- break;
- }
-
- if (sky2->duplex == DUPLEX_FULL)
- reg |= GM_GPCR_DUP_FULL;
-
- /* turn off pause in 10/100mbps half duplex */
- else if (sky2->speed != SPEED_1000 &&
- hw->chip_id != CHIP_ID_YUKON_EC_U)
- sky2->tx_pause = sky2->rx_pause = 0;
- } else
- reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;
-
- if (!sky2->tx_pause && !sky2->rx_pause) {
- sky2_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
- reg |=
- GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
- } else if (sky2->tx_pause && !sky2->rx_pause) {
- /* disable Rx flow-control */
- reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
- }
-
- gma_write16(hw, port, GM_GP_CTRL, reg);
-
sky2_read16(hw, SK_REG(port, GMAC_IRQ_SRC));
+ /* Enable Transmit FIFO Underrun */
+ sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);
+
spin_lock_bh(&sky2->phy_lock);
sky2_phy_init(hw, port);
spin_unlock_bh(&sky2->phy_lock);
/* Update chip's next pointer */
static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
{
+ q = Y2_QADDR(q, PREF_UNIT_PUT_IDX);
wmb();
- sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
- mmiowb();
+ sky2_write16(hw, q, idx);
+ sky2_read16(hw, q);
}
struct sky2_rx_le *le;
le = sky2_next_rx(sky2);
- le->addr = (ETH_HLEN << 16) | ETH_HLEN;
+ le->addr = cpu_to_le32((ETH_HLEN << 16) | ETH_HLEN);
le->ctrl = 0;
le->opcode = OP_TCPSTART | HW_OWNER;
/*
* It appears the hardware has a bug in the FIFO logic that
* cause it to hang if the FIFO gets overrun and the receive buffer
- * is not aligned. ALso alloc_skb() won't align properly if slab
- * debugging is enabled.
+ * is not 64 byte aligned. The buffer returned from netdev_alloc_skb is
+ * aligned except if slab debugging is enabled.
*/
- static inline struct sk_buff *sky2_alloc_skb(unsigned int size, gfp_t gfp_mask)
+ static inline struct sk_buff *sky2_alloc_skb(struct net_device *dev,
+ unsigned int length,
+ gfp_t gfp_mask)
{
struct sk_buff *skb;
- skb = alloc_skb(size + RX_SKB_ALIGN, gfp_mask);
+ skb = __netdev_alloc_skb(dev, length + RX_SKB_ALIGN, gfp_mask);
if (likely(skb)) {
unsigned long p = (unsigned long) skb->data;
skb_reserve(skb, ALIGN(p, RX_SKB_ALIGN) - p);
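Because this hardware wants the receive buffer start aligned (the comment above mentions a 64-byte requirement, and slab debugging can defeat the allocator's usual alignment), the helper over-allocates and then uses skb_reserve() to round skb->data up. A hedged sketch of that rounding, with the alignment value as an assumption:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>
	#include <linux/kernel.h>	/* ALIGN() */

	#define EXAMPLE_RX_ALIGN 64	/* illustrative; the driver uses RX_SKB_ALIGN */

	static struct sk_buff *example_alloc_aligned_skb(struct net_device *dev,
							 unsigned int length)
	{
		/* Over-allocate so there is always room to slide the data pointer. */
		struct sk_buff *skb = netdev_alloc_skb(dev, length + EXAMPLE_RX_ALIGN);

		if (skb) {
			unsigned long p = (unsigned long)skb->data;

			/* Advance skb->data to the next EXAMPLE_RX_ALIGN boundary. */
			skb_reserve(skb, ALIGN(p, EXAMPLE_RX_ALIGN) - p);
		}
		return skb;
	}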
for (i = 0; i < sky2->rx_pending; i++) {
struct ring_info *re = sky2->rx_ring + i;
- re->skb = sky2_alloc_skb(sky2->rx_bufsize, GFP_KERNEL);
+ re->skb = sky2_alloc_skb(sky2->netdev, sky2->rx_bufsize,
+ GFP_KERNEL);
if (!re->skb)
goto nomem;
if (!sky2->rx_ring)
goto err_out;
+ sky2_phy_power(hw, port, 1);
+
sky2_mac_init(hw, port);
/* Determine available ram buffer space (in 4K blocks).
if (skb_is_gso(skb))
++count;
- if (skb->ip_summed == CHECKSUM_HW)
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
++count;
return count;
struct sky2_tx_le *le = NULL;
struct tx_ring_info *re;
unsigned i, len;
- int avail;
dma_addr_t mapping;
u32 addr64;
u16 mss;
/* Send high bits if changed or crosses boundary */
if (addr64 != sky2->tx_addr64 || high32(mapping + len) != sky2->tx_addr64) {
le = get_tx_le(sky2);
- le->tx.addr = cpu_to_le32(addr64);
+ le->addr = cpu_to_le32(addr64);
le->ctrl = 0;
le->opcode = OP_ADDR64 | HW_OWNER;
sky2->tx_addr64 = high32(mapping + len);
/* Check for TCP Segmentation Offload */
mss = skb_shinfo(skb)->gso_size;
if (mss != 0) {
- /* just drop the packet if non-linear expansion fails */
- if (skb_header_cloned(skb) &&
- pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
- dev_kfree_skb(skb);
- goto out_unlock;
- }
-
mss += ((skb->h.th->doff - 5) * 4); /* TCP options */
mss += (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
mss += ETH_HLEN;
- }
- if (mss != sky2->tx_last_mss) {
- le = get_tx_le(sky2);
- le->tx.tso.size = cpu_to_le16(mss);
- le->tx.tso.rsvd = 0;
- le->opcode = OP_LRGLEN | HW_OWNER;
- le->ctrl = 0;
- sky2->tx_last_mss = mss;
+ if (mss != sky2->tx_last_mss) {
+ le = get_tx_le(sky2);
+ le->addr = cpu_to_le32(mss);
+ le->opcode = OP_LRGLEN | HW_OWNER;
+ le->ctrl = 0;
+ sky2->tx_last_mss = mss;
+ }
}
ctrl = 0;
if (sky2->vlgrp && vlan_tx_tag_present(skb)) {
if (!le) {
le = get_tx_le(sky2);
- le->tx.addr = 0;
+ le->addr = 0;
le->opcode = OP_VLAN|HW_OWNER;
le->ctrl = 0;
} else
#endif
/* Handle TCP checksum offload */
- if (skb->ip_summed == CHECKSUM_HW) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
- u16 hdr = skb->h.raw - skb->data;
- u16 offset = hdr + skb->csum;
+ unsigned offset = skb->h.raw - skb->data;
+ u32 tcpsum;
+
+ tcpsum = offset << 16; /* sum start */
+ tcpsum |= offset + skb->csum; /* sum write */
ctrl = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
if (skb->nh.iph->protocol == IPPROTO_UDP)
ctrl |= UDPTCP;
- le = get_tx_le(sky2);
- le->tx.csum.start = cpu_to_le16(hdr);
- le->tx.csum.offset = cpu_to_le16(offset);
- le->length = 0; /* initial checksum value */
- le->ctrl = 1; /* one packet */
- le->opcode = OP_TCPLISW | HW_OWNER;
+ if (tcpsum != sky2->tx_tcpsum) {
+ sky2->tx_tcpsum = tcpsum;
+
+ le = get_tx_le(sky2);
+ le->addr = cpu_to_le32(tcpsum);
+ le->length = 0; /* initial checksum value */
+ le->ctrl = 1; /* one packet */
+ le->opcode = OP_TCPLISW | HW_OWNER;
+ }
}
le = get_tx_le(sky2);
- le->tx.addr = cpu_to_le32((u32) mapping);
+ le->addr = cpu_to_le32((u32) mapping);
le->length = cpu_to_le16(len);
le->ctrl = ctrl;
le->opcode = mss ? (OP_LARGESEND | HW_OWNER) : (OP_PACKET | HW_OWNER);
addr64 = high32(mapping);
if (addr64 != sky2->tx_addr64) {
le = get_tx_le(sky2);
- le->tx.addr = cpu_to_le32(addr64);
+ le->addr = cpu_to_le32(addr64);
le->ctrl = 0;
le->opcode = OP_ADDR64 | HW_OWNER;
sky2->tx_addr64 = addr64;
}
le = get_tx_le(sky2);
- le->tx.addr = cpu_to_le32((u32) mapping);
+ le->addr = cpu_to_le32((u32) mapping);
le->length = cpu_to_le16(frag->size);
le->ctrl = ctrl;
le->opcode = OP_BUFFER | HW_OWNER;
fre = sky2->tx_ring
- + RING_NEXT((re - sky2->tx_ring) + i, TX_RING_SIZE);
+ + RING_NEXT((re - sky2->tx_ring) + i, TX_RING_SIZE);
pci_unmap_addr_set(fre, mapaddr, mapping);
}
re->idx = sky2->tx_prod;
le->ctrl |= EOP;
- avail = tx_avail(sky2);
- if (mss != 0 || avail < TX_MIN_PENDING) {
- le->ctrl |= FRC_STAT;
- if (avail <= MAX_SKB_TX_LE)
- netif_stop_queue(dev);
- }
+ if (tx_avail(sky2) <= MAX_SKB_TX_LE)
+ netif_stop_queue(dev);
sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);
- out_unlock:
spin_unlock(&sky2->tx_lock);
dev->trans_start = jiffies;
/* Stop more packets from being queued */
netif_stop_queue(dev);
- sky2_phy_reset(hw, port);
+ sky2_gmac_reset(hw, port);
/* Stop transmitter */
sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP);
imask &= ~portirq_msk[port];
sky2_write32(hw, B0_IMSK, imask);
+ sky2_phy_power(hw, port, 0);
+
/* turn off LED's */
sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
static u16 sky2_phy_speed(const struct sky2_hw *hw, u16 aux)
{
- if (!hw->copper)
+ if (!sky2_is_copper(hw))
return SPEED_1000;
if (hw->chip_id == CHIP_ID_YUKON_FE)
unsigned port = sky2->port;
u16 reg;
- /* Enable Transmit FIFO Underrun */
- sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);
-
- reg = gma_read16(hw, port, GM_GP_CTRL);
- if (sky2->autoneg == AUTONEG_DISABLE) {
- reg |= GM_GPCR_AU_ALL_DIS;
-
- /* Is write/read necessary? Copied from sky2_mac_init */
- gma_write16(hw, port, GM_GP_CTRL, reg);
- gma_read16(hw, port, GM_GP_CTRL);
-
- switch (sky2->speed) {
- case SPEED_1000:
- reg &= ~GM_GPCR_SPEED_100;
- reg |= GM_GPCR_SPEED_1000;
- break;
- case SPEED_100:
- reg &= ~GM_GPCR_SPEED_1000;
- reg |= GM_GPCR_SPEED_100;
- break;
- case SPEED_10:
- reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100);
- break;
- }
- } else
- reg &= ~GM_GPCR_AU_ALL_DIS;
-
- if (sky2->duplex == DUPLEX_FULL || sky2->autoneg == AUTONEG_ENABLE)
- reg |= GM_GPCR_DUP_FULL;
-
/* enable Rx/Tx */
+ reg = gma_read16(hw, port, GM_GP_CTRL);
reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
gma_write16(hw, port, GM_GP_CTRL, reg);
- gma_read16(hw, port, GM_GP_CTRL);
gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
reg = gma_read16(hw, port, GM_GP_CTRL);
reg &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
gma_write16(hw, port, GM_GP_CTRL, reg);
- gma_read16(hw, port, GM_GP_CTRL); /* PCI post */
if (sky2->rx_pause && !sky2->tx_pause) {
/* restore Asymmetric Pause bit */
if (netif_msg_link(sky2))
printk(KERN_INFO PFX "%s: Link is down.\n", sky2->netdev->name);
+
sky2_phy_init(hw, port);
}
sky2->rx_pause = (aux & PHY_M_PS_RX_P_EN) != 0;
sky2->tx_pause = (aux & PHY_M_PS_TX_P_EN) != 0;
- if ((sky2->tx_pause || sky2->rx_pause)
- && !(sky2->speed < SPEED_1000 && sky2->duplex == DUPLEX_HALF))
+ if (sky2->duplex == DUPLEX_HALF && sky2->speed != SPEED_1000
+ && hw->chip_id != CHIP_ID_YUKON_EC_U)
+ sky2->rx_pause = sky2->tx_pause = 0;
+
+ if (sky2->rx_pause || sky2->tx_pause)
sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
else
sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
printk(KERN_INFO PFX "%s: phy interrupt status 0x%x 0x%x\n",
sky2->netdev->name, istatus, phystat);
- if (istatus & PHY_M_IS_AN_COMPL) {
+ if (sky2->autoneg == AUTONEG_ENABLE && (istatus & PHY_M_IS_AN_COMPL)) {
if (sky2_autoneg_done(sky2, phystat) == 0)
sky2_link_up(sky2);
goto out;
* For small packets or errors, just reuse existing skb.
* For larger packets, get new buffer.
*/
- static struct sk_buff *sky2_receive(struct sky2_port *sky2,
+ static struct sk_buff *sky2_receive(struct net_device *dev,
u16 length, u32 status)
{
+ struct sky2_port *sky2 = netdev_priv(dev);
struct ring_info *re = sky2->rx_ring + sky2->rx_next;
struct sk_buff *skb = NULL;
if (unlikely(netif_msg_rx_status(sky2)))
printk(KERN_DEBUG PFX "%s: rx slot %u status 0x%x len %d\n",
- sky2->netdev->name, sky2->rx_next, status, length);
+ dev->name, sky2->rx_next, status, length);
sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
prefetch(sky2->rx_ring + sky2->rx_next);
if (!(status & GMR_FS_RX_OK))
goto resubmit;
- if (length > sky2->netdev->mtu + ETH_HLEN)
+ if (length > dev->mtu + ETH_HLEN)
goto oversize;
if (length < copybreak) {
- skb = alloc_skb(length + 2, GFP_ATOMIC);
+ skb = netdev_alloc_skb(dev, length + 2);
if (!skb)
goto resubmit;
} else {
struct sk_buff *nskb;
- nskb = sky2_alloc_skb(sky2->rx_bufsize, GFP_ATOMIC);
+ nskb = sky2_alloc_skb(dev, sky2->rx_bufsize, GFP_ATOMIC);
if (!nskb)
goto resubmit;
if (netif_msg_rx_err(sky2) && net_ratelimit())
printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n",
- sky2->netdev->name, status, length);
+ dev->name, status, length);
if (status & (GMR_FS_LONG_ERR | GMR_FS_UN_SIZE))
sky2->net_stats.rx_length_errors++;
}
}
- /* Is status ring empty or is there more to do? */
- static inline int sky2_more_work(const struct sky2_hw *hw)
- {
- return (hw->st_idx != sky2_read16(hw, STAT_PUT_IDX));
- }
-
/* Process status response ring */
static int sky2_status_intr(struct sky2_hw *hw, int to_do)
{
dev = hw->dev[le->link];
sky2 = netdev_priv(dev);
- length = le->length;
- status = le->status;
+ length = le16_to_cpu(le->length);
+ status = le32_to_cpu(le->status);
switch (le->opcode & ~HW_OWNER) {
case OP_RXSTAT:
- skb = sky2_receive(sky2, length, status);
+ skb = sky2_receive(dev, length, status);
if (!skb)
break;
- skb->dev = dev;
skb->protocol = eth_type_trans(skb, dev);
dev->last_rx = jiffies;
#endif
case OP_RXCHKS:
skb = sky2->rx_ring[sky2->rx_next].skb;
- skb->ip_summed = CHECKSUM_HW;
+ skb->ip_summed = CHECKSUM_COMPLETE;
- skb->csum = le16_to_cpu(status);
+ skb->csum = status & 0xffff;
break;
case OP_TXINDEXLE:
}
}
+ /* Fully processed status ring so clear irq */
+ sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
+
exit_loop:
if (buf_write[0]) {
sky2 = netdev_priv(hw->dev[0]);
sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2);
work_done = sky2_status_intr(hw, work_limit);
- *budget -= work_done;
- dev0->quota -= work_done;
-
- if (status & Y2_IS_STAT_BMU)
- sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
+ if (work_done < work_limit) {
+ netif_rx_complete(dev0);
- if (sky2_more_work(hw))
+ sky2_read32(hw, B0_Y2_SP_LISR);
+ return 0;
+ } else {
+ *budget -= work_done;
+ dev0->quota -= work_done;
return 1;
-
- netif_rx_complete(dev0);
-
- sky2_read32(hw, B0_Y2_SP_LISR);
- return 0;
+ }
}
static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
static int sky2_reset(struct sky2_hw *hw)
{
u16 status;
- u8 t8, pmd_type;
+ u8 t8;
int i;
sky2_write8(hw, B0_CTST, CS_RST_CLR);
sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL);
- pmd_type = sky2_read8(hw, B2_PMD_TYP);
- hw->copper = !(pmd_type == 'L' || pmd_type == 'S');
-
+ hw->pmd_type = sky2_read8(hw, B2_PMD_TYP);
hw->ports = 1;
t8 = sky2_read8(hw, B2_Y2_HW_RES);
if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
sky2_write32(hw, B0_HWE_IMSK, Y2_HWE_ALL_MASK);
for (i = 0; i < hw->ports; i++)
- sky2_phy_reset(hw, i);
+ sky2_gmac_reset(hw, i);
memset(hw->st_le, 0, STATUS_LE_BYTES);
hw->st_idx = 0;
static u32 sky2_supported_modes(const struct sky2_hw *hw)
{
- u32 modes;
- if (hw->copper) {
- modes = SUPPORTED_10baseT_Half
- | SUPPORTED_10baseT_Full
- | SUPPORTED_100baseT_Half
- | SUPPORTED_100baseT_Full
- | SUPPORTED_Autoneg | SUPPORTED_TP;
+ if (sky2_is_copper(hw)) {
+ u32 modes = SUPPORTED_10baseT_Half
+ | SUPPORTED_10baseT_Full
+ | SUPPORTED_100baseT_Half
+ | SUPPORTED_100baseT_Full
+ | SUPPORTED_Autoneg | SUPPORTED_TP;
if (hw->chip_id != CHIP_ID_YUKON_FE)
modes |= SUPPORTED_1000baseT_Half
- | SUPPORTED_1000baseT_Full;
+ | SUPPORTED_1000baseT_Full;
+ return modes;
} else
- modes = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
- | SUPPORTED_Autoneg;
- return modes;
+ return SUPPORTED_1000baseT_Half
+ | SUPPORTED_1000baseT_Full
+ | SUPPORTED_Autoneg
+ | SUPPORTED_FIBRE;
}
static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
ecmd->transceiver = XCVR_INTERNAL;
ecmd->supported = sky2_supported_modes(hw);
ecmd->phy_address = PHY_ADDR_MARV;
- if (hw->copper) {
+ if (sky2_is_copper(hw)) {
ecmd->supported = SUPPORTED_10baseT_Half
| SUPPORTED_10baseT_Full
| SUPPORTED_100baseT_Half
| SUPPORTED_1000baseT_Full
| SUPPORTED_Autoneg | SUPPORTED_TP;
ecmd->port = PORT_TP;
- } else
+ ecmd->speed = sky2->speed;
+ } else {
+ ecmd->speed = SPEED_1000;
ecmd->port = PORT_FIBRE;
+ }
ecmd->advertising = sky2->advertising;
ecmd->autoneg = sky2->autoneg;
- ecmd->speed = sky2->speed;
ecmd->duplex = sky2->duplex;
return 0;
}
struct ethtool_pauseparam *ecmd)
{
struct sky2_port *sky2 = netdev_priv(dev);
- int err = 0;
sky2->autoneg = ecmd->autoneg;
sky2->tx_pause = ecmd->tx_pause != 0;
sky2_phy_reinit(sky2);
- return err;
+ return 0;
}
static int sky2_get_coalesce(struct net_device *dev,
regs->len - B3_RI_WTO_R1);
}
- static struct ethtool_ops sky2_ethtool_ops = {
+ static const struct ethtool_ops sky2_ethtool_ops = {
.get_settings = sky2_get_settings,
.set_settings = sky2_set_settings,
.get_drvinfo = sky2_get_drvinfo,
struct pci_dev *pdev = hw->pdev;
int err;
+ init_waitqueue_head (&hw->msi_wait);
+
sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
err = request_irq(pdev->irq, sky2_test_intr, IRQF_SHARED, DRV_NAME, hw);
return err;
}
- init_waitqueue_head (&hw->msi_wait);
-
sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);
- wmb();
+ sky2_read8(hw, B0_CTST);
wait_event_timeout(hw->msi_wait, hw->msi_detected, HZ/10);
hw->pm_cap = pm_cap;
#ifdef __BIG_ENDIAN
- /* byte swap descriptors in hardware */
+ /* The sk98lin vendor driver uses hardware byte swapping but
+ * this driver uses software swapping.
+ */
{
u32 reg;
-
reg = sky2_pci_read32(hw, PCI_DEV_REG2);
- reg |= PCI_REV_DESC;
+ reg &= ~PCI_REV_DESC;
sky2_pci_write32(hw, PCI_DEV_REG2, reg);
}
#endif
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int netdev_close(struct net_device *dev);
static void netdev_media_change(struct net_device *dev);
- static struct ethtool_ops ethtool_ops;
+ static const struct ethtool_ops ethtool_ops;
#ifdef VLAN_SUPPORT
}
#if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
- if (skb->ip_summed == CHECKSUM_HW) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK))
return NETDEV_TX_OK;
}
status |= TxDescIntr;
np->reap_tx = 0;
}
- if (skb->ip_summed == CHECKSUM_HW) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
status |= TxCalTCP;
np->stats.tx_compressed++;
}
* Until then, the printk stays. :-) -Ion
*/
else if (le16_to_cpu(desc->status2) & 0x0040) {
- skb->ip_summed = CHECKSUM_HW;
+ skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = le16_to_cpu(desc->csum);
printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2));
}
debug = val;
}
- static struct ethtool_ops ethtool_ops = {
+ static const struct ethtool_ops ethtool_ops = {
.begin = check_if_running,
.get_drvinfo = get_drvinfo,
.get_settings = get_settings,
static int starfire_resume(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
-
+
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
return -ENODEV;
}
- return pci_module_init (&starfire_driver);
+ return pci_register_driver(&starfire_driver);
}
* sungem.c: Sun GEM ethernet driver.
*
- *
+ *
* Support for Apple GMAC and assorted PHYs, WOL, Power Management
* (C) 2004,2005 Benjamin Herrenscmidt, IBM Corp.
*
* NAPI and NETPOLL support
- *
- * TODO:
+ *
+ * TODO:
* - Now that the driver was significantly simplified, I need to rework
* the locking. I'm sure we don't need _2_ spinlocks, and we probably
* can avoid taking most of them for so long period of time (and schedule
* instead). The main issues at this point are caused by the netdev layer
* though:
- *
+ *
* gem_change_mtu() and gem_set_multicast() are called with a read_lock()
* help by net/core/dev.c, thus they can't schedule. That means they can't
* call netif_poll_disable() neither, thus force gem_poll() to keep a spinlock
/* These models only differ from the original GEM in
* that their tx/rx fifos are of a different size and
* they only support 10/100 speeds. -DaveM
- *
+ *
* Apple's GMAC does support gigabit on machines with
* the BCM54xx PHYs. -BenH
*/
}
skb->csum = ntohs((status & RXDCTRL_TCPCSUM) ^ 0xffff);
- skb->ip_summed = CHECKSUM_HW;
+ skb->ip_summed = CHECKSUM_COMPLETE;
skb->protocol = eth_type_trans(skb, gp->dev);
netif_receive_skb(skb);
unsigned long flags;
/*
- * NAPI locking nightmare: See comment at head of driver
+ * NAPI locking nightmare: See comment at head of driver
*/
spin_lock_irqsave(&gp->lock, flags);
spin_unlock_irqrestore(&gp->lock, flags);
- /* Run RX thread. We don't use any locking here,
- * code willing to do bad things - like cleaning the
+ /* Run RX thread. We don't use any locking here,
+ * code willing to do bad things - like cleaning the
* rx ring - must call netif_poll_disable(), which
* schedule_timeout()'s if polling is already disabled.
*/
return 1;
spin_lock_irqsave(&gp->lock, flags);
-
+
gp->status = readl(gp->regs + GREG_STAT);
} while (gp->status & GREG_STAT_NAPI);
return IRQ_HANDLED;
spin_lock_irqsave(&gp->lock, flags);
-
+
if (netif_rx_schedule_prep(dev)) {
u32 gem_status = readl(gp->regs + GREG_STAT);
}
spin_unlock_irqrestore(&gp->lock, flags);
-
+
/* If polling was disabled at the time we received that
- * interrupt, we may return IRQ_HANDLED here while we
+ * interrupt, we may return IRQ_HANDLED here while we
* should return IRQ_NONE. No big deal...
*/
return IRQ_HANDLED;
unsigned long flags;
ctrl = 0;
- if (skb->ip_summed == CHECKSUM_HW) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
u64 csum_start_off, csum_stuff_off;
csum_start_off = (u64) (skb->h.raw - skb->data);
this_ctrl = ctrl;
if (frag == skb_shinfo(skb)->nr_frags - 1)
this_ctrl |= TXDCTRL_EOF;
-
+
txd = &gp->init_block->txd[entry];
txd->buffer = cpu_to_le64(mapping);
wmb();
static void gem_start_dma(struct gem *gp)
{
u32 val;
-
+
/* We are ready to rock, turn everything on. */
val = readl(gp->regs + TXDMA_CFG);
writel(val | TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
autoneg = gp->want_autoneg;
speed = gp->phy_mii.speed;
duplex = gp->phy_mii.duplex;
-
+
/* Setup link parameters */
if (!ep)
goto start_aneg;
duplex = DUPLEX_HALF;
if (speed == 0)
speed = SPEED_10;
-
+
/* If we are asleep, we don't try to actually setup the PHY, we
* just store the settings
*/
val |= (MAC_TXCFG_ICS | MAC_TXCFG_ICOLL);
} else {
/* MAC_TXCFG_NBO must be zero. */
- }
+ }
writel(val, gp->regs + MAC_TXCFG);
val = (MAC_XIFCFG_OE | MAC_XIFCFG_LLED);
{
struct gem *gp = (struct gem *) data;
int restart_aneg = 0;
-
+
if (gp->asleep)
return;
*/
if (gp->reset_task_pending)
goto restart;
-
+
if (gp->phy_type == phy_serialink ||
gp->phy_type == phy_serdes) {
u32 val = readl(gp->regs + PCS_MIISTAT);
mifcfg = readl(gp->regs + MIF_CFG);
mifcfg &= ~MIF_CFG_BBMODE;
writel(mifcfg, gp->regs + MIF_CFG);
-
+
if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
int i;
{
u32 rxcfg = 0;
int i;
-
+
if ((gp->dev->flags & IFF_ALLMULTI) ||
(gp->dev->mc_count > 256)) {
for (i=0; i<16; i++)
cfg = ((2 << 1) & GREG_CFG_TXDMALIM);
cfg |= ((8 << 6) & GREG_CFG_RXDMALIM);
writel(cfg, gp->regs + GREG_CFG);
- }
+ }
}
static int gem_check_invariants(struct gem *gp)
/* Determine initial PHY interface type guess. MDIO1 is the
* external PHY and thus takes precedence over MDIO0.
*/
-
+
if (mif_cfg & MIF_CFG_MDI1) {
gp->phy_type = phy_mii_mdio1;
mif_cfg |= MIF_CFG_PSELECT;
/* Setup wake-on-lan for MAGIC packet */
writel(MAC_RXCFG_HFE | MAC_RXCFG_SFCS | MAC_RXCFG_ENAB,
- gp->regs + MAC_RXCFG);
+ gp->regs + MAC_RXCFG);
writel((e[4] << 8) | e[5], gp->regs + WOL_MATCH0);
writel((e[2] << 8) | e[3], gp->regs + WOL_MATCH1);
writel((e[0] << 8) | e[1], gp->regs + WOL_MATCH2);
gem_reset(gp);
gem_clean_rings(gp);
gem_put_cell(gp);
-
+
spin_unlock(&gp->tx_lock);
spin_unlock_irqrestore(&gp->lock, flags);
mutex_lock(&gp->pm_mutex);
- gp->opened = 0;
+ gp->opened = 0;
if (!gp->asleep)
gem_do_stop(dev, 0);
mutex_unlock(&gp->pm_mutex);
-
+
return 0;
}
printk(KERN_INFO "%s: suspending, WakeOnLan %s\n",
dev->name,
(gp->wake_on_lan && gp->opened) ? "enabled" : "disabled");
-
+
/* Keep the cell enabled during the entire operation */
spin_lock_irqsave(&gp->lock, flags);
spin_lock(&gp->tx_lock);
spin_unlock_irqrestore(&gp->lock, flags);
netif_poll_enable(dev);
-
+
mutex_unlock(&gp->pm_mutex);
return 0;
struct gem *gp = dev->priv;
u32 rxcfg, rxcfg_new;
int limit = 10000;
-
+
spin_lock_irq(&gp->lock);
spin_lock(&gp->tx_lock);
rxcfg_new |= MAC_RXCFG_SFCS;
#endif
gp->mac_rx_cfg = rxcfg_new;
-
+
writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) {
if (!limit--)
static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct gem *gp = dev->priv;
-
+
strcpy(info->driver, DRV_NAME);
strcpy(info->version, DRV_VERSION);
strcpy(info->bus_info, pci_name(gp->pdev));
}
-
+
static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct gem *gp = dev->priv;
spin_lock_irq(&gp->lock);
cmd->autoneg = gp->want_autoneg;
cmd->speed = gp->phy_mii.speed;
- cmd->duplex = gp->phy_mii.duplex;
+ cmd->duplex = gp->phy_mii.duplex;
cmd->advertising = gp->phy_mii.advertising;
/* If we started with a forced mode, we don't have a default
(cmd->duplex != DUPLEX_HALF &&
cmd->duplex != DUPLEX_FULL)))
return -EINVAL;
-
+
/* Apply settings and restart link process. */
spin_lock_irq(&gp->lock);
gem_get_cell(gp);
struct gem *gp = dev->priv;
return gp->msg_enable;
}
-
+
static void gem_set_msglevel(struct net_device *dev, u32 value)
{
struct gem *gp = dev->priv;
return 0;
}
- static struct ethtool_ops gem_ethtool_ops = {
+ static const struct ethtool_ops gem_ethtool_ops = {
.get_drvinfo = gem_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_settings = gem_get_settings,
* with power management.
*/
mutex_lock(&gp->pm_mutex);
-
+
spin_lock_irqsave(&gp->lock, flags);
gem_get_cell(gp);
spin_unlock_irqrestore(&gp->lock, flags);
}
break;
};
-
+
spin_lock_irqsave(&gp->lock, flags);
gem_put_cell(gp);
spin_unlock_irqrestore(&gp->lock, flags);
mutex_unlock(&gp->pm_mutex);
-
+
return rc;
}
}
pci_using_dac = 0;
}
-
+
gemreg_base = pci_resource_start(pdev, 0);
gemreg_len = pci_resource_len(pdev, 0);
gp->link_timer.data = (unsigned long) gp;
INIT_WORK(&gp->reset_task, gem_reset_task, gp);
-
+
gp->lstate = link_down;
gp->timer_ticks = 0;
netif_carrier_off(dev);
if (gp->phy_type == phy_mii_mdio0 ||
gp->phy_type == phy_mii_mdio1)
- printk(KERN_INFO "%s: Found %s PHY\n", dev->name,
+ printk(KERN_INFO "%s: Found %s PHY\n", dev->name,
gp->phy_mii.def ? gp->phy_mii.def->name : "no");
/* GEM can do it all... */
static int __init gem_init(void)
{
- return pci_module_init(&gem_driver);
+ return pci_register_driver(&gem_driver);
}
static void __exit gem_cleanup(void)
unsigned short value)
{
int tries = TCVR_WRITE_TRIES;
-
+
ASD(("happy_meal_tcvr_write: reg=0x%02x value=%04x\n", reg, value));
/* Welcome to Sun Microsystems, can I take your order please? */
* flags, thus:
*
* skb->csum = rxd->rx_flags & 0xffff;
- * skb->ip_summed = CHECKSUM_HW;
+ * skb->ip_summed = CHECKSUM_COMPLETE;
*
* before sending off the skb to the protocols, and we are good as gold.
*/
static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status)
{
int reset = 0;
-
+
/* Only print messages for non-counter related interrupts. */
if (status & (GREG_STAT_STSTERR | GREG_STAT_TFIFO_UND |
GREG_STAT_MAXPKTERR | GREG_STAT_RXERR |
/* This card is _fucking_ hot... */
skb->csum = ntohs(csum ^ 0xffff);
- skb->ip_summed = CHECKSUM_HW;
+ skb->ip_summed = CHECKSUM_COMPLETE;
RXD(("len=%d csum=%4x]", len, csum));
skb->protocol = eth_type_trans(skb, dev);
u32 tx_flags;
tx_flags = TXFLAG_OWN;
- if (skb->ip_summed == CHECKSUM_HW) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
u32 csum_start_off, csum_stuff_off;
csum_start_off = (u32) (skb->h.raw - skb->data);
return (hp->sw_bmsr & BMSR_LSTATUS);
}
- static struct ethtool_ops hme_ethtool_ops = {
+ static const struct ethtool_ops hme_ethtool_ops = {
.get_settings = hme_get_settings,
.set_settings = hme_set_settings,
.get_drvinfo = hme_get_drvinfo,
printk(KERN_ERR "happymeal(PCI): Some PCI device info missing\n");
return -ENODEV;
}
-
+
strcpy(prom_name, pcp->prom_node->name);
#else
if (is_quattro_p(pdev))
hp->qfe_parent = qp;
hp->qfe_ent = qfe_slot;
qp->happy_meals[qfe_slot] = dev;
- }
+ }
hpreg_res = pci_resource_start(pdev, 0);
err = -ENODEV;
get_hme_mac_nonsparc(pdev, &dev->dev_addr[0]);
#endif
}
-
+
/* Layout registers. */
hp->gregs = (hpreg_base + 0x0000UL);
hp->etxregs = (hpreg_base + 0x2000UL);
qpdev->device == PCI_DEVICE_ID_DEC_21153)
printk("DEC 21153 PCI Bridge\n");
else
- printk("unknown bridge %04x.%04x\n",
+ printk("unknown bridge %04x.%04x\n",
qpdev->vendor, qpdev->device);
}
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
static struct pci_device_id tg3_pci_tbl[] = {
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { 0, }
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
+ {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
+ {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
+ {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
+ {}
};
MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
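/* For reference: PCI_DEVICE() from <linux/pci.h> fills in the vendor/device
 * pair and wildcards the subsystem IDs, so each new entry is equivalent to
 * the long-hand { vendor, device, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }
 * initializer it replaces.  Roughly (a sketch of the header definition):
 *
 *	#define PCI_DEVICE(vend, dev) \
 *		.vendor = (vend), .device = (dev), \
 *		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
 */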
-static struct {
+static const struct {
const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
{ "rx_octets" },
{ "nic_tx_threshold_hit" }
};
-static struct {
+static const struct {
const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
{ "nvram test (online) " },
static u32 tg3_read32(struct tg3 *tp, u32 off)
{
- return (readl(tp->regs + off));
+ return (readl(tp->regs + off));
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
/* tg3_restart_ints
* similar to tg3_enable_ints, but it accurately determines whether there
* is new work pending and can return without flushing the PIO write
- * which reenables interrupts
+ * which reenables interrupts
*/
static void tg3_restart_ints(struct tg3 *tp)
{
frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
MI_COM_REG_ADDR_MASK);
frame_val |= (MI_COM_CMD_READ | MI_COM_START);
-
+
tw32_f(MAC_MI_COM, frame_val);
loops = PHY_BUSY_LOOPS;
MI_COM_REG_ADDR_MASK);
frame_val |= (val & MI_COM_DATA_MASK);
frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
-
+
tw32_f(MAC_MI_COM, frame_val);
loops = PHY_BUSY_LOOPS;
if (old_rx_mode != tp->rx_mode) {
tw32_f(MAC_RX_MODE, tp->rx_mode);
}
-
+
if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
else
if (tp->link_config.autoneg == AUTONEG_ENABLE) {
u32 flags;
int i;
-
+
if (fiber_autoneg(tp, &flags)) {
u32 local_adv, remote_adv;
len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
- if (len > RX_COPY_THRESHOLD
+ if (len > RX_COPY_THRESHOLD
&& tp->rx_offset == 2
/* rx_offset != 2 iff this is a 5701 card running
* in PCI-X mode [see tg3_get_invariants()] */
skb->h.th->check = 0;
}
- else if (skb->ip_summed == CHECKSUM_HW)
+ else if (skb->ip_summed == CHECKSUM_PARTIAL)
base_flags |= TXD_FLAG_TCPUDP_CSUM;
#else
mss = 0;
- if (skb->ip_summed == CHECKSUM_HW)
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
base_flags |= TXD_FLAG_TCPUDP_CSUM;
#endif
#if TG3_VLAN_TAG_USED
entry = tp->tx_prod;
base_flags = 0;
- if (skb->ip_summed == CHECKSUM_HW)
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_TSO_SUPPORT != 0
mss = 0;
#define TG3_FW_BSS_ADDR 0x08000a70
#define TG3_FW_BSS_LEN 0x10
-static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
+static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
-static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
+static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
struct fw_info {
unsigned int text_base;
unsigned int text_len;
- u32 *text_data;
+ const u32 *text_data;
unsigned int rodata_base;
unsigned int rodata_len;
- u32 *rodata_data;
+ const u32 *rodata_data;
unsigned int data_base;
unsigned int data_len;
- u32 *data_data;
+ const u32 *data_data;
};
/* tp->lock is held. */
#define TG3_TSO_FW_BSS_ADDR 0x08001b80
#define TG3_TSO_FW_BSS_LEN 0x894
-static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
+static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
};
-static u32 tg3TsoFwRodata[] = {
+static const u32 tg3TsoFwRodata[] = {
0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
0x00000000,
};
-static u32 tg3TsoFwData[] = {
+static const u32 tg3TsoFwData[] = {
0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000,
#define TG3_TSO5_FW_BSS_ADDR 0x00010f50
#define TG3_TSO5_FW_BSS_LEN 0x88
-static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
+static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
0x00000000, 0x00000000, 0x00000000,
};
-static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
+static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
0x00000000, 0x00000000, 0x00000000,
};
-static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
+static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
0x00000000, 0x00000000, 0x00000000,
};
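/* Making the firmware images const lets the compiler place these large blobs
 * in read-only data; the knock-on change is that anything pointing at them,
 * such as the fw_info members above, must become const u32 * as well.  A
 * minimal sketch of how such a const table is walked when it is copied into
 * NIC memory (helper name hypothetical, modeled on the driver's loader):
 */
static void tg3_copy_fw_words(struct tg3 *tp, u32 base, unsigned int len,
			      const u32 *data)
{
	unsigned int i;

	/* copy each 32-bit word of the read-only image into chip memory */
	for (i = 0; i < len; i += sizeof(u32))
		tg3_write_mem(tp, base + i, data[i / sizeof(u32)]);
}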
tg3_disable_ints(tp);
free_irq(tp->pdev->irq, dev);
-
+
err = tg3_request_irq(tp);
if (err)
get_stat64(&hw_stats->rx_ucast_packets) +
get_stat64(&hw_stats->rx_mcast_packets) +
get_stat64(&hw_stats->rx_bcast_packets);
-
+
stats->tx_packets = old_stats->tx_packets +
get_stat64(&hw_stats->tx_ucast_packets) +
get_stat64(&hw_stats->tx_mcast_packets) +
return 0;
}
- static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
+ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct tg3 *tp = netdev_priv(dev);
-
+
cmd->supported = (SUPPORTED_Autoneg);
if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
cmd->supported |= SUPPORTED_FIBRE;
cmd->port = PORT_FIBRE;
}
-
+
cmd->advertising = tp->link_config.advertising;
if (netif_running(dev)) {
cmd->speed = tp->link_config.active_speed;
cmd->maxrxpkt = 0;
return 0;
}
-
+
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct tg3 *tp = netdev_priv(dev);
-
- if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
+
+ if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
/* These are the only valid advertisement bits allowed. */
if (cmd->autoneg == AUTONEG_ENABLE &&
(cmd->advertising & ~(ADVERTISED_1000baseT_Half |
tp->link_config.speed = cmd->speed;
tp->link_config.duplex = cmd->duplex;
}
-
+
if (netif_running(dev))
tg3_setup_phy(tp, 1);
tg3_full_unlock(tp);
-
+
return 0;
}
-
+
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct tg3 *tp = netdev_priv(dev);
-
+
strcpy(info->driver, DRV_MODULE_NAME);
strcpy(info->version, DRV_MODULE_VERSION);
strcpy(info->fw_version, tp->fw_ver);
strcpy(info->bus_info, pci_name(tp->pdev));
}
-
+
static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct tg3 *tp = netdev_priv(dev);
-
+
wol->supported = WAKE_MAGIC;
wol->wolopts = 0;
if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
wol->wolopts = WAKE_MAGIC;
memset(&wol->sopass, 0, sizeof(wol->sopass));
}
-
+
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct tg3 *tp = netdev_priv(dev);
-
+
if (wol->wolopts & ~WAKE_MAGIC)
return -EINVAL;
if ((wol->wolopts & WAKE_MAGIC) &&
tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
!(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
return -EINVAL;
-
+
spin_lock_bh(&tp->lock);
if (wol->wolopts & WAKE_MAGIC)
tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
else
tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
spin_unlock_bh(&tp->lock);
-
+
return 0;
}
-
+
static u32 tg3_get_msglevel(struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
return tp->msg_enable;
}
-
+
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
struct tg3 *tp = netdev_priv(dev);
tp->msg_enable = value;
}
-
+
#if TG3_TSO_SUPPORT != 0
static int tg3_set_tso(struct net_device *dev, u32 value)
{
return ethtool_op_set_tso(dev, value);
}
#endif
-
+
static int tg3_nway_reset(struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
u32 bmcr;
int r;
-
+
if (!netif_running(dev))
return -EAGAIN;
r = 0;
}
spin_unlock_bh(&tp->lock);
-
+
return r;
}
-
+
static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
struct tg3 *tp = netdev_priv(dev);
-
+
ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
ering->rx_mini_max_pending = 0;
if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
ering->tx_pending = tp->tx_pending;
}
-
+
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
struct tg3 *tp = netdev_priv(dev);
int irq_sync = 0, err = 0;
-
+
if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
(ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
(ering->tx_pending > TG3_TX_RING_SIZE - 1))
return -EINVAL;
-
+
if (netif_running(dev)) {
tg3_netif_stop(tp);
irq_sync = 1;
}
tg3_full_lock(tp, irq_sync);
-
+
tp->rx_pending = ering->rx_pending;
if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
}
tg3_full_unlock(tp);
-
+
return err;
}
-
+
static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
struct tg3 *tp = netdev_priv(dev);
-
+
epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
}
-
+
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
struct tg3 *tp = netdev_priv(dev);
int irq_sync = 0, err = 0;
-
+
if (netif_running(dev)) {
tg3_netif_stop(tp);
irq_sync = 1;
}
tg3_full_unlock(tp);
-
+
return err;
}
-
+
static u32 tg3_get_rx_csum(struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
}
-
+
static int tg3_set_rx_csum(struct net_device *dev, u32 data)
{
struct tg3 *tp = netdev_priv(dev);
-
+
if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
if (data != 0)
return -EINVAL;
return 0;
}
-
+
spin_lock_bh(&tp->lock);
if (data)
tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
else
tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
spin_unlock_bh(&tp->lock);
-
+
return 0;
}
-
+
static int tg3_set_tx_csum(struct net_device *dev, u32 data)
{
struct tg3 *tp = netdev_priv(dev);
-
+
if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
if (data != 0)
return -EINVAL;
return 0;
}
-
+
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
ethtool_op_set_tx_hw_csum(dev, data);
LED_CTRL_TRAFFIC_OVERRIDE |
LED_CTRL_TRAFFIC_BLINK |
LED_CTRL_TRAFFIC_LED);
-
+
else
tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
LED_CTRL_TRAFFIC_OVERRIDE);
0x00000000, 0xffff0002 },
{ RCVDBDI_STD_BD+0xc, 0x0000,
0x00000000, 0xffffffff },
-
+
/* Receive BD Initiator Control Registers. */
{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
0x00000000, 0xffffffff },
0x00000000, 0x000003ff },
{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
0x00000000, 0xffffffff },
-
+
/* Host Coalescing Control Registers. */
{ HOSTCC_MODE, TG3_FL_NOT_5705,
0x00000000, 0x00000004 },
0xffffffff, 0x00000000 },
{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
0xffffffff, 0x00000000 },
-
+
/* Mailbox Registers */
{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
0x00000000, 0x000001ff },
mem_tbl[i].len)) != 0)
break;
}
-
+
return err;
}
goto out;
}
err = 0;
-
+
/* tg3_free_rings will unmap and free the rx_skb */
out:
return err;
return 0;
}
- static struct ethtool_ops tg3_ethtool_ops = {
+ static const struct ethtool_ops tg3_ethtool_ops = {
.get_settings = tg3_get_settings,
.set_settings = tg3_set_settings,
.get_drvinfo = tg3_get_drvinfo,
tp->nvram_size = cursize;
}
-
+
static void __devinit tg3_get_nvram_size(struct tg3 *tp)
{
u32 val;
(addr & EEPROM_ADDR_ADDR_MASK) |
EEPROM_ADDR_START |
EEPROM_ADDR_WRITE);
-
+
for (j = 0; j < 10000; j++) {
val = tr32(GRC_EEPROM_ADDR);
u32 phy_addr, page_off, size;
phy_addr = offset & ~pagemask;
-
+
for (j = 0; j < pagesize; j += 4) {
if ((ret = tg3_nvram_read(tp, phy_addr + j,
(u32 *) (tmp + j))))
if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
(bmsr & BMSR_LSTATUS))
goto skip_phy_reset;
-
+
err = tg3_phy_reset(tp);
if (err)
return err;
* When the flag is set, it means that GPIO1 is used for eeprom
* write protect and also implies that it is a LOM where GPIOs
* are not used to switch power.
- */
+ */
tg3_get_eeprom_hw_cfg(tp);
/* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
static int __init tg3_init(void)
{
- return pci_module_init(&tg3_driver);
+ return pci_register_driver(&tg3_driver);
}
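/* pci_module_init() is, by this point, only a compatibility alias kept in
 * <linux/pci.h> until all in-tree users are converted, roughly:
 *
 *	#define pci_module_init	pci_register_driver
 *
 * so this switch (and the identical ones in typhoon, via-rhine and
 * via-velocity below) does not change behaviour; it only drops the
 * obsolete name.
 */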
static void __exit tg3_cleanup(void)
#define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc))
/* The 3XP will preload and remove 64 entries from the free buffer
- * list, and we need one entry to keep the ring from wrapping, so
+ * list, and we need one entry to keep the ring from wrapping, so
* to keep this a power of two, we use 128 entries.
*/
#define RXFREE_ENTRIES 128
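/* i.e. 64 entries preloaded by the 3XP plus the one entry kept free so the
 * ring cannot wrap onto itself gives a minimum of 65 usable slots; the next
 * power of two is 128.
 */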
#define PKT_BUF_SZ 1536
#define DRV_MODULE_NAME "typhoon"
- #define DRV_MODULE_VERSION "1.5.7"
- #define DRV_MODULE_RELDATE "05/01/07"
+ #define DRV_MODULE_VERSION "1.5.8"
+ #define DRV_MODULE_RELDATE "06/11/09"
#define PFX DRV_MODULE_NAME ": "
#define ERR_PFX KERN_ERR PFX
struct typhoon {
/* Tx cache line section */
- struct transmit_ring txLoRing ____cacheline_aligned;
+ struct transmit_ring txLoRing ____cacheline_aligned;
struct pci_dev * tx_pdev;
void __iomem *tx_ioaddr;
u32 txlo_dma_addr;
first_txd->addrHi = (u64)((unsigned long) skb) >> 32;
first_txd->processFlags = 0;
- if(skb->ip_summed == CHECKSUM_HW) {
+ if(skb->ip_summed == CHECKSUM_PARTIAL) {
/* The 3XP will figure out if this is UDP/TCP */
first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
if(dev->flags & IFF_PROMISC) {
- printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
- dev->name);
filter |= TYPHOON_RX_FILTER_PROMISCOUS;
} else if((dev->mc_count > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
} else {
u32 sleep_ver = xp_resp[0].parm2;
snprintf(info->fw_version, 32, "%02x.%03x.%03x",
- sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
+ sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
sleep_ver & 0xfff);
}
}
ering->tx_pending = TXLO_ENTRIES - 1;
}
- static struct ethtool_ops typhoon_ethtool_ops = {
+ static const struct ethtool_ops typhoon_ethtool_ops = {
.get_settings = typhoon_get_settings,
.set_settings = typhoon_set_settings,
.get_drvinfo = typhoon_get_drvinfo,
goto out;
}
- if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
+ if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
printk(KERN_ERR "%s: unable to go back to sleep\n", dev->name);
out:
"(%u:%04x)\n", dev->name, xp_resp[0].numDesc,
le32_to_cpu(xp_resp[0].parm2));
}
-
+
return 0;
error_out_reset:
static int __init
typhoon_init(void)
{
- return pci_module_init(&typhoon_driver);
+ return pci_register_driver(&typhoon_driver);
}
static void __exit
*/
#define DRV_NAME "via-rhine"
- #define DRV_VERSION "1.4.1"
- #define DRV_RELDATE "July-24-2006"
+ #define DRV_VERSION "1.4.2"
+ #define DRV_RELDATE "Sept-11-2006"
/* A few user-configurable values.
static void rhine_set_rx_mode(struct net_device *dev);
static struct net_device_stats *rhine_get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
- static struct ethtool_ops netdev_ethtool_ops;
+ static const struct ethtool_ops netdev_ethtool_ops;
static int rhine_close(struct net_device *dev);
static void rhine_shutdown (struct pci_dev *pdev);
rp->tx_skbuff[entry] = skb;
if ((rp->quirks & rqRhineI) &&
- (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_HW)) {
+ (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
/* Must use alignment buffer. */
if (skb->len > PKT_BUF_SZ) {
/* packet too long, drop it */
u8 rx_mode; /* Note: 0x02=accept runt, 0x01=accept errs */
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
- /* Unconditionally log net taps. */
- printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
- dev->name);
rx_mode = 0x1C;
iowrite32(0xffffffff, ioaddr + MulticastFilter0);
iowrite32(0xffffffff, ioaddr + MulticastFilter1);
return 0;
}
- static struct ethtool_ops netdev_ethtool_ops = {
+ static const struct ethtool_ops netdev_ethtool_ops = {
.get_drvinfo = netdev_get_drvinfo,
.get_settings = netdev_get_settings,
.set_settings = netdev_set_settings,
#ifdef MODULE
printk(version);
#endif
- return pci_module_init(&rhine_driver);
+ return pci_register_driver(&rhine_driver);
}
static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
- static struct ethtool_ops velocity_ethtool_ops;
+ static const struct ethtool_ops velocity_ethtool_ops;
/*
Define module options
if (val == -1)
*opt |= (def ? flag : 0);
else if (val < 0 || val > 1) {
- printk(KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (0-1)\n",
+ printk(KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (0-1)\n",
devname, name);
*opt |= (def ? flag : 0);
} else {
- printk(KERN_INFO "%s: set parameter %s to %s\n",
+ printk(KERN_INFO "%s: set parameter %s to %s\n",
devname, name, val ? "TRUE" : "FALSE");
*opt |= (val ? flag : 0);
}
* hardware.
*/
- static void velocity_init_registers(struct velocity_info *vptr,
+ static void velocity_init_registers(struct velocity_info *vptr,
enum velocity_init_type type)
{
struct mac_regs __iomem * regs = vptr->mac_regs;
mac_clear_isr(regs);
writel(CR0_STOP, &regs->CR0Clr);
- writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
+ writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
&regs->CR0Set);
break;
* can support more than MAX_UNITS.
*/
if (velocity_nics >= MAX_UNITS) {
- dev_notice(&pdev->dev, "already found %d NICs.\n",
+ dev_notice(&pdev->dev, "already found %d NICs.\n",
velocity_nics);
return -ENODEV;
}
dev_err(&pdev->dev, "allocate net device failed.\n");
goto out;
}
-
+
/* Chain it all together */
-
+
SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
vptr = netdev_priv(dev);
if (first) {
- printk(KERN_INFO "%s Ver. %s\n",
+ printk(KERN_INFO "%s Ver. %s\n",
VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);
printk(KERN_INFO "Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n");
printk(KERN_INFO "Copyright (c) 2004 Red Hat Inc.\n");
dev->irq = pdev->irq;
ret = pci_enable_device(pdev);
- if (ret < 0)
+ if (ret < 0)
goto err_free_dev;
ret = velocity_get_pci_info(vptr, pdev);
velocity_get_options(&vptr->options, velocity_nics, dev->name);
- /*
+ /*
* Mask out the options that cannot be set on the chip
*/
-
+
vptr->options.flags &= info->flags;
/*
* Enable the chip-specified capabilities
*/
-
+
vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL);
vptr->wol_opts = vptr->options.wol_opts;
velocity_print_info(vptr);
pci_set_drvdata(pdev, dev);
-
+
/* and leave the chip powered down */
-
+
pci_set_power_state(pdev, PCI_D3hot);
#ifdef CONFIG_PM
{
struct net_device *dev = vptr->dev;
printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
- printk(KERN_INFO "%s: Ethernet Address: %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
- dev->name,
- dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+ printk(KERN_INFO "%s: Ethernet Address: %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
+ dev->name,
+ dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
}
{
if (pci_read_config_byte(pdev, PCI_REVISION_ID, &vptr->rev_id) < 0)
return -EIO;
-
+
pci_set_master(pdev);
vptr->ioaddr = pci_resource_start(pdev, 0);
vptr->memaddr = pci_resource_start(pdev, 1);
-
+
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
dev_err(&pdev->dev,
"region #0 is not an I/O resource, aborting.\n");
u8 *pool;
/*
- * Allocate all RD/TD rings a single pool
+ * Allocate all RD/TD rings in a single pool
*/
-
- psize = vptr->options.numrx * sizeof(struct rx_desc) +
+
+ psize = vptr->options.numrx * sizeof(struct rx_desc) +
vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq;
/*
pool = pci_alloc_consistent(vptr->pdev, psize, &pool_dma);
if (pool == NULL) {
- printk(KERN_ERR "%s : DMA memory allocation failed.\n",
+ printk(KERN_ERR "%s : DMA memory allocation failed.\n",
vptr->dev->name);
return -ENOMEM;
}
vptr->rd_pool_dma = pool_dma;
tsize = vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq;
- vptr->tx_bufs = pci_alloc_consistent(vptr->pdev, tsize,
+ vptr->tx_bufs = pci_alloc_consistent(vptr->pdev, tsize,
&vptr->tx_bufs_dma);
if (vptr->tx_bufs == NULL) {
- printk(KERN_ERR "%s: DMA memory allocation failed.\n",
+ printk(KERN_ERR "%s: DMA memory allocation failed.\n",
vptr->dev->name);
pci_free_consistent(vptr->pdev, psize, pool, pool_dma);
return -ENOMEM;
{
int size;
- size = vptr->options.numrx * sizeof(struct rx_desc) +
+ size = vptr->options.numrx * sizeof(struct rx_desc) +
vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq;
pci_free_consistent(vptr->pdev, size, vptr->rd_ring, vptr->rd_pool_dma);
break;
}
done++;
- dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
+ dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
} while (dirty != vptr->rd_curr);
if (done) {
static int velocity_init_rd_ring(struct velocity_info *vptr)
{
int ret = -ENOMEM;
- unsigned int rsize = sizeof(struct velocity_rd_info) *
+ unsigned int rsize = sizeof(struct velocity_rd_info) *
vptr->options.numrx;
vptr->rd_info = kmalloc(rsize, GFP_KERNEL);
* Returns zero on success or a negative posix errno code for
* failure.
*/
-
+
static int velocity_init_td_ring(struct velocity_info *vptr)
{
int i, j;
dma_addr_t curr;
struct tx_desc *td;
struct velocity_td_info *td_info;
- unsigned int tsize = sizeof(struct velocity_td_info) *
+ unsigned int tsize = sizeof(struct velocity_td_info) *
vptr->options.numtx;
/* Init the TD ring entries */
{
struct velocity_td_info * td_info = &(vptr->td_infos[q][n]);
int i;
-
+
if (td_info == NULL)
return;
-
+
if (td_info->skb) {
for (i = 0; i < td_info->nskb_dma; i++)
{
if (td_info->skb_dma[i]) {
- pci_unmap_single(vptr->pdev, td_info->skb_dma[i],
+ pci_unmap_single(vptr->pdev, td_info->skb_dma[i],
td_info->skb->len, PCI_DMA_TODEVICE);
td_info->skb_dma[i] = (dma_addr_t) NULL;
}
* Free up the transmit ring for this particular velocity adapter.
* We free the ring contents but not the ring itself.
*/
-
+
static void velocity_free_td_ring(struct velocity_info *vptr)
{
int i, j;
* any received packets from the receive queue. Hand the ring
* slots back to the adapter for reuse.
*/
-
+
static int velocity_rx_srv(struct velocity_info *vptr, int status)
{
struct net_device_stats *stats = &vptr->stats;
* Process the status bits for the received packet and determine
* if the checksum was computed and verified by the hardware
*/
-
+
static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
{
skb->ip_summed = CHECKSUM_NONE;
if (rd->rdesc1.CSM & CSM_IPKT) {
if (rd->rdesc1.CSM & CSM_IPOK) {
- if ((rd->rdesc1.CSM & CSM_TCPKT) ||
+ if ((rd->rdesc1.CSM & CSM_TCPKT) ||
(rd->rdesc1.CSM & CSM_UDPKT)) {
if (!(rd->rdesc1.CSM & CSM_TUPOK)) {
return;
*rx_skb = new_skb;
ret = 0;
}
-
+
}
return ret;
}
* velocity_receive_frame - received packet processor
* @vptr: velocity we are handling
* @idx: ring index
- *
+ *
* A packet has arrived. We process the packet and if appropriate
* pass the frame up the network stack
*/
-
+
static int velocity_receive_frame(struct velocity_info *vptr, int idx)
{
void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
/*
* Drop frame not meeting IEEE 802.3
*/
-
+
if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) {
if (rd->rdesc0.RSR & RSR_RL) {
stats->rx_length_errors++;
PCI_DMA_FROMDEVICE);
skb_put(skb, pkt_len - 4);
- skb->protocol = eth_type_trans(skb, skb->dev);
+ skb->protocol = eth_type_trans(skb, skb->dev);
stats->rx_bytes += pkt_len;
netif_rx(skb);
* requires *64* byte alignment of the buffer which makes life
* less fun than would be ideal.
*/
-
+
static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
{
struct rx_desc *rd = &(vptr->rd_ring[idx]);
skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63);
rd_info->skb->dev = vptr->dev;
rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);
-
+
/*
* Fill in the descriptor to match
- */
-
+ */
+
*((u32 *) & (rd->rdesc0)) = 0;
rd->len = cpu_to_le32(vptr->rx_buf_sz);
rd->inten = 1;
* we can complete and clean up. Update any statistics as
* necessary.
*/
-
+
static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
{
struct tx_desc *td;
struct net_device_stats *stats = &vptr->stats;
for (qnum = 0; qnum < vptr->num_txq; qnum++) {
- for (idx = vptr->td_tail[qnum]; vptr->td_used[qnum] > 0;
+ for (idx = vptr->td_tail[qnum]; vptr->td_used[qnum] > 0;
idx = (idx + 1) % vptr->options.numtx) {
/*
* @status: card status
*
* Process an error report from the hardware and attempt to recover
- * the card itself. At the moment we cannot recover from some
+ * the card itself. At the moment we cannot recover from some
* theoretically impossible errors but this could be fixed using
* the pci_device_failed logic to bounce the hardware
*
*/
-
+
static void velocity_error(struct velocity_info *vptr, int status)
{
BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR);
writew(TRDCSR_RUN, &regs->TDCSRClr);
netif_stop_queue(vptr->dev);
-
+
/* FIXME: port over the pci_device_failed code and use it
here */
}
vptr->mii_status = check_connection_type(regs);
/*
- * If it is a 3119, disable frame bursting in
+ * If it is a 3119, disable frame bursting in
* halfduplex mode and enable it in fullduplex
* mode
*/
enable_flow_control_ability(vptr);
/*
- * Re-enable auto-polling because SRCI will disable
+ * Re-enable auto-polling because SRCI will disable
* auto-polling
*/
-
+
enable_mii_autopoll(regs);
if (vptr->mii_status & VELOCITY_LINK_FAIL)
* Release a transmit buffer. If the buffer was preallocated then
* recycle it, if not then unmap the buffer.
*/
-
+
static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_info *tdinfo)
{
struct sk_buff *skb = tdinfo->skb;
* All the ring allocation and set up is done on open for this
* adapter to minimise memory usage when inactive
*/
-
+
static int velocity_open(struct net_device *dev)
{
struct velocity_info *vptr = netdev_priv(dev);
ret = velocity_init_td_ring(vptr);
if (ret < 0)
goto err_free_rd_ring;
-
- /* Ensure chip is running */
+
+ /* Ensure chip is running */
pci_set_power_state(vptr->pdev, PCI_D0);
-
+
velocity_init_registers(vptr, VELOCITY_INIT_COLD);
ret = request_irq(vptr->pdev->irq, &velocity_intr, IRQF_SHARED,
goto out;
}
- /**
+ /**
* velocity_change_mtu - MTU change callback
* @dev: network device
* @new_mtu: desired MTU
* this interface. It gets called on a change by the network layer.
* Return zero for success or negative posix error code.
*/
-
+
static int velocity_change_mtu(struct net_device *dev, int new_mtu)
{
struct velocity_info *vptr = netdev_priv(dev);
int ret = 0;
if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) {
- VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n",
+ VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n",
vptr->dev->name);
return -EINVAL;
}
* Shuts down the internal operations of the velocity and
* disables interrupts, autopolling, transmit and receive
*/
-
+
static void velocity_shutdown(struct velocity_info *vptr)
{
struct mac_regs __iomem * regs = vptr->mac_regs;
velocity_get_ip(vptr);
if (dev->irq != 0)
free_irq(dev->irq, dev);
-
+
/* Power down the chip */
pci_set_power_state(vptr->pdev, PCI_D3hot);
-
+
/* Free the resources */
velocity_free_td_ring(vptr);
velocity_free_rd_ring(vptr);
* Called by the network layer to request that a packet is queued to
* the velocity. Returns zero on success.
*/
-
+
static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct velocity_info *vptr = netdev_priv(dev);
td_ptr->td_buf[0].queue = 0;
/*
- * Pad short frames.
+ * Pad short frames.
*/
if (pktlen < ETH_ZLEN) {
/* Cannot occur until ZC support */
if (nfrags > 6) {
memcpy(tdinfo->buf, skb->data, skb->len);
tdinfo->skb_dma[0] = tdinfo->buf_dma;
- td_ptr->tdesc0.pktsize =
+ td_ptr->tdesc0.pktsize =
td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
td_ptr->td_buf[0].pa_high = 0;
td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
* Handle hardware checksum
*/
if ((vptr->flags & VELOCITY_FLAGS_TX_CSUM)
- && (skb->ip_summed == CHECKSUM_HW)) {
+ && (skb->ip_summed == CHECKSUM_PARTIAL)) {
struct iphdr *ip = skb->nh.iph;
if (ip->protocol == IPPROTO_TCP)
td_ptr->tdesc1.TCR |= TCR0_TCPCK;
* and need to identify initially if we are, and if not exit as
* efficiently as possible.
*/
-
+
static int velocity_intr(int irq, void *dev_instance, struct pt_regs *regs)
{
struct net_device *dev = dev_instance;
* Keep processing the ISR until we have completed
* processing and the isr_status becomes zero
*/
-
+
while (isr_status != 0) {
mac_write_isr(vptr->mac_regs, isr_status);
if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
isr_status = mac_read_isr(vptr->mac_regs);
if (max_count > vptr->options.int_works)
{
- printk(KERN_WARNING "%s: excessive work at interrupt.\n",
+ printk(KERN_WARNING "%s: excessive work at interrupt.\n",
dev->name);
max_count = 0;
}
* for a velocity adapter. Reload the CAMs with the new address
* filter ruleset.
*/
-
+
static void velocity_set_multi(struct net_device *dev)
{
struct velocity_info *vptr = netdev_priv(dev);
struct dev_mc_list *mclist;
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
- /* Unconditionally log net taps. */
- printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
writel(0xffffffff, &regs->MARCAM[0]);
writel(0xffffffff, &regs->MARCAM[4]);
rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
* the hardware into the counters before letting the network
* layer display them.
*/
-
+
static struct net_device_stats *velocity_get_stats(struct net_device *dev)
{
struct velocity_info *vptr = netdev_priv(dev);
-
+
/* If the hardware is down, don't touch MII */
if(!netif_running(dev))
return &vptr->stats;
* Called when the user issues an ioctl request to the network
* device in question. The velocity interface supports MII.
*/
-
+
static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct velocity_info *vptr = netdev_priv(dev);
/* If we are asked for information and the device is power
saving then we need to bring the device back up to talk to it */
-
+
if (!netif_running(dev))
pci_set_power_state(vptr->pdev, PCI_D0);
-
+
switch (cmd) {
case SIOCGMIIPHY: /* Get address of MII PHY in use. */
case SIOCGMIIREG: /* Read MII PHY register. */
}
if (!netif_running(dev))
pci_set_power_state(vptr->pdev, PCI_D3hot);
-
-
+
+
return ret;
}
* Definition for our device driver. The PCI layer interface
* uses this to handle all our card discover and plugging
*/
-
+
static struct pci_driver velocity_driver = {
.name = VELOCITY_NAME,
.id_table = velocity_id_table,
* the probe functions for each velocity adapter installed
* in the system.
*/
-
+
static int __init velocity_init_module(void)
{
int ret;
velocity_register_notifier();
- ret = pci_module_init(&velocity_driver);
+ ret = pci_register_driver(&velocity_driver);
if (ret < 0)
velocity_unregister_notifier();
return ret;
* velocity_cleanup - module unload
*
* When the velocity hardware is unloaded this function is called.
- * It will clean up the notifiers and the unregister the PCI
+ * It will clean up the notifiers and then unregister the PCI
* driver interface for this hardware. This in turn cleans up
* all discovered interfaces before returning from the function
*/
-
+
static void __exit velocity_cleanup_module(void)
{
velocity_unregister_notifier();
/*
* MII access , media link mode setting functions
*/
-
-
+
+
/**
* mii_init - set up MII
* @vptr: velocity adapter
*
* Set up the PHY for the current link state.
*/
-
+
static void mii_init(struct velocity_info *vptr, u32 mii_status)
{
u16 BMCR;
MII_REG_BITS_OFF((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
/*
* Turn on ECHODIS bit in NWay-forced full mode and turn it
- * off it in NWay-forced half mode for NWay-forced v.s.
+ * off in NWay-forced half mode for the NWay-forced vs.
* legacy-forced issue.
*/
if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
/*
* Turn on ECHODIS bit in NWay-forced full mode and turn it
- * off it in NWay-forced half mode for NWay-forced v.s.
+ * off in NWay-forced half mode for the NWay-forced vs.
* legacy-forced issue
*/
if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
case PHYID_MARVELL_1000:
case PHYID_MARVELL_1000S:
/*
- * Assert CRS on Transmit
+ * Assert CRS on Transmit
*/
MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
/*
- * Reset to hardware default
+ * Reset to hardware default
*/
MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
break;
*
* Turn off the autopoll and wait for it to disable on the chip
*/
-
+
static void safe_disable_mii_autopoll(struct mac_regs __iomem * regs)
{
u16 ww;
* Perform a single read of an MII 16bit register. Returns zero
* on success or -ETIMEDOUT if the PHY did not respond.
*/
-
+
static int velocity_mii_read(struct mac_regs __iomem *regs, u8 index, u16 *data)
{
u16 ww;
* Perform a single write to an MII 16bit register. Returns zero
* on success or -ETIMEDOUT if the PHY did not respond.
*/
-
+
static int velocity_mii_write(struct mac_regs __iomem *regs, u8 mii_addr, u16 data)
{
u16 ww;
* mii_status accordingly. The requested link state information
* is also returned.
*/
-
+
static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
{
u32 status = 0;
*
* Enable autonegotiation on this interface
*/
-
+
static void mii_set_auto_on(struct velocity_info *vptr)
{
if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs))
* Set up the flow control on this interface according to
* the supplied user/eeprom options.
*/
-
+
static void set_mii_flow_control(struct velocity_info *vptr)
{
/*Enable or Disable PAUSE in ANAR */
* PHY and also velocity hardware setup accordingly. In particular
* we need to set up CD polling and frame bursting.
*/
-
+
static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
{
u32 curr_status;
* Check the current MII status and determine the link status
* accordingly
*/
-
+
static u32 mii_check_media_mode(struct mac_regs __iomem * regs)
{
u32 status = 0;
* Called before an ethtool operation. We need to make sure the
* chip is out of D3 state before we poke at it.
*/
-
+
static int velocity_ethtool_up(struct net_device *dev)
{
struct velocity_info *vptr = netdev_priv(dev);
if (!netif_running(dev))
pci_set_power_state(vptr->pdev, PCI_D0);
return 0;
- }
+ }
/**
* velocity_ethtool_down - post hook for ethtool
* Called after an ethtool operation. Restore the chip back to D3
* state if it isn't running.
*/
-
+
static void velocity_ethtool_down(struct net_device *dev)
{
struct velocity_info *vptr = netdev_priv(dev);
cmd->duplex = DUPLEX_FULL;
else
cmd->duplex = DUPLEX_HALF;
-
+
return 0;
}
u32 curr_status;
u32 new_status = 0;
int ret = 0;
-
+
curr_status = check_connection_type(vptr->mac_regs);
curr_status &= (~VELOCITY_LINK_FAIL);
msglevel = value;
}
- static struct ethtool_ops velocity_ethtool_ops = {
+ static const struct ethtool_ops velocity_ethtool_ops = {
.get_settings = velocity_get_settings,
.set_settings = velocity_set_settings,
.get_drvinfo = velocity_get_drvinfo,
* are used by tools like kudzu to interrogate the link state of the
* hardware
*/
-
+
static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct velocity_info *vptr = netdev_priv(dev);
unsigned long flags;
struct mii_ioctl_data *miidata = if_mii(ifr);
int err;
-
+
switch (cmd) {
case SIOCGMIIPHY:
miidata->phy_id = readb(&regs->MIIADR) & 0x1f;
/**
* velocity_save_context - save registers
- * @vptr: velocity
+ * @vptr: velocity
* @context: buffer for stored context
*
* Retrieve the current configuration from the velocity hardware
* restore functions. This allows us to save things we need across
* power down states
*/
-
+
static void velocity_save_context(struct velocity_info *vptr, struct velocity_context * context)
{
struct mac_regs __iomem * regs = vptr->mac_regs;
/**
* velocity_restore_context - restore registers
- * @vptr: velocity
+ * @vptr: velocity
* @context: buffer for stored context
*
- * Reload the register configuration from the velocity context
+ * Reload the register configuration from the velocity context
* created by velocity_save_context.
*/
-
+
static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context)
{
struct mac_regs __iomem * regs = vptr->mac_regs;
/* Instance data managed by the core of Wireless Extensions. */
struct iw_public_data * wireless_data;
- struct ethtool_ops *ethtool_ops;
+ const struct ethtool_ops *ethtool_ops;
/*
* This marks the end of the "visible" part of the structure. All
extern int netdev_max_backlog;
extern int weight_p;
extern int netdev_set_master(struct net_device *dev, struct net_device *master);
-extern int skb_checksum_help(struct sk_buff *skb, int inward);
+extern int skb_checksum_help(struct sk_buff *skb);
extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
{
return skb_is_gso(skb) &&
(!skb_gso_ok(skb, dev->features) ||
- unlikely(skb->ip_summed != CHECKSUM_HW));
+ unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}
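/* Background for the CHECKSUM_HW renames throughout this patch: the old
 * CHECKSUM_HW value was used for both directions, and it is being split so
 * that CHECKSUM_PARTIAL names the transmit case ("the stack left a partial
 * checksum for the hardware to finish"), which is the only case the driver
 * hunks above test for.  A minimal sketch of the transmit-path check every
 * converted driver now performs (helper name hypothetical):
 */
static inline int wants_tx_csum_offload(const struct sk_buff *skb)
{
	/* true when the device must insert the checksum before sending */
	return skb->ip_summed == CHECKSUM_PARTIAL;
}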
/* On bonding slaves other than the currently active slave, suppress