/* Altera Triple-Speed Ethernet MAC driver
 * Copyright (C) 2008-2014 Altera Corporation. All rights reserved
 *
 * Contributors:
 *   Dalon Westergreen
 *   Thomas Chou
 *   Ian Abbott
 *   Yuriy Kozlov
 *   Tobias Klauser
 *   Andriy Smolskyy
 *   Roman Bulgakov
 *   Dmytro Mytarchuk
 *   Matthew Gerlach
 *
 * Original driver contributed by SLS.
 * Major updates contributed by GlobalLogic
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <asm/cacheflush.h>

#include "altera_utils.h"
#include "altera_tse.h"
#include "altera_sgdma.h"
#include "altera_msgdma.h"

static atomic_t instance_count = ATOMIC_INIT(~0);
/* Module parameters */
static int debug = -1;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN);

#define RX_DESCRIPTORS 64
static int dma_rx_num = RX_DESCRIPTORS;
module_param(dma_rx_num, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dma_rx_num, "Number of descriptors in the RX list");

#define TX_DESCRIPTORS 64
static int dma_tx_num = TX_DESCRIPTORS;
module_param(dma_tx_num, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list");


#define POLL_PHY (-1)

/* Make sure the DMA buffer size is larger than the max frame size
 * plus some alignment offset and a VLAN header. If the max frame size is
 * 1518, a VLAN header adds another 4 bytes and the alignment headroom
 * is 2 bytes, so 2048 is just fine.
 */
#define ALTERA_RXDMABUFFER_SIZE	2048

/* Allow the network stack to resume queueing packets after we've
 * finished transmitting at least 1/4 of the packets in the queue.
 */
#define TSE_TX_THRESH(x)	(x->tx_ring_size / 4)

#define TXQUEUESTOP_THRESHOLD	2

static const struct of_device_id altera_tse_ids[];

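/* Number of free TX descriptors: consumer index plus ring size minus
 * producer index, minus one slot that is always kept unused so that
 * tx_prod == tx_cons unambiguously means "ring empty".
 */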
static inline u32 tse_tx_avail(struct altera_tse_private *priv)
{
	return priv->tx_cons + priv->tx_ring_size - priv->tx_prod - 1;
}

/* PCS Register read/write functions
 */
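/* The SGMII PCS is reached through the MAC's first MDIO space (mdio_phy0).
 * Each 16-bit register occupies one 32-bit word there, hence the
 * regnum * 4 offset and the 0xffff mask on reads.
 */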
static u16 sgmii_pcs_read(struct altera_tse_private *priv, int regnum)
{
	return csrrd32(priv->mac_dev,
		       tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff;
}

static void sgmii_pcs_write(struct altera_tse_private *priv, int regnum,
			    u16 value)
{
	csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4);
}

/* Check PCS scratch memory */
static int sgmii_pcs_scratch_test(struct altera_tse_private *priv, u16 value)
{
	sgmii_pcs_write(priv, SGMII_PCS_SCRATCH, value);
	return (sgmii_pcs_read(priv, SGMII_PCS_SCRATCH) == value);
}

/* MDIO specific functions
 */
static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct net_device *ndev = bus->priv;
	struct altera_tse_private *priv = netdev_priv(ndev);

	/* set MDIO address */
	csrwr32((mii_id & 0x1f), priv->mac_dev,
		tse_csroffs(mdio_phy1_addr));

	/* get the data */
	return csrrd32(priv->mac_dev,
		       tse_csroffs(mdio_phy1) + regnum * 4) & 0xffff;
}

static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
				 u16 value)
{
	struct net_device *ndev = bus->priv;
	struct altera_tse_private *priv = netdev_priv(ndev);

	/* set MDIO address */
	csrwr32((mii_id & 0x1f), priv->mac_dev,
		tse_csroffs(mdio_phy1_addr));

	/* write the data */
	csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy1) + regnum * 4);
	return 0;
}

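/* Register an MDIO bus for the "altr,tse-mdio" child node, if the device
 * tree provides one; without it the driver relies on an external
 * phy-handle or a fixed link.
 */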
static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	int ret;
	struct device_node *mdio_node = NULL;
	struct mii_bus *mdio = NULL;
	struct device_node *child_node = NULL;

	for_each_child_of_node(priv->device->of_node, child_node) {
		if (of_device_is_compatible(child_node, "altr,tse-mdio")) {
			mdio_node = child_node;
			break;
		}
	}

	if (mdio_node) {
		netdev_dbg(dev, "FOUND MDIO subnode\n");
	} else {
		netdev_dbg(dev, "NO MDIO subnode\n");
		return 0;
	}

	mdio = mdiobus_alloc();
	if (mdio == NULL) {
		netdev_err(dev, "Error allocating MDIO bus\n");
		return -ENOMEM;
	}

	mdio->name = ALTERA_TSE_RESOURCE_NAME;
	mdio->read = &altera_tse_mdio_read;
	mdio->write = &altera_tse_mdio_write;
	snprintf(mdio->id, MII_BUS_ID_SIZE, "%s-%u", mdio->name, id);

	mdio->priv = dev;
	mdio->parent = priv->device;

	ret = of_mdiobus_register(mdio, mdio_node);
	if (ret != 0) {
		netdev_err(dev, "Cannot register MDIO bus %s\n",
			   mdio->id);
		goto out_free_mdio;
	}

	if (netif_msg_drv(priv))
		netdev_info(dev, "MDIO bus %s: created\n", mdio->id);

	priv->mdio = mdio;
	return 0;
out_free_mdio:
	mdiobus_free(mdio);
	mdio = NULL;
	return ret;
}

static void altera_tse_mdio_destroy(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);

	if (priv->mdio == NULL)
		return;

	if (netif_msg_drv(priv))
		netdev_info(dev, "MDIO bus %s: removed\n",
			    priv->mdio->id);

	mdiobus_unregister(priv->mdio);
	mdiobus_free(priv->mdio);
	priv->mdio = NULL;
}

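/* Allocate an IP-aligned receive skb for one ring entry and map it for
 * DMA from the device. The low two bits of the bus address are masked
 * off before it is handed to the DMA engine.
 */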
static int tse_init_rx_buffer(struct altera_tse_private *priv,
			      struct tse_buffer *rxbuffer, int len)
{
	rxbuffer->skb = netdev_alloc_skb_ip_align(priv->dev, len);
	if (!rxbuffer->skb)
		return -ENOMEM;

	rxbuffer->dma_addr = dma_map_single(priv->device, rxbuffer->skb->data,
					    len,
					    DMA_FROM_DEVICE);

	if (dma_mapping_error(priv->device, rxbuffer->dma_addr)) {
		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
		dev_kfree_skb_any(rxbuffer->skb);
		return -EINVAL;
	}
	rxbuffer->dma_addr &= (dma_addr_t)~3;
	rxbuffer->len = len;
	return 0;
}

static void tse_free_rx_buffer(struct altera_tse_private *priv,
			       struct tse_buffer *rxbuffer)
{
	struct sk_buff *skb = rxbuffer->skb;
	dma_addr_t dma_addr = rxbuffer->dma_addr;

	if (skb != NULL) {
		if (dma_addr)
			dma_unmap_single(priv->device, dma_addr,
					 rxbuffer->len,
					 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		rxbuffer->skb = NULL;
		rxbuffer->dma_addr = 0;
	}
}

/* Unmap and free Tx buffer resources
 */
static void tse_free_tx_buffer(struct altera_tse_private *priv,
			       struct tse_buffer *buffer)
{
	if (buffer->dma_addr) {
		if (buffer->mapped_as_page)
			dma_unmap_page(priv->device, buffer->dma_addr,
				       buffer->len, DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device, buffer->dma_addr,
					 buffer->len, DMA_TO_DEVICE);
		buffer->dma_addr = 0;
	}
	if (buffer->skb) {
		dev_kfree_skb_any(buffer->skb);
		buffer->skb = NULL;
	}
}

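/* Allocate the TX and RX ring bookkeeping arrays and pre-fill every RX
 * entry with a mapped skb; on failure everything allocated so far is
 * unwound.
 */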
static int alloc_init_skbufs(struct altera_tse_private *priv)
{
	unsigned int rx_descs = priv->rx_ring_size;
	unsigned int tx_descs = priv->tx_ring_size;
	int ret = -ENOMEM;
	int i;

	/* Create Rx ring buffer */
	priv->rx_ring = kcalloc(rx_descs, sizeof(struct tse_buffer),
				GFP_KERNEL);
	if (!priv->rx_ring)
		goto err_rx_ring;

	/* Create Tx ring buffer */
	priv->tx_ring = kcalloc(tx_descs, sizeof(struct tse_buffer),
				GFP_KERNEL);
	if (!priv->tx_ring)
		goto err_tx_ring;

	priv->tx_cons = 0;
	priv->tx_prod = 0;

	/* Init Rx ring */
	for (i = 0; i < rx_descs; i++) {
		ret = tse_init_rx_buffer(priv, &priv->rx_ring[i],
					 priv->rx_dma_buf_sz);
		if (ret)
			goto err_init_rx_buffers;
	}

	priv->rx_cons = 0;
	priv->rx_prod = 0;

	return 0;
err_init_rx_buffers:
	while (--i >= 0)
		tse_free_rx_buffer(priv, &priv->rx_ring[i]);
	kfree(priv->tx_ring);
err_tx_ring:
	kfree(priv->rx_ring);
err_rx_ring:
	return ret;
}

static void free_skbufs(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	unsigned int rx_descs = priv->rx_ring_size;
	unsigned int tx_descs = priv->tx_ring_size;
	int i;

	/* Release the DMA TX/RX socket buffers */
	for (i = 0; i < rx_descs; i++)
		tse_free_rx_buffer(priv, &priv->rx_ring[i]);
	for (i = 0; i < tx_descs; i++)
		tse_free_tx_buffer(priv, &priv->tx_ring[i]);

	kfree(priv->tx_ring);
}

/* Reallocate the skb for the reception process
 */
static inline void tse_rx_refill(struct altera_tse_private *priv)
{
	unsigned int rxsize = priv->rx_ring_size;
	unsigned int entry;
	int ret;

	for (; priv->rx_cons - priv->rx_prod > 0;
			priv->rx_prod++) {
		entry = priv->rx_prod % rxsize;
		if (likely(priv->rx_ring[entry].skb == NULL)) {
			ret = tse_init_rx_buffer(priv, &priv->rx_ring[entry],
						 priv->rx_dma_buf_sz);
			if (unlikely(ret != 0))
				break;
			priv->dmaops->add_rx_desc(priv, &priv->rx_ring[entry]);
		}
	}
}

/* Pull out the VLAN tag and fix up the packet
 */
static inline void tse_rx_vlan(struct net_device *dev, struct sk_buff *skb)
{
	struct ethhdr *eth_hdr;
	u16 vid;

	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    !__vlan_get_tag(skb, &vid)) {
		eth_hdr = (struct ethhdr *)skb->data;
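		/* The 802.1Q tag sits between the MAC addresses and the
		 * EtherType: slide the destination and source addresses
		 * forward by VLAN_HLEN so they overwrite the tag, then pull
		 * the head so the frame starts at the relocated header. The
		 * extracted VID is handed to the stack via the hwaccel tag.
		 */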
		memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}
}

/* Receive a packet: retrieve and pass over to upper levels
 */
static int tse_rx(struct altera_tse_private *priv, int limit)
{
	unsigned int count = 0;
	unsigned int next_entry;
	struct sk_buff *skb;
	unsigned int entry = priv->rx_cons % priv->rx_ring_size;
	u32 rxstatus;
	u16 pktlength;
	u16 pktstatus;

	/* Check count < limit first, because get_rx_status() modifies the
	 * response FIFO: reading the last byte of a response pops it, so once
	 * a response has been read the corresponding packet must be processed.
	 */
	while ((count < limit) &&
	       ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0)) {
		pktstatus = rxstatus >> 16;
		pktlength = rxstatus & 0xffff;

		if ((pktstatus & 0xFF) || (pktlength == 0))
			netdev_err(priv->dev,
				   "RCV pktstatus %08X pktlength %08X\n",
				   pktstatus, pktlength);

		/* The DMA transfer from the TSE starts with 2 additional bytes
		 * for IP payload alignment. The status returned by
		 * get_rx_status() contains the DMA transfer length, so the
		 * packet is 2 bytes shorter.
		 */
		pktlength -= 2;

		count++;
		next_entry = (++priv->rx_cons) % priv->rx_ring_size;

		skb = priv->rx_ring[entry].skb;
		if (unlikely(!skb)) {
			netdev_err(priv->dev,
				   "%s: Inconsistent Rx descriptor chain\n",
				   __func__);
			priv->dev->stats.rx_dropped++;
			break;
		}
		priv->rx_ring[entry].skb = NULL;

		skb_put(skb, pktlength);

		dma_unmap_single(priv->device, priv->rx_ring[entry].dma_addr,
				 priv->rx_ring[entry].len, DMA_FROM_DEVICE);

		if (netif_msg_pktdata(priv)) {
			netdev_info(priv->dev, "frame received %d bytes\n",
				    pktlength);
			print_hex_dump(KERN_ERR, "data: ", DUMP_PREFIX_OFFSET,
				       16, 1, skb->data, pktlength, true);
		}

		tse_rx_vlan(priv->dev, skb);

		skb->protocol = eth_type_trans(skb, priv->dev);
		skb_checksum_none_assert(skb);

		napi_gro_receive(&priv->napi, skb);

		priv->dev->stats.rx_packets++;
		priv->dev->stats.rx_bytes += pktlength;

		entry = next_entry;

		tse_rx_refill(priv);
	}

	return count;
}

/* Reclaim resources after transmission completes
 */
static int tse_tx_complete(struct altera_tse_private *priv)
{
	unsigned int txsize = priv->tx_ring_size;
	u32 ready;
	unsigned int entry;
	struct tse_buffer *tx_buff;
	int txcomplete = 0;

	spin_lock(&priv->tx_lock);

	ready = priv->dmaops->tx_completions(priv);

	/* Free sent buffers */
	while (ready && (priv->tx_cons != priv->tx_prod)) {
		entry = priv->tx_cons % txsize;
		tx_buff = &priv->tx_ring[entry];

		if (netif_msg_tx_done(priv))
			netdev_dbg(priv->dev, "%s: curr %d, dirty %d\n",
				   __func__, priv->tx_prod, priv->tx_cons);

		if (likely(tx_buff->skb))
			priv->dev->stats.tx_packets++;

		tse_free_tx_buffer(priv, tx_buff);
		priv->tx_cons++;

		txcomplete++;
		ready--;
	}

	if (unlikely(netif_queue_stopped(priv->dev) &&
		     tse_tx_avail(priv) > TSE_TX_THRESH(priv))) {
		if (netif_queue_stopped(priv->dev) &&
		    tse_tx_avail(priv) > TSE_TX_THRESH(priv)) {
			if (netif_msg_tx_done(priv))
				netdev_dbg(priv->dev, "%s: restart transmit\n",
					   __func__);
			netif_wake_queue(priv->dev);
		}
	}

	spin_unlock(&priv->tx_lock);
	return txcomplete;
}

/* NAPI polling function
 */
static int tse_poll(struct napi_struct *napi, int budget)
{
	struct altera_tse_private *priv =
		container_of(napi, struct altera_tse_private, napi);
	int rxcomplete = 0;
	unsigned long int flags;

	tse_tx_complete(priv);

	rxcomplete = tse_rx(priv, budget);

	if (rxcomplete < budget) {

		napi_complete_done(napi, rxcomplete);

		netdev_dbg(priv->dev,
			   "NAPI Complete, did %d packets with budget %d\n",
			   rxcomplete, budget);

		spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
		priv->dmaops->enable_rxirq(priv);
		priv->dmaops->enable_txirq(priv);
		spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
	}
	return rxcomplete;
}

/* DMA TX & RX FIFO interrupt routing
 */
static irqreturn_t altera_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct altera_tse_private *priv;

	if (unlikely(!dev)) {
		pr_err("%s: invalid dev pointer\n", __func__);
		return IRQ_NONE;
	}
	priv = netdev_priv(dev);

	spin_lock(&priv->rxdma_irq_lock);
	/* reset IRQs */
	priv->dmaops->clear_rxirq(priv);
	priv->dmaops->clear_txirq(priv);
	spin_unlock(&priv->rxdma_irq_lock);

	if (likely(napi_schedule_prep(&priv->napi))) {
		spin_lock(&priv->rxdma_irq_lock);
		priv->dmaops->disable_rxirq(priv);
		priv->dmaops->disable_txirq(priv);
		spin_unlock(&priv->rxdma_irq_lock);
		__napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}

/* Transmit a packet (called by the kernel). Dispatches to either the
 * SGDMA or the MSGDMA transmit method. Scatter/gather is not supported,
 * so the packet is assumed to be a single physically contiguous fragment
 * starting at skb->data with length skb_headlen(skb).
 */
static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	unsigned int txsize = priv->tx_ring_size;
	unsigned int entry;
	struct tse_buffer *buffer = NULL;
	int nfrags = skb_shinfo(skb)->nr_frags;
	unsigned int nopaged_len = skb_headlen(skb);
	enum netdev_tx ret = NETDEV_TX_OK;
	dma_addr_t dma_addr;

	spin_lock_bh(&priv->tx_lock);

	if (unlikely(tse_tx_avail(priv) < nfrags + 1)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx list full when queue awake\n",
				   __func__);
		}
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Map the first skb fragment */
	entry = priv->tx_prod % txsize;
	buffer = &priv->tx_ring[entry];

	dma_addr = dma_map_single(priv->device, skb->data, nopaged_len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, dma_addr)) {
		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
		ret = NETDEV_TX_OK;
		goto out;
	}

	buffer->skb = skb;
	buffer->dma_addr = dma_addr;
	buffer->len = nopaged_len;

	priv->dmaops->tx_buffer(priv, buffer);

	skb_tx_timestamp(skb);

	priv->tx_prod++;
	dev->stats.tx_bytes += skb->len;

	if (unlikely(tse_tx_avail(priv) <= TXQUEUESTOP_THRESHOLD)) {
		if (netif_msg_hw(priv))
			netdev_dbg(priv->dev, "%s: stop transmitted packets\n",
				   __func__);
		netif_stop_queue(dev);
	}

out:
	spin_unlock_bh(&priv->tx_lock);

	return ret;
}

/* Called every time the controller might need to be made
 * aware of new link state. The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void altera_tse_adjust_link(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	int new_state = 0;

	/* only change config if there is a link */
	spin_lock(&priv->mac_cfg_lock);
	if (phydev->link) {
		/* Read old config */
		u32 cfg_reg = ioread32(&priv->mac_dev->command_config);

		/* Check duplex */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				cfg_reg |= MAC_CMDCFG_HD_ENA;
			else
				cfg_reg &= ~MAC_CMDCFG_HD_ENA;

			netdev_dbg(priv->dev, "%s: Link duplex = 0x%x\n",
				   dev->name, phydev->duplex);

			priv->oldduplex = phydev->duplex;
		}

		/* Check speed */
		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				cfg_reg |= MAC_CMDCFG_ETH_SPEED;
				cfg_reg &= ~MAC_CMDCFG_ENA_10;
				break;
			case 100:
				cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
				cfg_reg &= ~MAC_CMDCFG_ENA_10;
				break;
			case 10:
				cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
				cfg_reg |= MAC_CMDCFG_ENA_10;
				break;
			default:
				if (netif_msg_link(priv))
					netdev_warn(dev, "Speed (%d) is not 10/100/1000!\n",
						    phydev->speed);
				break;
			}
			priv->oldspeed = phydev->speed;
		}
		iowrite32(cfg_reg, &priv->mac_dev->command_config);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	spin_unlock(&priv->mac_cfg_lock);
}
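
/* Attach to a PHY on the locally created MDIO bus, either at the address
 * given by the "phy-addr" property or, with POLL_PHY, at the first
 * address that answers on the bus.
 */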
static struct phy_device *connect_local_phy(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	struct phy_device *phydev = NULL;
	char phy_id_fmt[MII_BUS_ID_SIZE + 3];

	if (priv->phy_addr != POLL_PHY) {
		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
			 priv->mdio->id, priv->phy_addr);

		netdev_dbg(dev, "trying to attach to %s\n", phy_id_fmt);

		phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link,
				     priv->phy_iface);
		if (IS_ERR(phydev))
			netdev_err(dev, "Could not attach to PHY\n");

	} else {
		int ret;

		phydev = phy_find_first(priv->mdio);
		if (phydev == NULL) {
			netdev_err(dev, "No PHY found\n");
			return phydev;
		}

		ret = phy_connect_direct(dev, phydev, &altera_tse_adjust_link,
					 priv->phy_iface);
		if (ret != 0) {
			netdev_err(dev, "Could not attach to PHY\n");
			phydev = NULL;
		}
	}
	return phydev;
}

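/* Read the PHY interface mode and address from the device tree and, when a
 * PHY is actually wired up, create the MDIO bus used to reach it.
 */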
static int altera_tse_phy_get_addr_mdio_create(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	struct device_node *np = priv->device->of_node;
	int ret = 0;

	priv->phy_iface = of_get_phy_mode(np);

	/* Avoid getting the PHY address and creating an MDIO bus when no
	 * PHY is present.
	 */
	if (!priv->phy_iface)
		return 0;

	/* try to get PHY address from device tree, use PHY autodetection if
	 * no valid address is given
	 */

	if (of_property_read_u32(priv->device->of_node, "phy-addr",
				 &priv->phy_addr)) {
		priv->phy_addr = POLL_PHY;
	}

	if (!((priv->phy_addr == POLL_PHY) ||
	      ((priv->phy_addr >= 0) && (priv->phy_addr < PHY_MAX_ADDR)))) {
		netdev_err(dev, "invalid phy-addr specified %d\n",
			   priv->phy_addr);
		return -ENODEV;
	}

	/* Create/attach to MDIO bus */
	ret = altera_tse_mdio_create(dev,
				     atomic_add_return(1, &instance_count));

	if (ret)
		return -ENODEV;

	return 0;
}

/* Initialize driver's PHY state, and attach to the PHY
 */
static int init_phy(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	struct phy_device *phydev;
	struct device_node *phynode;
	bool fixed_link = false;
	int rc = 0;

	/* Skip PHY initialization when no PHY is present */
	if (!priv->phy_iface)
		return 0;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0);

	if (!phynode) {
		/* check if a fixed-link is defined in device-tree */
		if (of_phy_is_fixed_link(priv->device->of_node)) {
			rc = of_phy_register_fixed_link(priv->device->of_node);
			if (rc < 0) {
				netdev_err(dev, "cannot register fixed PHY\n");
				return rc;
			}

			/* In the case of a fixed PHY, the DT node associated
			 * to the PHY is the Ethernet MAC DT node.
			 */
			phynode = of_node_get(priv->device->of_node);
			fixed_link = true;

			netdev_dbg(dev, "fixed-link detected\n");
			phydev = of_phy_connect(dev, phynode,
						&altera_tse_adjust_link,
						0, priv->phy_iface);
		} else {
			netdev_dbg(dev, "no phy-handle found\n");
			if (!priv->mdio) {
				netdev_err(dev, "No phy-handle nor local mdio specified\n");
				return -ENODEV;
			}
			phydev = connect_local_phy(dev);
		}
	} else {
		netdev_dbg(dev, "phy-handle found\n");
		phydev = of_phy_connect(dev, phynode,
					&altera_tse_adjust_link, 0, priv->phy_iface);
	}
	of_node_put(phynode);

	if (!phydev) {
		netdev_err(dev, "Could not find the PHY\n");
		if (fixed_link)
			of_phy_deregister_fixed_link(priv->device->of_node);
		return -ENODEV;
	}

	/* Stop advertising 1000BASE capability if the interface is not GMII.
	 * Note: checkpatch throws CHECKs for the camel case defines below,
	 * it's ok to ignore.
	 */
	if ((priv->phy_iface == PHY_INTERFACE_MODE_MII) ||
	    (priv->phy_iface == PHY_INTERFACE_MODE_RMII))
		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
					 SUPPORTED_1000baseT_Full);

	/* Broken HW is sometimes missing the pull-up resistor on the
	 * MDIO line, which results in reads to non-existent devices returning
	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
	 * device as well. If a fixed-link is used the phy_id is always 0.
	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
	 */
	if ((phydev->phy_id == 0) && !fixed_link) {
		netdev_err(dev, "Bad PHY UID 0x%08x\n", phydev->phy_id);
		phy_disconnect(phydev);
		return -ENODEV;
	}

	netdev_dbg(dev, "attached to PHY %d UID 0x%08x Link = %d\n",
		   phydev->mdio.addr, phydev->phy_id, phydev->link);

	return 0;
}

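/* Program the primary MAC address: the first four octets go into
 * mac_addr_0 and the remaining two into the low half of mac_addr_1,
 * both in little-endian byte order.
 */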
static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
{
	u32 msb;
	u32 lsb;

	msb = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
	lsb = ((addr[5] << 8) | addr[4]) & 0xffff;

	/* Set primary MAC address */
	csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0));
	csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1));
}

/* MAC software reset.
 * When reset is triggered, the MAC function completes the current
 * transmission or reception, and subsequently disables the transmit and
 * receive logic, flushes the receive FIFO buffer, and resets the statistics
 * counters.
 */
static int reset_mac(struct altera_tse_private *priv)
{
	int counter;
	u32 dat;

	dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
	dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
	dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET;
	csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));

	counter = 0;
	while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
		if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config),
				     MAC_CMDCFG_SW_RESET))
			break;
		udelay(1);
	}

	if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
		dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
		dat &= ~MAC_CMDCFG_SW_RESET;
		csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
		return -1;
	}
	return 0;
}

/* Initialize MAC core registers
 */
static int init_mac(struct altera_tse_private *priv)
{
	unsigned int cmd = 0;
	u32 frm_length;

	/* Setup Rx FIFO */
	csrwr32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
		priv->mac_dev, tse_csroffs(rx_section_empty));

	csrwr32(ALTERA_TSE_RX_SECTION_FULL, priv->mac_dev,
		tse_csroffs(rx_section_full));

	csrwr32(ALTERA_TSE_RX_ALMOST_EMPTY, priv->mac_dev,
		tse_csroffs(rx_almost_empty));

	csrwr32(ALTERA_TSE_RX_ALMOST_FULL, priv->mac_dev,
		tse_csroffs(rx_almost_full));

	/* Setup Tx FIFO */
	csrwr32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
		priv->mac_dev, tse_csroffs(tx_section_empty));

	csrwr32(ALTERA_TSE_TX_SECTION_FULL, priv->mac_dev,
		tse_csroffs(tx_section_full));

	csrwr32(ALTERA_TSE_TX_ALMOST_EMPTY, priv->mac_dev,
		tse_csroffs(tx_almost_empty));

	csrwr32(ALTERA_TSE_TX_ALMOST_FULL, priv->mac_dev,
		tse_csroffs(tx_almost_full));

	/* MAC Address Configuration */
	tse_update_mac_addr(priv, priv->dev->dev_addr);

	/* MAC Function Configuration */
	frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN;
	csrwr32(frm_length, priv->mac_dev, tse_csroffs(frm_length));

	csrwr32(ALTERA_TSE_TX_IPG_LENGTH, priv->mac_dev,
		tse_csroffs(tx_ipg_length));

	/* Enable RX shift 16 so received frames are aligned on a 16-bit
	 * start address; disable TX shift 16 and CRC omission on transmit.
	 */
	tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat),
		    ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);

	tse_clear_bit(priv->mac_dev, tse_csroffs(tx_cmd_stat),
		      ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
		      ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);

	/* Set the MAC options */
	cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config));
	cmd &= ~MAC_CMDCFG_PAD_EN;	/* No padding Removal on Receive */
	cmd &= ~MAC_CMDCFG_CRC_FWD;	/* CRC Removal */
	cmd |= MAC_CMDCFG_RX_ERR_DISC;	/* Automatically discard frames
					 * with CRC errors
					 */
	cmd |= MAC_CMDCFG_CNTL_FRM_ENA;
	cmd &= ~MAC_CMDCFG_TX_ENA;
	cmd &= ~MAC_CMDCFG_RX_ENA;

	/* Default speed and duplex setting, full/100 */
	cmd &= ~MAC_CMDCFG_HD_ENA;
	cmd &= ~MAC_CMDCFG_ETH_SPEED;
	cmd &= ~MAC_CMDCFG_ENA_10;

	csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config));

	csrwr32(ALTERA_TSE_PAUSE_QUANTA, priv->mac_dev,
		tse_csroffs(pause_quanta));

	if (netif_msg_hw(priv))
		dev_dbg(priv->device,
			"MAC post-initialization: CMD_CONFIG = 0x%08x\n", cmd);

	return 0;
}

/* Start/stop MAC transmission logic
 */
static void tse_set_mac(struct altera_tse_private *priv, bool enable)
{
	u32 value = csrrd32(priv->mac_dev, tse_csroffs(command_config));

	if (enable)
		value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA;
	else
		value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);

	csrwr32(value, priv->mac_dev, tse_csroffs(command_config));
}

/* Change the MTU
 */
static int tse_change_mtu(struct net_device *dev, int new_mtu)
{
	if (netif_running(dev)) {
		netdev_err(dev, "must be stopped to change its MTU\n");
		return -EBUSY;
	}

	dev->mtu = new_mtu;
	netdev_update_features(dev);

	return 0;
}

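/* Program the 64-entry multicast hash table. Each address is reduced to a
 * 6-bit index by XOR-folding the bits of each of its six octets into one
 * bit per octet, and the corresponding hash_table register is set to 1.
 */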
static void altera_tse_set_mcfilter(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	int i;
	struct netdev_hw_addr *ha;

	/* clear the hash filter */
	for (i = 0; i < 64; i++)
		csrwr32(0, priv->mac_dev, tse_csroffs(hash_table) + i * 4);

	netdev_for_each_mc_addr(ha, dev) {
		unsigned int hash = 0;
		int mac_octet;

		for (mac_octet = 5; mac_octet >= 0; mac_octet--) {
			unsigned char xor_bit = 0;
			unsigned char octet = ha->addr[mac_octet];
			unsigned int bitshift;

			for (bitshift = 0; bitshift < 8; bitshift++)
				xor_bit ^= ((octet >> bitshift) & 0x01);

			hash = (hash << 1) | xor_bit;
		}
		csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4);
	}
}


static void altera_tse_set_mcfilterall(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	int i;

	/* set the hash filter */
	for (i = 0; i < 64; i++)
		csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
}

/* Set or clear the multicast filter for this adaptor
 */
static void tse_set_rx_mode_hashfilter(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);

	spin_lock(&priv->mac_cfg_lock);

	if (dev->flags & IFF_PROMISC)
		tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
			    MAC_CMDCFG_PROMIS_EN);

	if (dev->flags & IFF_ALLMULTI)
		altera_tse_set_mcfilterall(dev);
	else
		altera_tse_set_mcfilter(dev);

	spin_unlock(&priv->mac_cfg_lock);
}

/* Set or clear the multicast filter for this adaptor
 */
static void tse_set_rx_mode(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);

	spin_lock(&priv->mac_cfg_lock);

	if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
	    !netdev_mc_empty(dev) || !netdev_uc_empty(dev))
		tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
			    MAC_CMDCFG_PROMIS_EN);
	else
		tse_clear_bit(priv->mac_dev, tse_csroffs(command_config),
			      MAC_CMDCFG_PROMIS_EN);

	spin_unlock(&priv->mac_cfg_lock);
}

/* Initialise (if necessary) the SGMII PCS component
 */
static int init_sgmii_pcs(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	int n;
	unsigned int tmp_reg = 0;

	if (priv->phy_iface != PHY_INTERFACE_MODE_SGMII)
		return 0; /* Nothing to do, not in SGMII mode */

	/* The TSE SGMII PCS block looks a little like a PHY, it is
	 * mapped into the zeroth MDIO space of the MAC and it has
	 * ID registers like a PHY would. Sadly this is often
	 * configured to zeroes, so don't be surprised if it does
	 * show 0x00000000.
	 */

	if (sgmii_pcs_scratch_test(priv, 0x0000) &&
	    sgmii_pcs_scratch_test(priv, 0xffff) &&
	    sgmii_pcs_scratch_test(priv, 0xa5a5) &&
	    sgmii_pcs_scratch_test(priv, 0x5a5a)) {
		netdev_info(dev, "PCS PHY ID: 0x%04x%04x\n",
			    sgmii_pcs_read(priv, MII_PHYSID1),
			    sgmii_pcs_read(priv, MII_PHYSID2));
	} else {
		netdev_err(dev, "SGMII PCS Scratch memory test failed.\n");
		return -ENOMEM;
	}

	/* Starting on page 5-29 of the MegaCore Function User Guide
	 * Set SGMII Link timer to 1.6ms
	 */
	sgmii_pcs_write(priv, SGMII_PCS_LINK_TIMER_0, 0x0D40);
	sgmii_pcs_write(priv, SGMII_PCS_LINK_TIMER_1, 0x03);

	/* Enable SGMII Interface and Enable SGMII Auto Negotiation */
	sgmii_pcs_write(priv, SGMII_PCS_IF_MODE, 0x3);

	/* Enable Autonegotiation */
	tmp_reg = sgmii_pcs_read(priv, MII_BMCR);
	tmp_reg |= (BMCR_SPEED1000 | BMCR_FULLDPLX | BMCR_ANENABLE);
	sgmii_pcs_write(priv, MII_BMCR, tmp_reg);

	/* Reset PCS block */
	tmp_reg |= BMCR_RESET;
	sgmii_pcs_write(priv, MII_BMCR, tmp_reg);
	for (n = 0; n < SGMII_PCS_SW_RESET_TIMEOUT; n++) {
		if (!(sgmii_pcs_read(priv, MII_BMCR) & BMCR_RESET)) {
			netdev_info(dev, "SGMII PCS block initialised OK\n");
			return 0;
		}
		udelay(1);
	}

	/* We failed to reset the block, return a timeout */
	netdev_err(dev, "SGMII PCS block reset failed.\n");
	return -ETIMEDOUT;
}

/* Open and initialize the interface
 */
static int tse_open(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	int ret = 0;
	int i;
	unsigned long int flags;

	/* Reset and configure TSE MAC and probe associated PHY */
	ret = priv->dmaops->init_dma(priv);
	if (ret != 0) {
		netdev_err(dev, "Cannot initialize DMA\n");
		goto phy_error;
	}

	if (netif_msg_ifup(priv))
		netdev_warn(dev, "device MAC address %pM\n",
			    dev->dev_addr);

	if ((priv->revision < 0xd00) || (priv->revision > 0xe00))
		netdev_warn(dev, "TSE revision %x\n", priv->revision);

	spin_lock(&priv->mac_cfg_lock);
	/* no-op if MAC not operating in SGMII mode */
	ret = init_sgmii_pcs(dev);
	if (ret) {
		netdev_err(dev,
			   "Cannot init the SGMII PCS (error: %d)\n", ret);
		spin_unlock(&priv->mac_cfg_lock);
		goto phy_error;
	}

	ret = reset_mac(priv);
	/* Note that reset_mac will fail if the clocks are gated by the PHY
	 * due to the PHY being put into isolation or power down mode.
	 * This is not an error if reset fails due to no clock.
	 */
	if (ret)
		netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret);

	ret = init_mac(priv);
	spin_unlock(&priv->mac_cfg_lock);
	if (ret) {
		netdev_err(dev, "Cannot init MAC core (error: %d)\n", ret);
		goto alloc_skbuf_error;
	}

	priv->dmaops->reset_dma(priv);

	/* Create and initialize the TX/RX descriptors chains. */
	priv->rx_ring_size = dma_rx_num;
	priv->tx_ring_size = dma_tx_num;
	ret = alloc_init_skbufs(priv);
	if (ret) {
		netdev_err(dev, "DMA descriptors initialization failed\n");
		goto alloc_skbuf_error;
	}


	/* Register RX interrupt */
	ret = request_irq(priv->rx_irq, altera_isr, IRQF_SHARED,
			  dev->name, dev);
	if (ret) {
		netdev_err(dev, "Unable to register RX interrupt %d\n",
			   priv->rx_irq);
		goto init_error;
	}

	/* Register TX interrupt */
	ret = request_irq(priv->tx_irq, altera_isr, IRQF_SHARED,
			  dev->name, dev);
	if (ret) {
		netdev_err(dev, "Unable to register TX interrupt %d\n",
			   priv->tx_irq);
		goto tx_request_irq_error;
	}

	/* Enable DMA interrupts */
	spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
	priv->dmaops->enable_rxirq(priv);
	priv->dmaops->enable_txirq(priv);

	/* Setup RX descriptor chain */
	for (i = 0; i < priv->rx_ring_size; i++)
		priv->dmaops->add_rx_desc(priv, &priv->rx_ring[i]);

	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);

	if (dev->phydev)
		phy_start(dev->phydev);

	napi_enable(&priv->napi);
	netif_start_queue(dev);

	priv->dmaops->start_rxdma(priv);

	/* Start MAC Rx/Tx */
	spin_lock(&priv->mac_cfg_lock);
	tse_set_mac(priv, true);
	spin_unlock(&priv->mac_cfg_lock);

	return 0;

tx_request_irq_error:
	free_irq(priv->rx_irq, dev);
init_error:
	free_skbufs(dev);
alloc_skbuf_error:
phy_error:
	return ret;
}

/* Stop TSE MAC interface and put the device in an inactive state
 */
static int tse_shutdown(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	int ret;
	unsigned long int flags;

	/* Stop the PHY */
	if (dev->phydev)
		phy_stop(dev->phydev);

	netif_stop_queue(dev);
	napi_disable(&priv->napi);

	/* Disable DMA interrupts */
	spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
	priv->dmaops->disable_rxirq(priv);
	priv->dmaops->disable_txirq(priv);
	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);

	/* Free the IRQ lines */
	free_irq(priv->rx_irq, dev);
	free_irq(priv->tx_irq, dev);

	/* disable and reset the MAC, empties fifo */
	spin_lock(&priv->mac_cfg_lock);
	spin_lock(&priv->tx_lock);

	ret = reset_mac(priv);
	/* Note that reset_mac will fail if the clocks are gated by the PHY
	 * due to the PHY being put into isolation or power down mode.
	 * This is not an error if reset fails due to no clock.
	 */
	if (ret)
		netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret);
	priv->dmaops->reset_dma(priv);
	free_skbufs(dev);

	spin_unlock(&priv->tx_lock);
	spin_unlock(&priv->mac_cfg_lock);

	priv->dmaops->uninit_dma(priv);

	return 0;
}

static struct net_device_ops altera_tse_netdev_ops = {
	.ndo_open		= tse_open,
	.ndo_stop		= tse_shutdown,
	.ndo_start_xmit		= tse_start_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_set_rx_mode	= tse_set_rx_mode,
	.ndo_change_mtu		= tse_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};

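/* Look up a named MEM resource of the platform device, reserve the region
 * and ioremap it, returning both the resource and the mapped address.
 */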
static int request_and_map(struct platform_device *pdev, const char *name,
			   struct resource **res, void __iomem **ptr)
{
	struct resource *region;
	struct device *device = &pdev->dev;

	*res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	if (*res == NULL) {
		dev_err(device, "resource %s not defined\n", name);
		return -ENODEV;
	}

	region = devm_request_mem_region(device, (*res)->start,
					 resource_size(*res), dev_name(device));
	if (region == NULL) {
		dev_err(device, "unable to request %s\n", name);
		return -EBUSY;
	}

	*ptr = devm_ioremap_nocache(device, region->start,
				    resource_size(region));
	if (*ptr == NULL) {
		dev_err(device, "ioremap_nocache of %s failed!", name);
		return -ENOMEM;
	}

	return 0;
}

/* Probe Altera TSE MAC device
 */
static int altera_tse_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	int ret = -ENODEV;
	struct resource *control_port;
	struct resource *dma_res;
	struct altera_tse_private *priv;
	const unsigned char *macaddr;
	void __iomem *descmap;
	const struct of_device_id *of_id = NULL;

	ndev = alloc_etherdev(sizeof(struct altera_tse_private));
	if (!ndev) {
		dev_err(&pdev->dev, "Could not allocate network device\n");
		return -ENODEV;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);

	priv = netdev_priv(ndev);
	priv->device = &pdev->dev;
	priv->dev = ndev;
	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	of_id = of_match_device(altera_tse_ids, &pdev->dev);

	if (of_id)
		priv->dmaops = (struct altera_dmaops *)of_id->data;


	if (priv->dmaops &&
	    priv->dmaops->altera_dtype == ALTERA_DTYPE_SGDMA) {
		/* Get the mapped address to the SGDMA descriptor memory */
		ret = request_and_map(pdev, "s1", &dma_res, &descmap);
		if (ret)
			goto err_free_netdev;

		/* Start of that memory is for transmit descriptors */
		priv->tx_dma_desc = descmap;

		/* First half is for tx descriptors, other half for rx */
		priv->txdescmem = resource_size(dma_res)/2;

		priv->txdescmem_busaddr = (dma_addr_t)dma_res->start;

		priv->rx_dma_desc = (void __iomem *)((uintptr_t)(descmap +
						     priv->txdescmem));
		priv->rxdescmem = resource_size(dma_res)/2;
		priv->rxdescmem_busaddr = dma_res->start;
		priv->rxdescmem_busaddr += priv->txdescmem;

		if (upper_32_bits(priv->rxdescmem_busaddr)) {
			dev_dbg(priv->device,
				"SGDMA bus addresses greater than 32-bits\n");
			ret = -EINVAL;
			goto err_free_netdev;
		}
		if (upper_32_bits(priv->txdescmem_busaddr)) {
			dev_dbg(priv->device,
				"SGDMA bus addresses greater than 32-bits\n");
			ret = -EINVAL;
			goto err_free_netdev;
		}
	} else if (priv->dmaops &&
		   priv->dmaops->altera_dtype == ALTERA_DTYPE_MSGDMA) {
		ret = request_and_map(pdev, "rx_resp", &dma_res,
				      &priv->rx_dma_resp);
		if (ret)
			goto err_free_netdev;

		ret = request_and_map(pdev, "tx_desc", &dma_res,
				      &priv->tx_dma_desc);
		if (ret)
			goto err_free_netdev;

		priv->txdescmem = resource_size(dma_res);
		priv->txdescmem_busaddr = dma_res->start;

		ret = request_and_map(pdev, "rx_desc", &dma_res,
				      &priv->rx_dma_desc);
		if (ret)
			goto err_free_netdev;

		priv->rxdescmem = resource_size(dma_res);
		priv->rxdescmem_busaddr = dma_res->start;

	} else {
		goto err_free_netdev;
	}

	if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask)))
		dma_set_coherent_mask(priv->device,
				      DMA_BIT_MASK(priv->dmaops->dmamask));
	else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32)))
		dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
	else
		goto err_free_netdev;

	/* MAC address space */
	ret = request_and_map(pdev, "control_port", &control_port,
			      (void __iomem **)&priv->mac_dev);
	if (ret)
		goto err_free_netdev;

	/* xSGDMA Rx Dispatcher address space */
	ret = request_and_map(pdev, "rx_csr", &dma_res,
			      &priv->rx_dma_csr);
	if (ret)
		goto err_free_netdev;


	/* xSGDMA Tx Dispatcher address space */
	ret = request_and_map(pdev, "tx_csr", &dma_res,
			      &priv->tx_dma_csr);
	if (ret)
		goto err_free_netdev;


	/* Rx IRQ */
	priv->rx_irq = platform_get_irq_byname(pdev, "rx_irq");
	if (priv->rx_irq == -ENXIO) {
		dev_err(&pdev->dev, "cannot obtain Rx IRQ\n");
		ret = -ENXIO;
		goto err_free_netdev;
	}

	/* Tx IRQ */
	priv->tx_irq = platform_get_irq_byname(pdev, "tx_irq");
	if (priv->tx_irq == -ENXIO) {
		dev_err(&pdev->dev, "cannot obtain Tx IRQ\n");
		ret = -ENXIO;
		goto err_free_netdev;
	}

	/* get FIFO depths from device tree */
	if (of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
				 &priv->rx_fifo_depth)) {
		dev_err(&pdev->dev, "cannot obtain rx-fifo-depth\n");
		ret = -ENXIO;
		goto err_free_netdev;
	}

	if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
				 &priv->tx_fifo_depth)) {
		dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
		ret = -ENXIO;
		goto err_free_netdev;
	}

	/* get hash filter settings for this instance */
	priv->hash_filter =
		of_property_read_bool(pdev->dev.of_node,
				      "altr,has-hash-multicast-filter");

	/* Leave the hash filter disabled for now, until the multicast
	 * filter receive issue is debugged
	 */
	priv->hash_filter = 0;

	/* get supplemental address settings for this instance */
	priv->added_unicast =
		of_property_read_bool(pdev->dev.of_node,
				      "altr,has-supplementary-unicast");

	priv->dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN;
	/* Max MTU is 1500, ETH_DATA_LEN */
	priv->dev->max_mtu = ETH_DATA_LEN;

	/* Get the max mtu from the device tree. Note that the
	 * "max-frame-size" parameter is actually max mtu. Definition
	 * in the ePAPR v1.1 spec and usage differ, so go with usage.
	 */
	of_property_read_u32(pdev->dev.of_node, "max-frame-size",
			     &priv->dev->max_mtu);

	/* The DMA buffer size already accounts for an alignment bias
	 * to avoid unaligned access exceptions for the NIOS processor.
	 */
	priv->rx_dma_buf_sz = ALTERA_RXDMABUFFER_SIZE;

	/* get default MAC address from device tree */
	macaddr = of_get_mac_address(pdev->dev.of_node);
	if (macaddr)
		ether_addr_copy(ndev->dev_addr, macaddr);
	else
		eth_hw_addr_random(ndev);

	/* get phy addr and create mdio */
	ret = altera_tse_phy_get_addr_mdio_create(ndev);

	if (ret)
		goto err_free_netdev;

	/* initialize netdev */
	ndev->mem_start = control_port->start;
	ndev->mem_end = control_port->end;
	ndev->netdev_ops = &altera_tse_netdev_ops;
	altera_tse_set_ethtool_ops(ndev);

	altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;

	if (priv->hash_filter)
		altera_tse_netdev_ops.ndo_set_rx_mode =
			tse_set_rx_mode_hashfilter;

	/* Scatter/gather IO is not supported,
	 * so it is turned off
	 */
	ndev->hw_features &= ~NETIF_F_SG;
	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;

	/* VLAN offloading of tagging, stripping and filtering is not
	 * supported by hardware, but driver will accommodate the
	 * extra 4-byte VLAN tag for processing by upper layers
	 */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;

	/* setup NAPI interface */
	netif_napi_add(ndev, &priv->napi, tse_poll, NAPI_POLL_WEIGHT);

	spin_lock_init(&priv->mac_cfg_lock);
	spin_lock_init(&priv->tx_lock);
	spin_lock_init(&priv->rxdma_irq_lock);

	netif_carrier_off(ndev);
	ret = register_netdev(ndev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register TSE net device\n");
		goto err_register_netdev;
	}

	platform_set_drvdata(pdev, ndev);

	priv->revision = ioread32(&priv->mac_dev->megacore_revision);

	if (netif_msg_probe(priv))
		dev_info(&pdev->dev, "Altera TSE MAC version %d.%d at 0x%08lx irq %d/%d\n",
			 (priv->revision >> 8) & 0xff,
			 priv->revision & 0xff,
			 (unsigned long) control_port->start, priv->rx_irq,
			 priv->tx_irq);

	ret = init_phy(ndev);
	if (ret != 0) {
		netdev_err(ndev, "Cannot attach to PHY (error: %d)\n", ret);
		goto err_init_phy;
	}
	return 0;

err_init_phy:
	unregister_netdev(ndev);
err_register_netdev:
	netif_napi_del(&priv->napi);
	altera_tse_mdio_destroy(ndev);
err_free_netdev:
	free_netdev(ndev);
	return ret;
}

/* Remove Altera TSE MAC device
 */
static int altera_tse_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct altera_tse_private *priv = netdev_priv(ndev);

	if (ndev->phydev) {
		phy_disconnect(ndev->phydev);

		if (of_phy_is_fixed_link(priv->device->of_node))
			of_phy_deregister_fixed_link(priv->device->of_node);
	}

	platform_set_drvdata(pdev, NULL);
	altera_tse_mdio_destroy(ndev);
	unregister_netdev(ndev);
	free_netdev(ndev);

	return 0;
}

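/* DMA back-end callbacks. The SGDMA variant is limited to 32-bit bus
 * addresses, while the mSGDMA variant below supports a 64-bit mask.
 */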
static const struct altera_dmaops altera_dtype_sgdma = {
	.altera_dtype = ALTERA_DTYPE_SGDMA,
	.dmamask = 32,
	.reset_dma = sgdma_reset,
	.enable_txirq = sgdma_enable_txirq,
	.enable_rxirq = sgdma_enable_rxirq,
	.disable_txirq = sgdma_disable_txirq,
	.disable_rxirq = sgdma_disable_rxirq,
	.clear_txirq = sgdma_clear_txirq,
	.clear_rxirq = sgdma_clear_rxirq,
	.tx_buffer = sgdma_tx_buffer,
	.tx_completions = sgdma_tx_completions,
	.add_rx_desc = sgdma_add_rx_desc,
	.get_rx_status = sgdma_rx_status,
	.init_dma = sgdma_initialize,
	.uninit_dma = sgdma_uninitialize,
	.start_rxdma = sgdma_start_rxdma,
};

static const struct altera_dmaops altera_dtype_msgdma = {
	.altera_dtype = ALTERA_DTYPE_MSGDMA,
	.dmamask = 64,
	.reset_dma = msgdma_reset,
	.enable_txirq = msgdma_enable_txirq,
	.enable_rxirq = msgdma_enable_rxirq,
	.disable_txirq = msgdma_disable_txirq,
	.disable_rxirq = msgdma_disable_rxirq,
	.clear_txirq = msgdma_clear_txirq,
	.clear_rxirq = msgdma_clear_rxirq,
	.tx_buffer = msgdma_tx_buffer,
	.tx_completions = msgdma_tx_completions,
	.add_rx_desc = msgdma_add_rx_desc,
	.get_rx_status = msgdma_rx_status,
	.init_dma = msgdma_initialize,
	.uninit_dma = msgdma_uninitialize,
	.start_rxdma = msgdma_start_rxdma,
};

static const struct of_device_id altera_tse_ids[] = {
	{ .compatible = "altr,tse-msgdma-1.0", .data = &altera_dtype_msgdma, },
	{ .compatible = "altr,tse-1.0", .data = &altera_dtype_sgdma, },
	{ .compatible = "ALTR,tse-1.0", .data = &altera_dtype_sgdma, },
	{},
};
MODULE_DEVICE_TABLE(of, altera_tse_ids);

static struct platform_driver altera_tse_driver = {
	.probe		= altera_tse_probe,
	.remove		= altera_tse_remove,
	.suspend	= NULL,
	.resume		= NULL,
	.driver		= {
		.name		= ALTERA_TSE_RESOURCE_NAME,
		.of_match_table = altera_tse_ids,
	},
};

module_platform_driver(altera_tse_driver);

MODULE_AUTHOR("Altera Corporation");
MODULE_DESCRIPTION("Altera Triple Speed Ethernet MAC driver");
MODULE_LICENSE("GPL v2");