/*
 * Copyright (C) 2005 - 2009 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * [email protected]
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int rx_frag_size = 2048;
module_param(rx_frag_size, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

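/*
 * Enable or disable this function's host interrupts by flipping the
 * HOSTINTR bit of the membar interrupt-control register; the register
 * is read first so the write is skipped when no change is needed.
 */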
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}

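/*
 * The four helpers below ring the RQ/TXQ/EQ/CQ doorbell registers. Each
 * 32-bit doorbell word encodes the ring id in its low bits plus a count
 * of buffers posted (RQ/TXQ) or events/completions popped (EQ/CQ); the
 * EQ/CQ doorbells also carry a re-arm flag, and the EQ doorbell a
 * clear-interrupt flag.
 */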
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
			adapter->if_handle, &adapter->pmac_id);
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

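/*
 * Fold the firmware-maintained counters (DMAed into adapter->stats.cmd
 * by be_cmd_get_stats) into the standard net_device_stats fields for
 * this port.
 */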
static void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->stats.net_stats;
	struct be_erx_stats *erx_stats = &hw_stats->erx;

	dev_stats->rx_packets = port_stats->rx_total_frames;
	dev_stats->tx_packets = port_stats->tx_unicastframes +
		port_stats->tx_multicastframes + port_stats->tx_broadcastframes;
	dev_stats->rx_bytes = (u64) port_stats->rx_bytes_msd << 32 |
				(u64) port_stats->rx_bytes_lsd;
	dev_stats->tx_bytes = (u64) port_stats->tx_bytes_msd << 32 |
				(u64) port_stats->tx_bytes_lsd;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long +
		port_stats->rx_dropped_too_small +
		port_stats->rx_dropped_too_short +
		port_stats->rx_dropped_header_too_small +
		port_stats->rx_dropped_tcp_length +
		port_stats->rx_dropped_runt +
		port_stats->rx_tcp_checksum_errs +
		port_stats->rx_ip_checksum_errs +
		port_stats->rx_udp_checksum_errs;

	/* no space in linux buffers: best possible approximation */
	dev_stats->rx_dropped = erx_stats->rx_drops_no_fragments[0];

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long;

	/* receive ring buffer overflow */
	dev_stats->rx_over_errors = 0;

	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
					port_stats->rx_input_fifo_overflow +
					rxf_stats->rx_drops_no_pbuf;
	/* receiver missed packets */
	dev_stats->rx_missed_errors = 0;

	/* packet transmit problems */
	dev_stats->tx_errors = 0;

	/* no space available in linux */
	dev_stats->tx_dropped = 0;

	dev_stats->multicast = port_stats->tx_multicastframes;
	dev_stats->collisions = 0;

	/* detailed tx_errors */
	dev_stats->tx_aborted_errors = 0;
	dev_stats->tx_carrier_errors = 0;
	dev_stats->tx_fifo_errors = 0;
	dev_stats->tx_heartbeat_errors = 0;
	dev_stats->tx_window_errors = 0;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		if (link_up) {
			netif_start_queue(netdev);
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_stop_queue(netdev);
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
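/*
 * Adaptive interrupt coalescing: sampled at most once a second, the
 * frag rate is mapped to a delay of (fps / 110000) << 3, clamped to
 * [min_eqd, max_eqd] (with small values forced to 0), and programmed
 * via be_cmd_modify_eqd() only when it actually changes.
 */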
static void be_rx_eqd_update(struct be_adapter *adapter)
{
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_drvr_stats *stats = &adapter->stats.drvr_stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->be_prev_rx_frags = stats->be_rx_frags;
	eqd = stats->be_rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}

static struct net_device_stats *be_get_stats(struct net_device *dev)
{
	struct be_adapter *adapter = netdev_priv(dev);

	return &adapter->stats.net_stats;
}

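/*
 * Convert a byte count accumulated over 'ticks' jiffies into Mbits/sec.
 * Callers sample over intervals of at least two seconds, so ticks / HZ
 * is never zero here.
 */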
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* bits/sec -> Mbits/sec */

	return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, bool stopped)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);
	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	if (stopped)
		stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (cnt & 1) {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	} else
		*dummy = false;
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

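/*
 * Each TX WRB describes one DMA fragment: the 64-bit bus address split
 * into hi/lo words plus a length. A request must occupy an even number
 * of WRBs, which is why wrb_cnt_for_skb() pads odd counts with a dummy.
 */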
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
		bool vlan, u32 wrb_cnt, u32 len)
{
	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
			hdr, vlan_tx_tag_get(skb));
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

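/*
 * DMA-map the skb's linear data and page frags and post one WRB per
 * fragment behind a header WRB, adding a trailing dummy WRB when needed
 * to keep the count even. Returns the number of payload bytes posted.
 */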
static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	u64 busaddr;
	u32 i, copied = 0;
	struct pci_dev *pdev = adapter->pdev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;

	atomic_add(wrb_cnt, &txq->used);
	hdr = queue_head_node(txq);
	queue_head_inc(txq);

	if (skb->len > skb->data_len) {
		int len = skb->len - skb->data_len;
		busaddr = pci_map_single(pdev, skb->data, len,
					 PCI_DMA_TODEVICE);
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = pci_map_page(pdev, frag->page,
				       frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false,
		wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
}

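/*
 * Fast-path transmit: build the WRBs, remember the skb for completion
 * processing, stop the queue (before ringing the doorbell) when the
 * ring may not fit another max-fragmented skb, then notify the TXQ.
 */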
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);

	/* record the sent skb in the sent_skb table */
	BUG_ON(tx_obj->sent_skb_list[start]);
	tx_obj->sent_skb_list[start] = skb;

	/* Ensure that txq has space for the next skb; Else stop the queue
	 * *BEFORE* ringing the tx doorbell, so that we serialize the
	 * tx compls of the current transmit which'll wake up the queue
	 */
	if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >= txq->len) {
		netif_stop_queue(netdev);
		stopped = true;
	}

	be_txq_notify(adapter, txq->id, wrb_cnt);

	be_tx_stats_update(adapter, wrb_cnt, copied, stopped);
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > BE_MAX_JUMBO_FRAME_SIZE) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU, BE_MAX_JUMBO_FRAME_SIZE);
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * If there are BE_NUM_VLANS_SUPPORTED or fewer VLANs configured,
 * program them in BE. If more than BE_NUM_VLANS_SUPPORTED are configured,
 * set the BE in promiscuous VLAN mode.
 */
static void be_vid_config(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;

	if (adapter->num_vlans <= BE_NUM_VLANS_SUPPORTED) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		be_cmd_vlan_config(adapter, adapter->if_handle,
			vtag, ntags, 1, 0);
	} else {
		be_cmd_vlan_config(adapter, adapter->if_handle,
			NULL, 0, 1, 1);
	}
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;

	be_eq_notify(adapter, rx_eq->q.id, false, false, 0);
	be_eq_notify(adapter, tx_eq->q.id, false, false, 0);
	adapter->vlan_grp = grp;
	be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->num_vlans++;
	adapter->vlan_tag[vid] = 1;

	be_vid_config(netdev);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->num_vlans--;
	adapter->vlan_tag[vid] = 0;

	vlan_group_set_device(adapter->vlan_grp, vid, NULL);
	be_vid_config(netdev);
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
	}

	if (netdev->flags & IFF_ALLMULTI) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL, 0);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev->mc_list,
		netdev->mc_count);
done:
	return;
}

static void be_rx_rate_update(struct be_adapter *adapter)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->be_rx_jiffies)) {
		stats->be_rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->be_rx_jiffies) < 2 * HZ)
		return;

	stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes
					  - stats->be_rx_bytes_prev,
					 now - stats->be_rx_jiffies);
	stats->be_rx_jiffies = now;
	stats->be_rx_bytes_prev = stats->be_rx_bytes;
}

static void be_rx_stats_update(struct be_adapter *adapter,
		u32 pktsize, u16 numfrags)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);

	stats->be_rx_compl++;
	stats->be_rx_frags += numfrags;
	stats->be_rx_bytes += pktsize;
}

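/*
 * Returns true when software must verify the checksum: whenever rx
 * checksum offload is disabled or the completion does not report valid
 * IP and L4 checksums for a recognized TCP/UDP packet.
 */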
static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
{
	u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;

	l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
	ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
	ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
	if (ip_version) {
		tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
		udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
	}
	ipv6_chk = (ip_version && (tcpf || udpf));

	return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &adapter->rx_obj.q;

	rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user)
		pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, PCI_DMA_FROMDEVICE);

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter,
			struct sk_buff *skb, struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd, j;
	u32 pktsize, hdr_len, curr_frag_len, size;
	u8 *start;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	page_info = get_rx_page_info(adapter, rxq_idx);

	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(pktsize, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	memset(page_info, 0, sizeof(*page_info));

	if (pktsize <= rx_frag_size) {
		BUG_ON(num_rcvd != 1);
		goto done;
	}

	/* More frags present for this completion */
	size = pktsize;
	for (i = 1, j = 0; i < num_rcvd; i++) {
		size -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		page_info = get_rx_page_info(adapter, rxq_idx);

		curr_frag_len = min(size, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

done:
	be_rx_stats_update(adapter, pktsize, num_rcvd);
	return;
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	struct sk_buff *skb;
	u32 vtp, vid;

	vtp = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);

	skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN);
	if (!skb) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxcp);
		return;
	}

	skb_reserve(skb, NET_IP_ALIGN);

	skb_fill_rx_data(adapter, skb, rxcp);

	if (do_pkt_csum(rxcp, adapter->rx_csum))
		skb->ip_summed = CHECKSUM_NONE;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, adapter->netdev);
	skb->dev = adapter->netdev;

	if (vtp) {
		if (!adapter->vlan_grp || adapter->num_vlans == 0) {
			kfree_skb(skb);
			return;
		}
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		vid = be16_to_cpu(vid);
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
	} else {
		netif_receive_skb(skb);
	}

	return;
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_eq_obj *eq_obj = &adapter->rx_eq;
	u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
	u16 i, rxq_idx = 0, vid, j;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
	pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxcp);
		return;
	}

	remaining = pkt_size;
	for (i = 0, j = -1; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = pkt_size;
	skb->data_len = pkt_size;
	skb->truesize += pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (likely(!vlanf)) {
		napi_gro_frags(&eq_obj->napi);
	} else {
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		vid = be16_to_cpu(vid);

		if (!adapter->vlan_grp || adapter->num_vlans == 0)
			return;

		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
	}

	be_rx_stats_update(adapter, pkt_size, num_rcvd);
	return;
}

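/*
 * Completion entries are claimed by polling their 'valid' bit: a valid
 * entry is converted from little-endian in place, and the valid word is
 * zeroed again (be_rx_compl_reset below) once it has been processed.
 */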
static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
{
	struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq);

	if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
		return NULL;

	be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

	queue_tail_inc(&adapter->rx_obj.cq);
	return rxcp;
}

/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
	rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}

static inline struct page *be_alloc_pages(u32 size)
{
	gfp_t alloc_flags = GFP_ATOMIC;
	u32 order = get_order(size);
	if (order > 0)
		alloc_flags |= __GFP_COMP;
	return alloc_pages(alloc_flags, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_adapter *adapter)
{
	struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
	struct be_rx_page_info *page_info = NULL;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size);
			if (unlikely(!pagep)) {
				drvr_stats(adapter)->be_ethrx_post_fail++;
				break;
			}
			page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
						adapter->big_page_size,
						PCI_DMA_FROMDEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		pci_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
		queue_head_inc(rxq);

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		adapter->rx_post_starved = true;
	}

	return;
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u64 busaddr;
	u16 cur_index, num_wrbs = 0;

	cur_index = txq->tail;
	sent_skb = sent_skbs[cur_index];
	BUG_ON(!sent_skb);
	sent_skbs[cur_index] = NULL;

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		be_dws_le_to_cpu(wrb, sizeof(*wrb));
		busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
		if (busaddr != 0) {
			pci_unmap_single(adapter->pdev, busaddr,
				wrb->frag_len, PCI_DMA_TODEVICE);
		}
		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	atomic_sub(num_wrbs, &txq->used);

	kfree_skb(sent_skb);
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

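/*
 * Drain all pending entries from an event queue, acknowledge them via
 * the EQ doorbell (re-armed, interrupt cleared), and kick NAPI if any
 * event was seen.
 */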
static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	be_eq_notify(adapter, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
	struct be_eth_rx_compl *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
		be_rx_compl_discard(adapter, rxcp);
		be_rx_compl_reset(rxcp);
		be_cq_notify(adapter, rx_cq->id, true, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}

static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			be_tx_compl_process(adapter, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			cmpl = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;
	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(adapter, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->rx_obj.q;
	if (q->created) {
		be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
		be_rx_q_clean(adapter);
	}
	be_queue_free(adapter, q);

	q = &adapter->rx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->rx_eq);

	q = &adapter->rx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	int rc;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	adapter->rx_eq.max_eqd = BE_MAX_EQD;
	adapter->rx_eq.min_eqd = 0;
	adapter->rx_eq.cur_eqd = 0;
	adapter->rx_eq.enable_aic = true;

	/* Alloc Rx Event queue */
	eq = &adapter->rx_eq.q;
	rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				sizeof(struct be_eq_entry));
	if (rc)
		return rc;

	/* Ask BE to create Rx Event queue */
	rc = be_cmd_eq_create(adapter, eq, adapter->rx_eq.cur_eqd);
	if (rc)
		goto rx_eq_free;

	/* Alloc RX eth compl queue */
	cq = &adapter->rx_obj.cq;
	rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
			sizeof(struct be_eth_rx_compl));
	if (rc)
		goto rx_eq_destroy;

	/* Ask BE to create Rx eth compl queue */
	rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
	if (rc)
		goto rx_cq_free;

	/* Alloc RX eth queue */
	q = &adapter->rx_obj.q;
	rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d));
	if (rc)
		goto rx_cq_destroy;

	/* Ask BE to create Rx eth queue */
	rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
		BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false);
	if (rc)
		goto rx_q_free;

	return 0;
rx_q_free:
	be_queue_free(adapter, q);
rx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
rx_cq_free:
	be_queue_free(adapter, cq);
rx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
rx_eq_free:
	be_queue_free(adapter, eq);
	return rc;
}

/* There are 8 evt ids per func. Returns the evt id's bit number */
static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
{
	return eq_id - 8 * be_pci_func(adapter);
}

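/*
 * INTx handler (the line may be shared): the per-function ISR register
 * tells us whether this adapter raised the interrupt; if it did, drain
 * both the tx/mcc and rx event queues.
 */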
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	int isr;

	isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
		be_pci_func(adapter) * CEV_ISR_SIZE);
	if (!isr)
		return IRQ_NONE;

	event_handle(adapter, &adapter->tx_eq);
	event_handle(adapter, &adapter->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
	int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);

	if (err)
		drvr_stats(adapter)->be_rxcp_err++;

	return (tcp_frame && !err) ? true : false;
}

int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(rx_eq, struct be_adapter, rx_eq);
	struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
	struct be_eth_rx_compl *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(adapter);
		if (!rxcp)
			break;

		if (do_gro(adapter, rxcp))
			be_rx_compl_process_gro(adapter, rxcp);
		else
			be_rx_compl_process(adapter, rxcp);

		be_rx_compl_reset(rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(adapter);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}

void be_process_tx(struct be_adapter *adapter)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	u32 num_cmpl = 0;
	u16 end_idx;

	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
		be_tx_compl_process(adapter, end_idx);
		num_cmpl++;
	}

	if (num_cmpl) {
		be_cq_notify(adapter, tx_cq->id, true, num_cmpl);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		drvr_stats(adapter)->be_tx_events++;
		drvr_stats(adapter)->be_tx_compl += num_cmpl;
	}
}

/* As TX and MCC share the same EQ, check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);

	napi_complete(napi);

	be_process_tx(adapter);

	be_process_mcc(adapter);

	return 1;
}

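/*
 * Periodic housekeeping, rescheduled every second: pull fresh hardware
 * stats, re-tune the RX EQ delay, update tx/rx rate samples, and
 * replenish the RX ring if posting previously failed for lack of
 * memory.
 */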
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	int status;

	/* Get Stats */
	status = be_cmd_get_stats(adapter, &adapter->stats.cmd);
	if (!status)
		netdev_stats_update(adapter);

	/* Set EQ delay */
	be_rx_eqd_update(adapter);

	be_tx_rate_update(adapter);
	be_rx_rate_update(adapter);

	if (adapter->rx_post_starved) {
		adapter->rx_post_starved = false;
		be_post_rx_frags(adapter);
	}

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_enable(struct be_adapter *adapter)
{
	int i, status;

	for (i = 0; i < BE_NUM_MSIX_VECTORS; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
		BE_NUM_MSIX_VECTORS);
	if (status == 0)
		adapter->msix_enabled = true;
	return;
}

static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
{
	return adapter->msix_entries[
			be_evt_bit_get(adapter, eq_id)].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj->q.id);
	return request_irq(vec, handler, 0, eq_obj->desc, adapter);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj)
{
	int vec = be_msix_vec_get(adapter, eq_obj->q.id);
	free_irq(vec, adapter);
}


static int be_msix_register(struct be_adapter *adapter)
{
	int status;

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx");
	if (status)
		goto err;

	status = be_request_irq(adapter, &adapter->rx_eq, be_msix_rx, "rx");
	if (status)
		goto free_tx_irq;

	return 0;

free_tx_irq:
	be_free_irq(adapter, &adapter->tx_eq);
err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	pci_disable_msix(adapter->pdev);
	adapter->msix_enabled = false;
	return status;
}
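
/* Register interrupt handlers: prefer the MSI-X vectors set up at probe
 * time; if MSI-X is unavailable or registration fails, fall back to a
 * shared INTx line.
 */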
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (adapter->msix_enabled) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!adapter->msix_enabled) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq);
	be_free_irq(adapter, &adapter->rx_eq);
done:
	adapter->isr_registered = false;
}
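
/* ndo_open: post the initial rx buffers, enable NAPI and interrupts, and
 * arm the event/completion queues (they are created unarmed so that no
 * events arrive before we are ready). Then query the link state and kick
 * off the periodic worker.
 */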
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	bool link_up;
	int status;

	/* First time posting */
	be_post_rx_frags(adapter);

	napi_enable(&rx_eq->napi);
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Rx compl queue may be in unarmed state; rearm it */
	be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);

	status = be_cmd_link_status_query(adapter, &link_up);
	if (status)
		return status;
	be_link_status_update(adapter, link_up);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}
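
/* Create the FW interface handle and the tx, rx and MCC queues. Each step
 * unwinds the earlier ones on failure via the goto ladder at the bottom.
 */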
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 if_flags;
	int status;

	if_flags = BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PROMISCUOUS |
		BE_IF_FLAGS_MCAST_PROMISCUOUS | BE_IF_FLAGS_UNTAGGED |
		BE_IF_FLAGS_PASS_L3L4_ERRORS;
	status = be_cmd_if_create(adapter, if_flags, netdev->dev_addr,
			false/* pmac_invalid */, &adapter->if_handle,
			&adapter->pmac_id);
	if (status != 0)
		goto do_none;

	be_vid_config(netdev);

	status = be_cmd_set_flow_control(adapter, true, true);
	if (status != 0)
		goto if_destroy;

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	be_cmd_if_destroy(adapter, adapter->if_handle);
do_none:
	return status;
}
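
/* Inverse of be_setup(): destroy the queues in reverse creation order,
 * then the FW interface handle.
 */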
static int be_clear(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle);

	return 0;
}
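
/* ndo_stop: stop the tx queue and the worker, mask interrupts and wait
 * for in-flight handlers, quiesce NAPI, and finally reap outstanding tx
 * completions so that every tx skb is freed before returning.
 */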
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec;

	cancel_delayed_work_sync(&adapter->work);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	adapter->link_up = false;

	be_intr_set(adapter, false);

	if (adapter->msix_enabled) {
		vec = be_msix_vec_get(adapter, tx_eq->q.id);
		synchronize_irq(vec);
		vec = be_msix_vec_get(adapter, rx_eq->q.id);
		synchronize_irq(vec);
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	napi_disable(&rx_eq->napi);
	napi_disable(&tx_eq->napi);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}

static struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_get_stats = be_get_stats,
	.ndo_set_rx_mode = be_set_multicast_list,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_register = be_vlan_register,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_IP_CSUM |
		NETIF_F_IPV6_CSUM | NETIF_F_GRO;

	netdev->flags |= IFF_MULTICAST;

	adapter->rx_csum = true;

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
		BE_NAPI_WEIGHT);
	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg)
		iounmap(adapter->pcicfg);
}
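
/* Map the device BARs: BAR 2 holds the CSRs, BAR 4 the doorbells (only
 * the first 128KB is mapped) and BAR 1 the PCICFG region used, among
 * other things, for host interrupt control.
 */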
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;

	addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
			pci_resource_len(adapter->pdev, 2));
	if (addr == NULL)
		return -ENOMEM;
	adapter->csr = addr;

	addr = ioremap_nocache(pci_resource_start(adapter->pdev, 4),
			128 * 1024);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	addr = ioremap_nocache(pci_resource_start(adapter->pdev, 1),
			pci_resource_len(adapter->pdev, 1));
	if (addr == NULL)
		goto pci_map_err;
	adapter->pcicfg = addr;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);
}
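
/* Map the BARs and set up the mailbox used for bootstrap MCC commands.
 * The mailbox is kept 16-byte aligned: 16 extra bytes are allocated and
 * the va/dma addresses are rounded up with PTR_ALIGN.
 */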
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		return status;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
				mbox_mem_alloc->size, &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		be_unmap_pci_bars(adapter);
		return -ENOMEM;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	spin_lock_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	return 0;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_stats_obj *stats = &adapter->stats;
	struct be_dma_mem *cmd = &stats->cmd;

	if (cmd->va)
		pci_free_consistent(adapter->pdev, cmd->size,
			cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_stats_obj *stats = &adapter->stats;
	struct be_dma_mem *cmd = &stats->cmd;

	cmd->size = sizeof(struct be_cmd_req_get_stats);
	cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
	if (cmd->va == NULL)
		return -ENOMEM;
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	if (adapter->msix_enabled) {
		pci_disable_msix(adapter->pdev);
		adapter->msix_enabled = false;
	}

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
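
/* Bring the adapter firmware up: wait for POST to complete, then cache
 * the firmware version and the port number this PCI function drives.
 */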
static int be_hw_up(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_POST(adapter);
	if (status)
		return status;

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num);
	return status;
}
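
/* PCI probe: enable the device, choose a DMA mask (64-bit with a 32-bit
 * fallback), bring up the control path, reset the function, read the
 * permanent MAC address, create the interface and queues, and register
 * the netdev. The error paths unwind in strict reverse order.
 */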
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	u8 mac[ETH_ALEN];

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;

	be_msix_enable(adapter);

	status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_hw_up(adapter);
	if (status)
		goto stats_clean;

	status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
			true /* permanent */, 0);
	if (status)
		goto stats_clean;
	memcpy(netdev->dev_addr, mac, ETH_ALEN);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	be_netdev_init(netdev);
	SET_NETDEV_DEV(netdev, &adapter->pdev->dev);

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(adapter->netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
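
/* PM hooks: the interface and queues are torn down across suspend and
 * recreated on resume rather than trying to preserve queue state over
 * the power transition.
 */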
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);
	return 0;
}

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume
};
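
/* Validate the rx_frag_size module parameter before registering the
 * driver: only 2048, 4096 and 8192 byte fragments are supported;
 * anything else is overridden to the 2048 default.
 */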
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
		rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);