/******************************************************************************
 *
 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-agn.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"

/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
        u32 reg = 0;
        int txq_id = txq->q.id;

        if (txq->need_update == 0)
                return;

        if (priv->cfg->base_params->shadow_reg_enable) {
                /* shadow register enabled */
                iwl_write32(priv, HBUS_TARG_WRPTR,
                            txq->q.write_ptr | (txq_id << 8));
        } else {
                /* if we're trying to save power */
                if (test_bit(STATUS_POWER_PMI, &priv->status)) {
                        /* wake up nic if it's powered down ...
                         * uCode will wake up, and interrupt us again, so next
                         * time we'll skip this part. */
                        reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

                        if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
                                IWL_DEBUG_INFO(priv,
                                        "Tx queue %d requesting wakeup,"
                                        " GP1 = 0x%x\n", txq_id, reg);
                                iwl_set_bit(priv, CSR_GP_CNTRL,
                                        CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
                                return;
                        }

                        iwl_write_direct32(priv, HBUS_TARG_WRPTR,
                                           txq->q.write_ptr | (txq_id << 8));

                        /*
                         * else not in power-save mode,
                         * uCode will never sleep when we're
                         * trying to tx (during RFKILL, we're not trying to tx).
                         */
                } else
                        iwl_write32(priv, HBUS_TARG_WRPTR,
                                    txq->q.write_ptr | (txq_id << 8));
        }
        txq->need_update = 0;
}
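
/*
 * Example of the HBUS_TARG_WRPTR encoding used above: the queue id lands in
 * bits 8 and up and the write index in the low byte, so txq_id = 4 with
 * write_ptr = 12 writes 12 | (4 << 8) = 0x40c.
 */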

static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
        struct iwl_tfd_tb *tb = &tfd->tbs[idx];

        dma_addr_t addr = get_unaligned_le32(&tb->lo);
        if (sizeof(dma_addr_t) > sizeof(u32))
                addr |=
                ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

        return addr;
}

static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
        struct iwl_tfd_tb *tb = &tfd->tbs[idx];

        return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
                                  dma_addr_t addr, u16 len)
{
        struct iwl_tfd_tb *tb = &tfd->tbs[idx];
        u16 hi_n_len = len << 4;

        put_unaligned_le32(addr, &tb->lo);
        if (sizeof(dma_addr_t) > sizeof(u32))
                hi_n_len |= ((addr >> 16) >> 16) & 0xF;

        tb->hi_n_len = cpu_to_le16(hi_n_len);

        tfd->num_tbs = idx + 1;
}

static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
        return tfd->num_tbs & 0x1f;
}
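
/*
 * Layout illustration for the TB helpers above: a 36-bit DMA address such
 * as 0x9_8765_4321 is stored as lo = 0x87654321 with the top nibble 0x9 in
 * bits 0-3 of hi_n_len; the 12-bit length occupies bits 4-15, so
 * hi_n_len = (len << 4) | 0x9.
 */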

static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
                             struct iwl_tfd *tfd)
{
        int i;
        int num_tbs;

        /* Sanity check on number of chunks */
        num_tbs = iwl_tfd_get_num_tbs(tfd);

        if (num_tbs >= IWL_NUM_OF_TBS) {
                IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
                /* @todo issue fatal error, this is quite a serious situation */
                return;
        }

        /* Unmap tx_cmd */
        if (num_tbs)
                dma_unmap_single(priv->bus.dev,
                                 dma_unmap_addr(meta, mapping),
                                 dma_unmap_len(meta, len),
                                 DMA_BIDIRECTIONAL);

        /* Unmap chunks, if any. */
        for (i = 1; i < num_tbs; i++)
                dma_unmap_single(priv->bus.dev, iwl_tfd_tb_get_addr(tfd, i),
                                 iwl_tfd_tb_get_len(tfd, i), DMA_TO_DEVICE);
}
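
/*
 * The two unmap paths above differ because TB 0 is the command/descriptor
 * buffer whose mapping is tracked in the meta structure (and was mapped
 * bidirectionally), while TBs 1..num_tbs-1 are payload chunks that were
 * mapped to-device only.
 */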

/**
 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @priv - driver private data
 * @txq - tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
        struct iwl_tfd *tfd_tmp = txq->tfds;
        int index = txq->q.read_ptr;

        iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index]);

        /* free SKB */
        if (txq->txb) {
                struct sk_buff *skb;

                skb = txq->txb[txq->q.read_ptr].skb;

                /* can be called from irqs-disabled context */
                if (skb) {
                        dev_kfree_skb_any(skb);
                        txq->txb[txq->q.read_ptr].skb = NULL;
                }
        }
}

int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
                                 struct iwl_tx_queue *txq,
                                 dma_addr_t addr, u16 len,
                                 u8 reset)
{
        struct iwl_queue *q;
        struct iwl_tfd *tfd, *tfd_tmp;
        u32 num_tbs;

        q = &txq->q;
        tfd_tmp = txq->tfds;
        tfd = &tfd_tmp[q->write_ptr];

        if (reset)
                memset(tfd, 0, sizeof(*tfd));

        num_tbs = iwl_tfd_get_num_tbs(tfd);

        /* Each TFD can point to a maximum of 20 Tx buffers */
        if (num_tbs >= IWL_NUM_OF_TBS) {
                IWL_ERR(priv, "Error can not send more than %d chunks\n",
                        IWL_NUM_OF_TBS);
                return -EINVAL;
        }

        if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
                return -EINVAL;

        if (unlikely(addr & ~IWL_TX_DMA_MASK))
                IWL_ERR(priv, "Unaligned address = %llx\n",
                        (unsigned long long)addr);

        iwl_tfd_set_tb(tfd, num_tbs, addr, len);

        return 0;
}
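
/*
 * Typical call pattern (as used by iwl_enqueue_hcmd below): the first
 * buffer of a frame is attached with reset = 1 to zero the TFD, and any
 * additional chunks with reset = 0 so they append to the same TFD.
 */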

/*
 * Tell nic where to find circular buffer of Tx Frame Descriptors for
 * a given Tx queue, and enable the DMA channel used for that queue.
 *
 * Supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
 * channels supported in hardware.
 */
static int iwlagn_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
        int txq_id = txq->q.id;

        /* Circular buffer (TFD queue in DRAM) physical base address */
        iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
                           txq->q.dma_addr >> 8);

        return 0;
}
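
/*
 * The base address is written shifted right by 8, which implies the TFD
 * ring must be 256-byte aligned in DRAM; the coherent allocation made in
 * iwl_tx_queue_alloc() is at least page-aligned, so this holds.
 */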

/**
 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
 */
void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
{
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct iwl_queue *q = &txq->q;

        if (q->n_bd == 0)
                return;

        while (q->write_ptr != q->read_ptr) {
                iwlagn_txq_free_tfd(priv, txq);
                q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
        }
}

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct device *dev = priv->bus.dev;
        int i;

        iwl_tx_queue_unmap(priv, txq_id);

        /* De-alloc array of command/tx buffers */
        for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
                kfree(txq->cmd[i]);

        /* De-alloc circular buffer of TFDs */
        if (txq->q.n_bd)
                dma_free_coherent(dev, priv->hw_params.tfd_size *
                                  txq->q.n_bd, txq->tfds, txq->q.dma_addr);

        /* De-alloc array of per-TFD driver data */
        kfree(txq->txb);
        txq->txb = NULL;

        /* deallocate arrays */
        kfree(txq->cmd);
        kfree(txq->meta);
        txq->cmd = NULL;
        txq->meta = NULL;

        /* 0-fill queue descriptor structure */
        memset(txq, 0, sizeof(*txq));
}

/**
 * iwl_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
 */
void iwl_cmd_queue_unmap(struct iwl_priv *priv)
{
        struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
        struct iwl_queue *q = &txq->q;
        int i;

        if (q->n_bd == 0)
                return;

        while (q->read_ptr != q->write_ptr) {
                i = get_cmd_index(q, q->read_ptr);

                if (txq->meta[i].flags & CMD_MAPPED) {
                        iwlagn_unmap_tfd(priv, &txq->meta[i], &txq->tfds[i]);
                        txq->meta[i].flags = 0;
                }

                q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
        }
}
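
/*
 * CMD_MAPPED is checked above because command slots are only marked mapped
 * once iwl_enqueue_hcmd() has finished setting up their DMA (the flag is
 * cleared again in iwl_tx_cmd_complete()), so entries without the flag
 * have nothing left to unmap.
 */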

/**
 * iwl_cmd_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_cmd_queue_free(struct iwl_priv *priv)
{
        struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
        struct device *dev = priv->bus.dev;
        int i;

        iwl_cmd_queue_unmap(priv);

        /* De-alloc array of command/tx buffers */
        for (i = 0; i < TFD_CMD_SLOTS; i++)
                kfree(txq->cmd[i]);

        /* De-alloc circular buffer of TFDs */
        if (txq->q.n_bd)
                dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
                                  txq->tfds, txq->q.dma_addr);

        /* deallocate arrays */
        kfree(txq->cmd);
        kfree(txq->meta);
        txq->cmd = NULL;
        txq->meta = NULL;

        /* 0-fill queue descriptor structure */
        memset(txq, 0, sizeof(*txq));
}

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For Tx queues, there are low mark and high mark limits.  If, after queuing
 * a packet for Tx, the free space becomes < low mark, the Tx queue is
 * stopped.  When reclaiming packets (on the 'tx done' IRQ), if the free
 * space becomes > high mark, the Tx queue is resumed.
 *
 ***************************************************/

int iwl_queue_space(const struct iwl_queue *q)
{
        int s = q->read_ptr - q->write_ptr;

        if (q->read_ptr > q->write_ptr)
                s -= q->n_bd;

        if (s <= 0)
                s += q->n_window;
        /* keep some reserve to not confuse empty and full situations */
        s -= 2;
        if (s < 0)
                s = 0;
        return s;
}
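
/*
 * Worked example of the arithmetic above: with n_bd = 256, n_window = 64,
 * read_ptr = 10 and write_ptr = 70, s = 10 - 70 = -60; since s <= 0 it
 * becomes -60 + 64 = 4, and after the 2-entry reserve the function reports
 * 2 free slots.
 */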

/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
                          int count, int slots_num, u32 id)
{
        q->n_bd = count;
        q->n_window = slots_num;
        q->id = id;

        /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
         * and iwl_queue_dec_wrap are broken. */
        if (WARN_ON(!is_power_of_2(count)))
                return -EINVAL;

        /* slots_num must be power-of-two size, otherwise
         * get_cmd_index is broken. */
        if (WARN_ON(!is_power_of_2(slots_num)))
                return -EINVAL;

        q->low_mark = q->n_window / 4;
        if (q->low_mark < 4)
                q->low_mark = 4;

        q->high_mark = q->n_window / 8;
        if (q->high_mark < 2)
                q->high_mark = 2;

        q->write_ptr = q->read_ptr = 0;

        return 0;
}
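
/*
 * Example: slots_num = 256 gives low_mark = 64 and high_mark = 32; the
 * clamps only matter for small windows, e.g. slots_num = 8 gives
 * low_mark = 4 (raised from 2) and high_mark = 2 (raised from 1).
 */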

/**
 * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
 */
static int iwl_tx_queue_alloc(struct iwl_priv *priv,
                              struct iwl_tx_queue *txq, u32 id)
{
        struct device *dev = priv->bus.dev;
        size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;

        /* Driver private data, only for Tx (not command) queues,
         * not shared with device. */
        if (id != priv->cmd_queue) {
                txq->txb = kzalloc(sizeof(txq->txb[0]) *
                                   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
                if (!txq->txb) {
                        IWL_ERR(priv, "kmalloc for auxiliary BD "
                                "structures failed\n");
                        goto error;
                }
        } else {
                txq->txb = NULL;
        }

        /* Circular buffer of transmit frame descriptors (TFDs),
         * shared with device */
        txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
                                       GFP_KERNEL);
        if (!txq->tfds) {
                IWL_ERR(priv, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
                goto error;
        }
        txq->q.id = id;

        return 0;

 error:
        kfree(txq->txb);
        txq->txb = NULL;

        return -ENOMEM;
}

/**
 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
                      int slots_num, u32 txq_id)
{
        int i, len;
        int ret;

        txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * slots_num,
                            GFP_KERNEL);
        txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * slots_num,
                           GFP_KERNEL);

        if (!txq->meta || !txq->cmd)
                goto out_free_arrays;

        len = sizeof(struct iwl_device_cmd);
        for (i = 0; i < slots_num; i++) {
                txq->cmd[i] = kmalloc(len, GFP_KERNEL);
                if (!txq->cmd[i])
                        goto err;
        }

        /* Alloc driver data array and TFD circular buffer */
        ret = iwl_tx_queue_alloc(priv, txq, txq_id);
        if (ret)
                goto err;

        txq->need_update = 0;

        /*
         * For the default queues 0-3, set up the swq_id
         * already -- all others need to get one later
         * (if they need one at all).
         */
        if (txq_id < 4)
                iwl_set_swq_id(txq, txq_id, txq_id);

        /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
         * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
        BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

        /* Initialize queue's high/low-water marks, and head/tail indexes */
        ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX,
                             slots_num, txq_id);
        if (ret)
                return ret;

        /* Tell device where to find queue */
        iwlagn_tx_queue_init(priv, txq);

        return 0;
err:
        for (i = 0; i < slots_num; i++)
                kfree(txq->cmd[i]);
out_free_arrays:
        kfree(txq->meta);
        kfree(txq->cmd);

        return -ENOMEM;
}
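
/*
 * Note the asymmetry in sizes here: the TFD ring is always
 * TFD_QUEUE_SIZE_MAX entries (shared with the device), while the meta/cmd
 * arrays hold only slots_num entries; get_cmd_index() maps a ring index
 * down into that smaller window.
 */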

void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
                        int slots_num, u32 txq_id)
{
        int actual_slots = slots_num;

        if (txq_id == priv->cmd_queue)
                actual_slots++;

        memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);

        txq->need_update = 0;

        /* Initialize queue's high/low-water marks, and head/tail indexes */
        iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

        /* Tell device where to find queue */
        iwlagn_tx_queue_init(priv, txq);
}
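
/*
 * Unlike iwl_tx_queue_init(), this path performs no allocation: it
 * re-zeroes the existing meta entries and re-registers the already
 * allocated TFD ring with the device, so it can be called on a queue that
 * was previously initialized.
 */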

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns a value < 0 to indicate that the operation
 * failed. On success, it returns the index (>= 0) of the command in
 * the command queue.
 */
int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
        struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
        struct iwl_queue *q = &txq->q;
        struct iwl_device_cmd *out_cmd;
        struct iwl_cmd_meta *out_meta;
        dma_addr_t phys_addr;
        unsigned long flags;
        int idx;
        u16 copy_size, cmd_size;
        bool is_ct_kill = false;
        bool had_nocopy = false;
        int i;
        u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
        const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
        int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
        int trace_idx;
#endif

        if (test_bit(STATUS_FW_ERROR, &priv->status)) {
                IWL_WARN(priv, "fw recovery, no hcmd send\n");
                return -EIO;
        }

        copy_size = sizeof(out_cmd->hdr);
        cmd_size = sizeof(out_cmd->hdr);

        /* need one for the header if the first is NOCOPY */
        BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

        for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
                if (!cmd->len[i])
                        continue;
                if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
                        had_nocopy = true;
                } else {
                        /* NOCOPY must not be followed by normal! */
                        if (WARN_ON(had_nocopy))
                                return -EINVAL;
                        copy_size += cmd->len[i];
                }
                cmd_size += cmd->len[i];
        }

        /*
         * If any of the command structures end up being larger than
         * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
         * allocated into separate TFDs, then we will need to
         * increase the size of the buffers.
         */
        if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
                return -EINVAL;

        if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
                IWL_WARN(priv, "Not sending command - %s KILL\n",
                         iwl_is_rfkill(priv) ? "RF" : "CT");
                return -EIO;
        }

        spin_lock_irqsave(&priv->hcmd_lock, flags);

        if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
                spin_unlock_irqrestore(&priv->hcmd_lock, flags);

                IWL_ERR(priv, "No space in command queue\n");
                is_ct_kill = iwl_check_for_ct_kill(priv);
                if (!is_ct_kill) {
                        IWL_ERR(priv, "Restarting adapter due to queue full\n");
                        iwlagn_fw_error(priv, false);
                }
                return -ENOSPC;
        }

        idx = get_cmd_index(q, q->write_ptr);
        out_cmd = txq->cmd[idx];
        out_meta = &txq->meta[idx];

        if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
                spin_unlock_irqrestore(&priv->hcmd_lock, flags);
                return -ENOSPC;
        }

        memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
        if (cmd->flags & CMD_WANT_SKB)
                out_meta->source = cmd;
        if (cmd->flags & CMD_ASYNC)
                out_meta->callback = cmd->callback;

        /* set up the header */

        out_cmd->hdr.cmd = cmd->id;
        out_cmd->hdr.flags = 0;
        out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
                                            INDEX_TO_SEQ(q->write_ptr));

        /* and copy the data that needs to be copied */

        cmd_dest = &out_cmd->cmd.payload[0];
        for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
                if (!cmd->len[i])
                        continue;
                if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
                        break;
                memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
                cmd_dest += cmd->len[i];
        }

        IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
                        "%d bytes at %d[%d]:%d\n",
                        get_cmd_string(out_cmd->hdr.cmd),
                        out_cmd->hdr.cmd,
                        le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
                        q->write_ptr, idx, priv->cmd_queue);

        phys_addr = dma_map_single(priv->bus.dev, &out_cmd->hdr, copy_size,
                                   DMA_BIDIRECTIONAL);
        if (unlikely(dma_mapping_error(priv->bus.dev, phys_addr))) {
                idx = -ENOMEM;
                goto out;
        }

        dma_unmap_addr_set(out_meta, mapping, phys_addr);
        dma_unmap_len_set(out_meta, len, copy_size);

        iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
        trace_bufs[0] = &out_cmd->hdr;
        trace_lens[0] = copy_size;
        trace_idx = 1;
#endif

        for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
                if (!cmd->len[i])
                        continue;
                if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
                        continue;
                phys_addr = dma_map_single(priv->bus.dev, (void *)cmd->data[i],
                                           cmd->len[i], DMA_TO_DEVICE);
                if (dma_mapping_error(priv->bus.dev, phys_addr)) {
                        iwlagn_unmap_tfd(priv, out_meta,
                                         &txq->tfds[q->write_ptr]);
                        idx = -ENOMEM;
                        goto out;
                }

                iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
                                             cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
                trace_bufs[trace_idx] = cmd->data[i];
                trace_lens[trace_idx] = cmd->len[i];
                trace_idx++;
#endif
        }

        out_meta->flags = cmd->flags | CMD_MAPPED;

        txq->need_update = 1;

        /* check that tracing gets all possible blocks */
        BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
        trace_iwlwifi_dev_hcmd(priv, cmd->flags,
                               trace_bufs[0], trace_lens[0],
                               trace_bufs[1], trace_lens[1],
                               trace_bufs[2], trace_lens[2]);
#endif

        /* Increment and update queue's write index */
        q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
        iwl_txq_update_write_ptr(priv, txq);

 out:
        spin_unlock_irqrestore(&priv->hcmd_lock, flags);
        return idx;
}
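
/*
 * The hdr.sequence value built above (QUEUE_TO_SEQ | INDEX_TO_SEQ) is the
 * round-trip token for the command: the firmware echoes it in its
 * response, and iwl_tx_cmd_complete() below recovers the queue and slot
 * with SEQ_TO_QUEUE()/SEQ_TO_INDEX().
 */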

/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When the FW advances the 'R' index, all entries between the old and new
 * 'R' index need to be reclaimed. As a result, some free space forms. If
 * there is enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int idx)
{
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct iwl_queue *q = &txq->q;
        int nfreed = 0;

        if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
                IWL_ERR(priv, "%s: Read index for DMA queue txq id (%d), "
                          "index %d is out of range [0-%d] %d %d.\n", __func__,
                          txq_id, idx, q->n_bd, q->write_ptr, q->read_ptr);
                return;
        }

        for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
             q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

                if (nfreed++ > 0) {
                        IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
                                        q->write_ptr, q->read_ptr);
                        iwlagn_fw_error(priv, false);
                }

        }
}

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed.  The attached skb (if present) will only be freed
 * if the callback returns 1.
 */
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        u16 sequence = le16_to_cpu(pkt->hdr.sequence);
        int txq_id = SEQ_TO_QUEUE(sequence);
        int index = SEQ_TO_INDEX(sequence);
        int cmd_index;
        struct iwl_device_cmd *cmd;
        struct iwl_cmd_meta *meta;
        struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
        unsigned long flags;

        /* If a Tx command is being handled and it isn't in the actual
         * command queue then a command routing bug has been introduced
         * in the queue management code. */
        if (WARN(txq_id != priv->cmd_queue,
                 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
                 txq_id, priv->cmd_queue, sequence,
                 priv->txq[priv->cmd_queue].q.read_ptr,
                 priv->txq[priv->cmd_queue].q.write_ptr)) {
                iwl_print_hex_error(priv, pkt, 32);
                return;
        }

        cmd_index = get_cmd_index(&txq->q, index);
        cmd = txq->cmd[cmd_index];
        meta = &txq->meta[cmd_index];

        iwlagn_unmap_tfd(priv, meta, &txq->tfds[index]);

        /* Input error checking is done when commands are added to queue. */
        if (meta->flags & CMD_WANT_SKB) {
                meta->source->reply_page = (unsigned long)rxb_addr(rxb);
                rxb->page = NULL;
        } else if (meta->callback)
                meta->callback(priv, cmd, pkt);

        spin_lock_irqsave(&priv->hcmd_lock, flags);

        iwl_hcmd_queue_reclaim(priv, txq_id, index);

        if (!(meta->flags & CMD_ASYNC)) {
                clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
                IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
                               get_cmd_string(cmd->hdr.cmd));
                wake_up_interruptible(&priv->wait_command_queue);
        }

        /* Mark as unmapped */
        meta->flags = 0;

        spin_unlock_irqrestore(&priv->hcmd_lock, flags);
}
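
/*
 * Ownership note for the CMD_WANT_SKB path above: handing rxb's page to
 * the waiting caller via reply_page and NULLing rxb->page stops the Rx
 * path from recycling the buffer; the caller then owns, and must free,
 * the reply page.
 */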