/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.  All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/hardirq.h>

#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4.h"
#include "icm.h"

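/*
 * CQ context as passed to the HCA in the SW2HW_CQ/MODIFY_CQ command
 * mailbox.  The layout mirrors the hardware-defined context, so all
 * multi-byte fields are big-endian.
 */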
struct mlx4_cq_context {
	__be32			flags;
	u16			reserved1[3];
	__be16			page_offset;
	__be32			logsize_usrpage;
	__be16			cq_period;
	__be16			cq_max_count;
	u8			reserved2[3];
	u8			comp_eqn;
	u8			log_page_size;
	u8			reserved3[2];
	u8			mtt_base_addr_h;
	__be32			mtt_base_addr_l;
	__be32			last_notified_index;
	__be32			solicit_producer_index;
	__be32			consumer_index;
	__be32			producer_index;
	u32			reserved4[2];
	__be64			db_rec_addr;
};

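/* Hardware encodings for CQ status, flags and state. */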
#define MLX4_CQ_STATUS_OK		( 0 << 28)
#define MLX4_CQ_STATUS_OVERFLOW		( 9 << 28)
#define MLX4_CQ_STATUS_WRITE_FAIL	(10 << 28)
#define MLX4_CQ_FLAG_CC			( 1 << 18)
#define MLX4_CQ_FLAG_OI			( 1 << 17)
#define MLX4_CQ_STATE_ARMED		( 9 <<  8)
#define MLX4_CQ_STATE_ARMED_SOL		( 6 <<  8)
#define MLX4_EQ_STATE_FIRED		(10 <<  8)

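/*
 * Completion event dispatch: called from the EQ handler with the CQN
 * taken from the event queue entry.  Look the CQ up in the radix tree,
 * bump its arm sequence number and invoke the consumer's completion
 * handler.
 */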
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
{
	struct mlx4_cq *cq;

	cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
			       cqn & (dev->caps.num_cqs - 1));
	if (!cq) {
		mlx4_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
		return;
	}

	++cq->arm_sn;

	cq->comp(cq);
}

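/*
 * Asynchronous event dispatch (e.g. CQ error events).  The reference
 * taken under the table lock keeps the CQ from being freed while the
 * consumer's event handler runs.
 */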
void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
{
	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
	struct mlx4_cq *cq;

	spin_lock(&cq_table->lock);

	cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
	if (cq)
		atomic_inc(&cq->refcount);

	spin_unlock(&cq_table->lock);

	if (!cq) {
		mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
		return;
	}

	cq->event(cq, event_type);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
}

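/*
 * Firmware command wrappers: SW2HW_CQ hands a software-built CQ context
 * to the HCA, MODIFY_CQ updates an existing context (the opcode modifier
 * selects what is being changed), and HW2SW_CQ takes ownership back so
 * the CQ can be freed.
 */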
static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int cq_num)
{
	return mlx4_cmd(dev, mailbox->dma, cq_num, 0, MLX4_CMD_SW2HW_CQ,
			MLX4_CMD_TIME_CLASS_A);
}

static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int cq_num, u32 opmod)
{
	return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ,
			MLX4_CMD_TIME_CLASS_A);
}

static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int cq_num)
{
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
			    mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
			    MLX4_CMD_TIME_CLASS_A);
}

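/*
 * Set a CQ's event moderation parameters (cq_max_count completions and
 * cq_period timeout) by issuing MODIFY_CQ with opcode modifier 1.
 */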
int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
		   u16 count, u16 period)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	memset(cq_context, 0, sizeof *cq_context);

	cq_context->cq_max_count = cpu_to_be16(count);
	cq_context->cq_period	 = cpu_to_be16(period);

	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_modify);

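/*
 * Point a CQ at a resized buffer described by a new MTT.  Only the log
 * size and MTT address fields of the context are filled in, and the
 * change is applied with MODIFY_CQ, opcode modifier 0.
 */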
int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
		   int entries, struct mlx4_mtt *mtt)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	u64 mtt_addr;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	memset(cq_context, 0, sizeof *cq_context);

	cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24);
	cq_context->log_page_size   = mtt->page_shift - 12;
	mtt_addr = mlx4_mtt_addr(dev, mtt);
	cq_context->mtt_base_addr_h = mtt_addr >> 32;
	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_resize);

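/*
 * Create a CQ: reserve a CQN, make sure the ICM pages backing its
 * context (and the corresponding CMPT entry) are mapped, publish the CQ
 * in the radix tree for the event handlers, then build a context and
 * hand it to the hardware with SW2HW_CQ.
 */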
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
		  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
		  unsigned vector, int collapsed)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	u64 mtt_addr;
	int err;

	if (vector >= dev->caps.num_comp_vectors)
		return -EINVAL;

	cq->vector = vector;

	cq->cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
	if (cq->cqn == -1)
		return -ENOMEM;

	err = mlx4_table_get(dev, &cq_table->table, cq->cqn);
	if (err)
		goto err_out;

	err = mlx4_table_get(dev, &cq_table->cmpt_table, cq->cqn);
	if (err)
		goto err_put;

	spin_lock_irq(&cq_table->lock);
	err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
	spin_unlock_irq(&cq_table->lock);
	if (err)
		goto err_cmpt_put;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_radix;
	}

	cq_context = mailbox->buf;
	memset(cq_context, 0, sizeof *cq_context);

	cq_context->flags	    = cpu_to_be32(!!collapsed << 18);
	cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
	cq_context->comp_eqn	    = priv->eq_table.eq[vector].eqn;
	cq_context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, mtt);
	cq_context->mtt_base_addr_h = mtt_addr >> 32;
	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
	cq_context->db_rec_addr     = cpu_to_be64(db_rec);

	err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		goto err_radix;

	cq->cons_index = 0;
	cq->arm_sn     = 1;
	cq->uar        = uar;
	atomic_set(&cq->refcount, 1);
	init_completion(&cq->free);

	return 0;

err_radix:
	spin_lock_irq(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, cq->cqn);
	spin_unlock_irq(&cq_table->lock);

err_cmpt_put:
	mlx4_table_put(dev, &cq_table->cmpt_table, cq->cqn);

err_put:
	mlx4_table_put(dev, &cq_table->table, cq->cqn);

err_out:
	mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_alloc);

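/*
 * Destroy a CQ: reclaim the context with HW2SW_CQ, make sure no
 * interrupt handler can still be touching the CQ, remove it from the
 * radix tree, wait for all outstanding references to drop, then release
 * the ICM pages and the CQN.
 */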
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	int err;

	err = mlx4_HW2SW_CQ(dev, NULL, cq->cqn);
	if (err)
		mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);

	synchronize_irq(priv->eq_table.eq[cq->vector].irq);

	spin_lock_irq(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, cq->cqn);
	spin_unlock_irq(&cq_table->lock);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
	wait_for_completion(&cq->free);

	mlx4_table_put(dev, &cq_table->table, cq->cqn);
	mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
}
EXPORT_SYMBOL_GPL(mlx4_cq_free);

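/*
 * Set up the per-device CQ table: lock, radix tree and the CQ number
 * bitmap, with the firmware-reserved CQs excluded from allocation.
 */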
int mlx4_init_cq_table(struct mlx4_dev *dev)
{
	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
	int err;

	spin_lock_init(&cq_table->lock);
	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);

	err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
			       dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
	if (err)
		return err;

	return 0;
}

void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
{
	/* Nothing to do to clean up radix_tree */
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);
}