1 // SPDX-License-Identifier: GPL-2.0
3 * qdio queue initialization
5 * Copyright IBM Corp. 2008
8 #include <linux/kernel.h>
9 #include <linux/slab.h>
10 #include <linux/export.h>
13 #include <asm/ebcdic.h>
22 #include "qdio_debug.h"
24 #define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer))
26 static struct kmem_cache *qdio_q_cache;
29 * qdio_free_buffers() - free qdio buffers
30 * @buf: array of pointers to qdio buffers
31 * @count: number of qdio buffers to free
/*
 * Counterpart to qdio_alloc_buffers(): buffers live QBUFF_PER_PAGE to a
 * page, so only every QBUFF_PER_PAGE-th array slot points at a page
 * start that must be handed back to the page allocator.
 * NOTE(review): for partially populated arrays this relies on
 * free_page() treating a 0 address as a no-op -- confirm.
 */
33 void qdio_free_buffers(struct qdio_buffer **buf, unsigned int count)
37 for (pos = 0; pos < count; pos += QBUFF_PER_PAGE)
38 free_page((unsigned long) buf[pos]);
40 EXPORT_SYMBOL_GPL(qdio_free_buffers);
43 * qdio_alloc_buffers() - allocate qdio buffers
44 * @buf: array of pointers to qdio buffers
45 * @count: number of qdio buffers to allocate
/*
 * Allocates one zeroed page per QBUFF_PER_PAGE buffers. On allocation
 * failure (the get_zeroed_page() result check is not visible in this
 * excerpt) everything obtained so far is rolled back via
 * qdio_free_buffers().
 */
47 int qdio_alloc_buffers(struct qdio_buffer **buf, unsigned int count)
51 for (pos = 0; pos < count; pos += QBUFF_PER_PAGE) {
52 buf[pos] = (void *) get_zeroed_page(GFP_KERNEL);
/* error path: undo the partial allocation */
54 qdio_free_buffers(buf, count);
/* second pass: point the non-page-start slots at the buffers inside each page */
58 for (pos = 0; pos < count; pos++)
59 if (pos % QBUFF_PER_PAGE)
60 buf[pos] = buf[pos - 1] + 1;
63 EXPORT_SYMBOL_GPL(qdio_alloc_buffers);
66 * qdio_reset_buffers() - reset qdio buffers
67 * @buf: array of pointers to qdio buffers
68 * @count: number of qdio buffers that will be zeroed
/* Zero each buffer in place; the buffers themselves stay allocated. */
70 void qdio_reset_buffers(struct qdio_buffer **buf, unsigned int count)
74 for (pos = 0; pos < count; pos++)
75 memset(buf[pos], 0, sizeof(struct qdio_buffer));
77 EXPORT_SYMBOL_GPL(qdio_reset_buffers);
/*
 * Free @count queues from @queues: each queue's sl_page goes back to the
 * page allocator, the qdio_q object itself back to qdio_q_cache.
 * NOTE(review): the per-iteration fetch of q from queues[i] is not
 * visible in this excerpt -- presumably q = queues[i]; confirm.
 */
79 static void __qdio_free_queues(struct qdio_q **queues, unsigned int count)
84 for (i = 0; i < count; i++) {
86 free_page((unsigned long)q->sl_page);
87 kmem_cache_free(qdio_q_cache, q);
/*
 * Release all input and output queues of @irq_ptr and reset the
 * max_*_qs bookkeeping so a repeated call frees nothing.
 */
91 void qdio_free_queues(struct qdio_irq *irq_ptr)
93 __qdio_free_queues(irq_ptr->input_qs, irq_ptr->max_input_qs);
94 irq_ptr->max_input_qs = 0;
96 __qdio_free_queues(irq_ptr->output_qs, irq_ptr->max_output_qs);
97 irq_ptr->max_output_qs = 0;
100 static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
105 for (i = 0; i < nr_queues; i++) {
106 q = kmem_cache_zalloc(qdio_q_cache, GFP_KERNEL);
108 __qdio_free_queues(irq_ptr_qs, i);
112 q->sl_page = (void *)__get_free_page(GFP_KERNEL);
114 kmem_cache_free(qdio_q_cache, q);
115 __qdio_free_queues(irq_ptr_qs, i);
118 q->slib = q->sl_page;
119 /* As per architecture: SLIB is 2K bytes long, and SL 1K. */
120 q->sl = (struct sl *)(q->slib + 1);
/*
 * Allocate the input and output queue arrays. If output allocation
 * fails, the already-allocated input queues are freed again (the rc
 * checks and return statements fall in gaps of this excerpt). On
 * success the max_* counts record how many queues must be freed later.
 */
127 int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs)
131 rc = __qdio_allocate_qs(irq_ptr->input_qs, nr_input_qs);
135 rc = __qdio_allocate_qs(irq_ptr->output_qs, nr_output_qs);
/* roll back the input queues if the output queues could not be set up */
137 __qdio_free_queues(irq_ptr->input_qs, nr_input_qs);
141 irq_ptr->max_input_qs = nr_input_qs;
142 irq_ptr->max_output_qs = nr_output_qs;
/*
 * Reset queue @i for (re-)establish: clear the qdio_q object and its
 * sl_page, then restore the pointers that must survive the memset and
 * set the per-queue interrupt mask bit and handler.
 * NOTE(review): slib and sl are saved here but their restoration into q
 * is not visible in this excerpt -- presumably q->slib = slib and
 * q->sl = sl follow; confirm.
 */
146 static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
147 qdio_handler_t *handler, int i)
149 struct slib *const slib = q->slib;
150 void *const sl_page = q->sl_page;
151 struct sl *const sl = q->sl;
153 /* queue must be cleared for qdio_establish */
154 memset(q, 0, sizeof(*q));
155 memset(sl_page, 0, PAGE_SIZE);
156 q->sl_page = sl_page;
159 q->irq_ptr = irq_ptr;
/* queue 0 owns the most significant bit of the 32-bit mask */
160 q->mask = 1 << (31 - i);
162 q->handler = handler;
/*
 * Wire up queue @i's storage lists: record the caller-provided SBAL
 * addresses, chain the previous queue's SLIB to this one, and fill the
 * SLIB/SL address fields that the hardware reads.
 */
165 static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
166 struct qdio_buffer **sbals_array, int i)
171 DBF_HEX(&q, sizeof(void *));
/* record the caller's storage-block address list entries */
174 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
175 q->sbal[j] = *sbals_array++;
/* link the previous queue's SLIB to this queue's SLIB.
 * NOTE(review): must only run for i > 0 -- the guard is not visible in
 * this excerpt; confirm. */
179 prev = (q->is_input_q) ? irq_ptr->input_qs[i - 1]
180 : irq_ptr->output_qs[i - 1];
181 prev->slib->nsliba = (unsigned long)q->slib;
184 q->slib->sla = (unsigned long)q->sl;
185 q->slib->slsba = (unsigned long)&q->slsb.val[0];
/* populate the storage list with the DMA addresses of the SBALs */
188 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
189 q->sl->element[j].sbal = virt_to_dma64(q->sbal[j]);
/*
 * Initialize every input and output queue of @irq_ptr from the caller's
 * init data: common queue state first, then the storage lists built
 * from the per-queue SBAL address arrays.
 */
192 static void setup_queues(struct qdio_irq *irq_ptr,
193 struct qdio_initialize *qdio_init)
198 for_each_input_queue(irq_ptr, q, i) {
199 DBF_EVENT("inq:%1d", i);
200 setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
204 setup_storage_lists(q, irq_ptr,
205 qdio_init->input_sbal_addr_array[i], i);
208 for_each_output_queue(irq_ptr, q, i) {
209 DBF_EVENT("outq:%1d", i);
210 setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);
213 setup_storage_lists(q, irq_ptr,
214 qdio_init->output_sbal_addr_array[i], i);
/*
 * Keep QEBSM enabled only if it was requested via the QIB rflags AND the
 * subchannel reports it both available and enabled; in that case record
 * the subchannel token. Otherwise fall through to the disable path.
 * NOTE(review): the early-out control flow between the checks (returns
 * or a goto to the fallback) is not visible in this excerpt -- confirm.
 */
218 static void check_and_setup_qebsm(struct qdio_irq *irq_ptr,
219 unsigned char qdioac, unsigned long token)
221 if (!(irq_ptr->qib.rflags & QIB_RFLAGS_ENABLE_QEBSM))
223 if (!(qdioac & AC1_SC_QEBSM_AVAILABLE) ||
224 (!(qdioac & AC1_SC_QEBSM_ENABLED)))
227 irq_ptr->sch_token = token;
230 DBF_EVENT("%8lx", irq_ptr->sch_token);
/* fallback: QEBSM requested but not usable -- clear token and request flag */
234 irq_ptr->sch_token = 0;
235 irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
240 * If there is a qdio_irq we use the chsc_page and store the information
241 * in the qdio_irq, otherwise we copy it to the specified structure.
243 int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr,
244 struct subchannel_id *schid,
245 struct qdio_ssqd_desc *data)
247 struct chsc_ssqd_area *ssqd;
250 DBF_EVENT("getssqd:%4x", schid->sch_no);
/* no qdio_irq: allocate a scratch page for the CHSC request
 * (the irq_ptr NULL check is not visible in this excerpt) */
252 ssqd = (struct chsc_ssqd_area *)__get_free_page(GFP_KERNEL);
256 ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page;
259 rc = chsc_ssqd(*schid, ssqd);
/* reject responses that lack qdio capability/validity or belong to a
 * different subchannel */
263 if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) ||
264 !(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) ||
265 (ssqd->qdio_ssqd.sch != schid->sch_no))
269 memcpy(data, &ssqd->qdio_ssqd, sizeof(*data));
/* return the scratch page; presumably only taken in the !irq_ptr case
 * -- the guard is not visible in this excerpt */
273 free_page((unsigned long)ssqd);
/*
 * Fetch the SSQD for this subchannel and derive the qdioac1 flags. If
 * the query fails, fall back to the most conservative setting: all SIGA
 * operations (input, output, sync) assumed required.
 */
278 void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
280 unsigned char qdioac;
283 rc = qdio_setup_get_ssqd(irq_ptr, &irq_ptr->schid, &irq_ptr->ssqd_desc);
285 DBF_ERROR("%4x ssqd ERR", irq_ptr->schid.sch_no);
286 DBF_ERROR("rc:%x", rc);
287 /* all flags set, worst case */
288 qdioac = AC1_SIGA_INPUT_NEEDED | AC1_SIGA_OUTPUT_NEEDED |
289 AC1_SIGA_SYNC_NEEDED;
291 qdioac = irq_ptr->ssqd_desc.qdioac1;
/* may clear the QEBSM request flag / token if QEBSM is unusable */
293 check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token);
294 irq_ptr->qdioac1 = qdioac;
295 DBF_EVENT("ac 1:%2x 2:%4x", qdioac, irq_ptr->ssqd_desc.qdioac2);
296 DBF_EVENT("3:%4x qib:%4x", irq_ptr->ssqd_desc.qdioac3, irq_ptr->qib.ac);
/*
 * Fill one QDR queue descriptor with the queue's SLIB, SL and SLSB DMA
 * addresses plus the access keys for each area.
 */
299 static void qdio_fill_qdr_desc(struct qdesfmt0 *desc, struct qdio_q *queue)
301 desc->sliba = virt_to_dma64(queue->slib);
302 desc->sla = virt_to_dma64(queue->sl);
303 desc->slsba = virt_to_dma64(&queue->slsb);
/* NOTE(review): the >> 4 suggests PAGE_DEFAULT_KEY carries the storage
 * key in its high nibble while the descriptor wants the bare 4-bit key
 * -- confirm against the s390 definition */
305 desc->akey = PAGE_DEFAULT_KEY >> 4;
306 desc->bkey = PAGE_DEFAULT_KEY >> 4;
307 desc->ckey = PAGE_DEFAULT_KEY >> 4;
308 desc->dkey = PAGE_DEFAULT_KEY >> 4;
/*
 * Build the QDR (queue descriptor record): global format, queue counts,
 * descriptor sizes and QIB address, followed by one qdesfmt0 descriptor
 * per queue.
 */
311 static void setup_qdr(struct qdio_irq *irq_ptr,
312 struct qdio_initialize *qdio_init)
314 struct qdesfmt0 *desc = &irq_ptr->qdr->qdf0[0];
317 memset(irq_ptr->qdr, 0, sizeof(struct qdr));
319 irq_ptr->qdr->qfmt = qdio_init->q_format;
320 irq_ptr->qdr->ac = qdio_init->qdr_ac;
321 irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs;
322 irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs;
323 irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
324 irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4;
325 irq_ptr->qdr->qiba = virt_to_dma64(&irq_ptr->qib);
326 irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY >> 4;
/* input-queue descriptors come first, output descriptors directly after */
328 for (i = 0; i < qdio_init->no_input_qs; i++)
329 qdio_fill_qdr_desc(desc++, irq_ptr->input_qs[i]);
331 for (i = 0; i < qdio_init->no_output_qs; i++)
332 qdio_fill_qdr_desc(desc++, irq_ptr->output_qs[i]);
/*
 * Initialize the QIB (queue information block) from the caller's init
 * data: queue/parameter formats, rflags (forcing QEBSM on when the CSS
 * supports it), the first input/output SLIB addresses, the device name
 * in EBCDIC, and the optional caller-supplied parameter field.
 */
335 static void setup_qib(struct qdio_irq *irq_ptr,
336 struct qdio_initialize *init_data)
338 memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
340 irq_ptr->qib.qfmt = init_data->q_format;
341 irq_ptr->qib.pfmt = init_data->qib_param_field_format;
343 irq_ptr->qib.rflags = init_data->qib_rflags;
344 if (css_general_characteristics.qebsm)
345 irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
/* only the first queue's SLIB address is recorded; the rest are found
 * through the nsliba chain built in setup_storage_lists() */
347 if (init_data->no_input_qs)
348 irq_ptr->qib.isliba =
349 (unsigned long)(irq_ptr->input_qs[0]->slib);
350 if (init_data->no_output_qs)
351 irq_ptr->qib.osliba =
352 (unsigned long)(irq_ptr->output_qs[0]->slib);
/* 8-character device name, converted in place from ASCII to EBCDIC */
353 memcpy(irq_ptr->qib.ebcnam, dev_name(&irq_ptr->cdev->dev), 8);
354 ASCEBC(irq_ptr->qib.ebcnam, 8);
356 if (init_data->qib_param_field)
357 memcpy(irq_ptr->qib.parm, init_data->qib_param_field,
358 sizeof(irq_ptr->qib.parm));
/*
 * Main setup entry point: reset per-IRQ state, copy the caller's init
 * data, build the queues, QIB and QDR, and install qdio_int_handler as
 * the CCW device's interrupt handler (saving the original handler so
 * qdio_shutdown_irq() can restore it).
 */
361 void qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
363 struct ccw_device *cdev = irq_ptr->cdev;
365 irq_ptr->qdioac1 = 0;
366 memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc));
367 memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
369 irq_ptr->debugfs_dev = NULL;
370 irq_ptr->sch_token = irq_ptr->perf_stat_enabled = 0;
371 irq_ptr->state = QDIO_IRQ_STATE_INACTIVE;
/* the input handler doubles as the error handler */
372 irq_ptr->error_handler = init_data->input_handler;
374 irq_ptr->int_parm = init_data->int_parm;
375 irq_ptr->nr_input_qs = init_data->no_input_qs;
376 irq_ptr->nr_output_qs = init_data->no_output_qs;
377 ccw_device_get_schid(cdev, &irq_ptr->schid);
378 setup_queues(irq_ptr, init_data);
/* polling starts disabled until explicitly enabled */
380 irq_ptr->irq_poll = init_data->irq_poll;
381 set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);
383 setup_qib(irq_ptr, init_data);
385 /* fill input and output descriptors */
386 setup_qdr(irq_ptr, init_data);
388 /* qdr, qib, sls, slsbs, slibs, sbales are filled now */
390 /* set our IRQ handler */
391 spin_lock_irq(get_ccwdev_lock(cdev));
392 irq_ptr->orig_handler = cdev->handler;
393 cdev->handler = qdio_int_handler;
394 spin_unlock_irq(get_ccwdev_lock(cdev));
/*
 * Undo qdio_setup_irq(): put back the interrupt handler saved in
 * orig_handler and clear the interruption parameter, under the CCW
 * device lock to serialize against interrupt delivery.
 */
397 void qdio_shutdown_irq(struct qdio_irq *irq)
399 struct ccw_device *cdev = irq->cdev;
401 /* restore IRQ handler */
402 spin_lock_irq(get_ccwdev_lock(cdev));
403 cdev->handler = irq->orig_handler;
404 cdev->private->intparm = 0;
405 spin_unlock_irq(get_ccwdev_lock(cdev));
/*
 * Log a one-line summary of the subchannel's qdio capabilities: queue
 * format, adapter interrupts, QEBSM, outbound PCI, TDD and which SIGA
 * operations (Read/Write/Sync) are required.
 */
408 void qdio_print_subchannel_info(struct qdio_irq *irq_ptr)
410 dev_info(&irq_ptr->cdev->dev,
411 "qdio: %s on SC %x using AI:%d QEBSM:%d PRI:%d TDD:%d SIGA:%s%s%s\n",
412 (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" :
413 ((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"),
414 irq_ptr->schid.sch_no,
415 is_thinint_irq(irq_ptr),
/* a non-zero sch_token means QEBSM is in use (see check_and_setup_qebsm) */
416 (irq_ptr->sch_token) ? 1 : 0,
417 pci_out_supported(irq_ptr) ? 1 : 0,
418 css_general_characteristics.aif_tdd,
419 qdio_need_siga_in(irq_ptr) ? "R" : " ",
420 qdio_need_siga_out(irq_ptr) ? "W" : " ",
421 qdio_need_siga_sync(irq_ptr) ? "S" : " ");
/*
 * Module init: create the slab cache for qdio_q objects and log the CSS
 * capability bits relevant to qdio. The cache-creation failure return
 * is not visible in this excerpt.
 */
424 int __init qdio_setup_init(void)
426 qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
431 /* Check for OSA/FCP thin interrupts (bit 67). */
432 DBF_EVENT("thinint:%1d",
433 (css_general_characteristics.aif_qdio) ? 1 : 0);
435 /* Check for QEBSM support in general (bit 58). */
436 DBF_EVENT("cssQEBSM:%1d", css_general_characteristics.qebsm);
/* Module exit: destroy the qdio_q slab cache created in qdio_setup_init(). */
441 void qdio_setup_exit(void)
443 kmem_cache_destroy(qdio_q_cache);