// SPDX-License-Identifier: GPL-2.0+
// Copyright 2019 IBM Corp.
#include <linux/idr.h>
#include "ocxl_internal.h"
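
/*
 * The function is exposed as a virtual device whose lifetime is
 * refcounted: each AFU takes a reference on it in alloc_afu() and
 * drops it in free_afu().
 */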

static struct ocxl_fn *ocxl_fn_get(struct ocxl_fn *fn)
{
	return (get_device(&fn->dev) == NULL) ? NULL : fn;
}

static void ocxl_fn_put(struct ocxl_fn *fn)
{
	put_device(&fn->dev);
}

static struct ocxl_afu *alloc_afu(struct ocxl_fn *fn)
{
	struct ocxl_afu *afu;

	afu = kzalloc(sizeof(struct ocxl_afu), GFP_KERNEL);
	if (!afu)
		return NULL;

	kref_init(&afu->kref);
	mutex_init(&afu->contexts_lock);
	mutex_init(&afu->afu_control_lock);
	idr_init(&afu->contexts_idr);
	afu->fn = fn;
	ocxl_fn_get(fn);
	return afu;
}

static void free_afu(struct kref *kref)
{
	struct ocxl_afu *afu = container_of(kref, struct ocxl_afu, kref);

	idr_destroy(&afu->contexts_idr);
	ocxl_fn_put(afu->fn);
	kfree(afu);
}

void ocxl_afu_get(struct ocxl_afu *afu)
{
	kref_get(&afu->kref);
}
EXPORT_SYMBOL_GPL(ocxl_afu_get);

void ocxl_afu_put(struct ocxl_afu *afu)
{
	kref_put(&afu->kref, free_afu);
}
EXPORT_SYMBOL_GPL(ocxl_afu_put);
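
/*
 * acTags are assigned out of the range enabled for the function: each
 * AFU gets a share proportional to what it supports, scaled down if
 * the function was granted fewer acTags than it supports.
 */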

static int assign_afu_actag(struct ocxl_afu *afu)
{
	struct ocxl_fn *fn = afu->fn;
	int actag_count, actag_offset;
	struct pci_dev *pci_dev = to_pci_dev(fn->dev.parent);

	/*
	 * If there were not enough actags for the function, each AFU
	 * reduces its count as well
	 */
	actag_count = afu->config.actag_supported *
		fn->actag_enabled / fn->actag_supported;
	actag_offset = ocxl_actag_afu_alloc(fn, actag_count);
	if (actag_offset < 0) {
		dev_err(&pci_dev->dev, "Can't allocate %d actags for AFU: %d\n",
			actag_count, actag_offset);
		return actag_offset;
	}
	afu->actag_base = fn->actag_base + actag_offset;
	afu->actag_enabled = actag_count;

	ocxl_config_set_afu_actag(pci_dev, afu->config.dvsec_afu_control_pos,
				afu->actag_base, afu->actag_enabled);
	dev_dbg(&pci_dev->dev, "actag base=%d enabled=%d\n",
		afu->actag_base, afu->actag_enabled);
	return 0;
}

static void reclaim_afu_actag(struct ocxl_afu *afu)
{
	struct ocxl_fn *fn = afu->fn;
	int start_offset, size;

	start_offset = afu->actag_base - fn->actag_base;
	size = afu->actag_enabled;
	ocxl_actag_afu_free(afu->fn, start_offset, size);
}
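
/*
 * PASIDs are likewise carved out of the function's range: pasid_base
 * is the first PASID usable by the AFU and pasid_max the size of its
 * contiguous range.
 */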

static int assign_afu_pasid(struct ocxl_afu *afu)
{
	struct ocxl_fn *fn = afu->fn;
	int pasid_count, pasid_offset;
	struct pci_dev *pci_dev = to_pci_dev(fn->dev.parent);

	/*
	 * We only support the case where the function configuration
	 * requested enough PASIDs to cover all AFUs.
	 */
	pasid_count = 1 << afu->config.pasid_supported_log;
	pasid_offset = ocxl_pasid_afu_alloc(fn, pasid_count);
	if (pasid_offset < 0) {
		dev_err(&pci_dev->dev, "Can't allocate %d PASIDs for AFU: %d\n",
			pasid_count, pasid_offset);
		return pasid_offset;
	}
	afu->pasid_base = fn->pasid_base + pasid_offset;
	afu->pasid_count = 0;
	afu->pasid_max = pasid_count;

	ocxl_config_set_afu_pasid(pci_dev, afu->config.dvsec_afu_control_pos,
				afu->pasid_base,
				afu->config.pasid_supported_log);
	dev_dbg(&pci_dev->dev, "PASID base=%d, enabled=%d\n",
		afu->pasid_base, pasid_count);
	return 0;
}

static void reclaim_afu_pasid(struct ocxl_afu *afu)
{
	struct ocxl_fn *fn = afu->fn;
	int start_offset, size;

	start_offset = afu->pasid_base - fn->pasid_base;
	size = 1 << afu->config.pasid_supported_log;
	ocxl_pasid_afu_free(afu->fn, start_offset, size);
}
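
/*
 * MMIO areas from several AFUs may share the same PCI BAR, so BAR
 * reservations are refcounted per function and the region is only
 * requested/released for the first/last user.
 */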

static int reserve_fn_bar(struct ocxl_fn *fn, int bar)
{
	struct pci_dev *dev = to_pci_dev(fn->dev.parent);
	int rc, idx;

	if (bar != 0 && bar != 2 && bar != 4)
		return -EINVAL;

	idx = bar >> 1;
	if (fn->bar_used[idx]++ == 0) {
		rc = pci_request_region(dev, bar, "ocxl");
		if (rc)
			return rc;
	}
	return 0;
}

static void release_fn_bar(struct ocxl_fn *fn, int bar)
{
	struct pci_dev *dev = to_pci_dev(fn->dev.parent);
	int idx;

	if (bar != 0 && bar != 2 && bar != 4)
		return;

	idx = bar >> 1;
	if (--fn->bar_used[idx] == 0)
		pci_release_region(dev, bar);
	WARN_ON(fn->bar_used[idx] < 0);
}

static int map_mmio_areas(struct ocxl_afu *afu)
{
	int rc;
	struct pci_dev *pci_dev = to_pci_dev(afu->fn->dev.parent);

	rc = reserve_fn_bar(afu->fn, afu->config.global_mmio_bar);
	if (rc)
		return rc;

	rc = reserve_fn_bar(afu->fn, afu->config.pp_mmio_bar);
	if (rc) {
		release_fn_bar(afu->fn, afu->config.global_mmio_bar);
		return rc;
	}

	afu->global_mmio_start =
		pci_resource_start(pci_dev, afu->config.global_mmio_bar) +
		afu->config.global_mmio_offset;
	afu->pp_mmio_start =
		pci_resource_start(pci_dev, afu->config.pp_mmio_bar) +
		afu->config.pp_mmio_offset;

	afu->global_mmio_ptr = ioremap(afu->global_mmio_start,
				afu->config.global_mmio_size);
	if (!afu->global_mmio_ptr) {
		release_fn_bar(afu->fn, afu->config.pp_mmio_bar);
		release_fn_bar(afu->fn, afu->config.global_mmio_bar);
		dev_err(&pci_dev->dev, "Error mapping global mmio area\n");
		return -ENOMEM;
	}

	/*
	 * Leave an empty page between the per-process mmio area and
	 * the AFU interrupt mappings
	 */
	afu->irq_base_offset = afu->config.pp_mmio_stride + PAGE_SIZE;
	return 0;
}

static void unmap_mmio_areas(struct ocxl_afu *afu)
{
	if (afu->global_mmio_ptr) {
		iounmap(afu->global_mmio_ptr);
		afu->global_mmio_ptr = NULL;
	}
	afu->global_mmio_start = 0;
	afu->pp_mmio_start = 0;
	release_fn_bar(afu->fn, afu->config.pp_mmio_bar);
	release_fn_bar(afu->fn, afu->config.global_mmio_bar);
}
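
/*
 * Configuring an AFU means reading its config space, assigning it
 * acTags and PASIDs from the function-wide pools, and mapping its
 * MMIO areas; deconfigure_afu() undoes those steps.
 */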

static int configure_afu(struct ocxl_afu *afu, u8 afu_idx, struct pci_dev *dev)
{
	int rc;

	rc = ocxl_config_read_afu(dev, &afu->fn->config, &afu->config, afu_idx);
	if (rc)
		return rc;

	rc = assign_afu_actag(afu);
	if (rc)
		return rc;

	rc = assign_afu_pasid(afu);
	if (rc)
		goto err_free_actag;

	rc = map_mmio_areas(afu);
	if (rc)
		goto err_free_pasid;

	return 0;

err_free_pasid:
	reclaim_afu_pasid(afu);
err_free_actag:
	reclaim_afu_actag(afu);
	return rc;
}

static void deconfigure_afu(struct ocxl_afu *afu)
{
	unmap_mmio_areas(afu);
	reclaim_afu_pasid(afu);
	reclaim_afu_actag(afu);
}

static int activate_afu(struct pci_dev *dev, struct ocxl_afu *afu)
{
	ocxl_config_set_afu_state(dev, afu->config.dvsec_afu_control_pos, 1);

	return 0;
}

static void deactivate_afu(struct ocxl_afu *afu)
{
	struct pci_dev *dev = to_pci_dev(afu->fn->dev.parent);

	ocxl_config_set_afu_state(dev, afu->config.dvsec_afu_control_pos, 0);
}
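
/*
 * init_afu() allocates, configures and enables one AFU and adds it to
 * the function's AFU list; remove_afu() tears it down and drops the
 * reference taken in alloc_afu().
 */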

static int init_afu(struct pci_dev *dev, struct ocxl_fn *fn, u8 afu_idx)
{
	int rc;
	struct ocxl_afu *afu;

	afu = alloc_afu(fn);
	if (!afu)
		return -ENOMEM;

	rc = configure_afu(afu, afu_idx, dev);
	if (rc) {
		ocxl_afu_put(afu);
		return rc;
	}

	rc = activate_afu(dev, afu);
	if (rc) {
		deconfigure_afu(afu);
		ocxl_afu_put(afu);
		return rc;
	}

	list_add_tail(&afu->list, &fn->afu_list);
	return 0;
}

static void remove_afu(struct ocxl_afu *afu)
{
	list_del(&afu->list);
	ocxl_context_detach_all(afu);
	deactivate_afu(afu);
	deconfigure_afu(afu);
	ocxl_afu_put(afu); // matches the implicit get in alloc_afu
}
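
/*
 * The function is represented by a virtual device named after its PCI
 * device ("ocxlfn.<pci device>"); its memory is released from the
 * device release callback once the last reference is gone.
 */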

static struct ocxl_fn *alloc_function(void)
{
	struct ocxl_fn *fn;

	fn = kzalloc(sizeof(struct ocxl_fn), GFP_KERNEL);
	if (!fn)
		return NULL;

	INIT_LIST_HEAD(&fn->afu_list);
	INIT_LIST_HEAD(&fn->pasid_list);
	INIT_LIST_HEAD(&fn->actag_list);
	return fn;
}

static void free_function(struct ocxl_fn *fn)
{
	WARN_ON(!list_empty(&fn->afu_list));
	WARN_ON(!list_empty(&fn->pasid_list));
	kfree(fn);
}

static void free_function_dev(struct device *dev)
{
	struct ocxl_fn *fn = container_of(dev, struct ocxl_fn, dev);

	free_function(fn);
}

static int set_function_device(struct ocxl_fn *fn, struct pci_dev *dev)
{
	int rc;

	fn->dev.parent = &dev->dev;
	fn->dev.release = free_function_dev;
	rc = dev_set_name(&fn->dev, "ocxlfn.%s", dev_name(&dev->dev));
	if (rc)
		return rc;
	return 0;
}

static int assign_function_actag(struct ocxl_fn *fn)
{
	struct pci_dev *dev = to_pci_dev(fn->dev.parent);
	u16 base, enabled, supported;
	int rc;

	rc = ocxl_config_get_actag_info(dev, &base, &enabled, &supported);
	if (rc)
		return rc;

	fn->actag_base = base;
	fn->actag_enabled = enabled;
	fn->actag_supported = supported;

	ocxl_config_set_actag(dev, fn->config.dvsec_function_pos,
			fn->actag_base, fn->actag_enabled);
	dev_dbg(&fn->dev, "actag range starting at %d, enabled %d\n",
		fn->actag_base, fn->actag_enabled);
	return 0;
}

static int set_function_pasid(struct ocxl_fn *fn)
{
	struct pci_dev *dev = to_pci_dev(fn->dev.parent);
	int rc, desired_count, max_count;

	/* A function may not require any PASID */
	if (fn->config.max_pasid_log < 0)
		return 0;

	rc = ocxl_config_get_pasid_info(dev, &max_count);
	if (rc)
		return rc;

	desired_count = 1 << fn->config.max_pasid_log;

	if (desired_count > max_count) {
		dev_err(&fn->dev,
			"Function requires more PASIDs than is available (%d vs. %d)\n",
			desired_count, max_count);
		return -ENOSPC;
	}

	return 0;
}
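
/*
 * configure_function() enables the PCI device, reads the OpenCAPI
 * DVSECs from the function config space, names the virtual device,
 * assigns the function-wide acTag range, checks the PASID
 * requirements, and sets up the link and transaction layer (TL).
 */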

static int configure_function(struct ocxl_fn *fn, struct pci_dev *dev)
{
	int rc;

	rc = pci_enable_device(dev);
	if (rc) {
		dev_err(&dev->dev, "pci_enable_device failed: %d\n", rc);
		return rc;
	}

	/*
	 * Once it has been confirmed to work on our hardware, we
	 * should reset the function, to force the adapter to restart
	 * from scratch.
	 * A function reset would also reset all its AFUs.
	 *
	 * Some hints for implementation:
	 *
	 * - there's no status bit to know when the reset is done. We
	 *   should try reading the config space to know when it's
	 *   done.
	 * - probably something like:
	 *	Reset
	 *	wait 100ms
	 *	issue config read
	 *	allow device up to 1 sec to return success on config
	 *	read before declaring it broken
	 *
	 * Some shared logic on the card (CFG, TLX) won't be reset, so
	 * there's no guarantee that it will be enough.
	 */
	rc = ocxl_config_read_function(dev, &fn->config);
	if (rc)
		return rc;

	rc = set_function_device(fn, dev);
	if (rc)
		return rc;

	rc = assign_function_actag(fn);
	if (rc)
		return rc;

	rc = set_function_pasid(fn);
	if (rc)
		return rc;

	rc = ocxl_link_setup(dev, 0, &fn->link);
	if (rc)
		return rc;

	rc = ocxl_config_set_TL(dev, fn->config.dvsec_tl_pos);
	if (rc) {
		ocxl_link_release(dev, fn->link);
		return rc;
	}
	return 0;
}

static void deconfigure_function(struct ocxl_fn *fn)
{
	struct pci_dev *dev = to_pci_dev(fn->dev.parent);

	ocxl_link_release(dev, fn->link);
	pci_disable_device(dev);
}

static struct ocxl_fn *init_function(struct pci_dev *dev)
{
	struct ocxl_fn *fn;
	int rc;

	fn = alloc_function();
	if (!fn)
		return ERR_PTR(-ENOMEM);

	rc = configure_function(fn, dev);
	if (rc) {
		free_function(fn);
		return ERR_PTR(rc);
	}

	rc = device_register(&fn->dev);
	if (rc) {
		deconfigure_function(fn);
		put_device(&fn->dev);
		return ERR_PTR(rc);
	}
	return fn;
}

// Device detection & initialisation

struct ocxl_fn *ocxl_function_open(struct pci_dev *dev)
{
	int rc, afu_count = 0;
	u8 afu;
	struct ocxl_fn *fn;

	if (!radix_enabled()) {
		dev_err(&dev->dev, "Unsupported memory model (hash)\n");
		return ERR_PTR(-ENODEV);
	}

	fn = init_function(dev);
	if (IS_ERR(fn)) {
		dev_err(&dev->dev, "function init failed: %li\n",
			PTR_ERR(fn));
		return fn;
	}

	for (afu = 0; afu <= fn->config.max_afu_index; afu++) {
		rc = ocxl_config_check_afu_index(dev, &fn->config, afu);
		if (rc > 0) {
			rc = init_afu(dev, fn, afu);
			if (rc) {
				dev_err(&dev->dev,
					"Can't initialize AFU index %d\n", afu);
				continue;
			}
			afu_count++;
		}
	}
	dev_info(&dev->dev, "%d AFU(s) configured\n", afu_count);
	return fn;
}
EXPORT_SYMBOL_GPL(ocxl_function_open);
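
/*
 * Usage sketch (not part of this file): a client driver's probe would
 * typically do something like
 *
 *	fn = ocxl_function_open(pdev);
 *	if (IS_ERR(fn))
 *		return PTR_ERR(fn);
 *	afu = ocxl_function_fetch_afu(fn, 0);
 *	// ... set up contexts/irqs on the AFU ...
 *	ocxl_function_close(fn);
 *
 * with the error handling and AFU setup being driver-specific.
 */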

struct list_head *ocxl_function_afu_list(struct ocxl_fn *fn)
{
	return &fn->afu_list;
}
EXPORT_SYMBOL_GPL(ocxl_function_afu_list);

struct ocxl_afu *ocxl_function_fetch_afu(struct ocxl_fn *fn, u8 afu_idx)
{
	struct ocxl_afu *afu;

	list_for_each_entry(afu, &fn->afu_list, list) {
		if (afu->config.idx == afu_idx)
			return afu;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(ocxl_function_fetch_afu);

const struct ocxl_fn_config *ocxl_function_config(struct ocxl_fn *fn)
{
	return &fn->config;
}
EXPORT_SYMBOL_GPL(ocxl_function_config);

void ocxl_function_close(struct ocxl_fn *fn)
{
	struct ocxl_afu *afu, *tmp;

	list_for_each_entry_safe(afu, tmp, &fn->afu_list, list) {
		remove_afu(afu);
	}

	deconfigure_function(fn);
	device_unregister(&fn->dev);
}
EXPORT_SYMBOL_GPL(ocxl_function_close);

// AFU Metadata

struct ocxl_afu_config *ocxl_afu_config(struct ocxl_afu *afu)
{
	return &afu->config;
}
EXPORT_SYMBOL_GPL(ocxl_afu_config);
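
/*
 * The private pointer lets client drivers attach their own per-AFU
 * state; it is stored as-is and returned by ocxl_afu_get_private().
 */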

void ocxl_afu_set_private(struct ocxl_afu *afu, void *private)
{
	afu->private = private;
}
EXPORT_SYMBOL_GPL(ocxl_afu_set_private);

void *ocxl_afu_get_private(struct ocxl_afu *afu)
{
	if (afu)
		return afu->private;

	return NULL;
}
EXPORT_SYMBOL_GPL(ocxl_afu_get_private);