/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/pci_regs.h>

#include "cxl.h"

#define to_afu_chardev_m(d) dev_get_drvdata(d)

/*********  Adapter attributes  **********************************************/

static ssize_t caia_version_show(struct device *device,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cxl *adapter = to_cxl_adapter(device);

	return scnprintf(buf, PAGE_SIZE, "%i.%i\n", adapter->caia_major,
			 adapter->caia_minor);
}

static ssize_t psl_revision_show(struct device *device,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cxl *adapter = to_cxl_adapter(device);

	return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_rev);
}

static ssize_t base_image_show(struct device *device,
			       struct device_attribute *attr,
			       char *buf)
{
	struct cxl *adapter = to_cxl_adapter(device);

	return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->base_image);
}

static ssize_t image_loaded_show(struct device *device,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cxl *adapter = to_cxl_adapter(device);

	if (adapter->user_image_loaded)
		return scnprintf(buf, PAGE_SIZE, "user\n");
	return scnprintf(buf, PAGE_SIZE, "factory\n");
}

static ssize_t psl_timebase_synced_show(struct device *device,
					struct device_attribute *attr,
					char *buf)
{
	struct cxl *adapter = to_cxl_adapter(device);
	u64 psl_tb, delta;

	/* Recompute the status only in native mode */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		psl_tb = adapter->native->sl_ops->timebase_read(adapter);
		delta = abs(mftb() - psl_tb);

		/* CORE TB and PSL TB difference <= 16usecs ? */
		adapter->psl_timebase_synced = (tb_to_ns(delta) < 16000) ? true : false;
		pr_devel("PSL timebase %s - delta: 0x%016llx\n",
			 (tb_to_ns(delta) < 16000) ? "synchronized" :
			 "not synchronized", tb_to_ns(delta));
	}
	return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_timebase_synced);
}

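/*
 * Illustrative usage (sysfs path assumes the adapter is enumerated as
 * card0): a read reports the cached status, recomputed on the fly in
 * native mode:
 *   $ cat /sys/class/cxl/card0/psl_timebase_synced
 *   1
 */
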
static ssize_t tunneled_ops_supported_show(struct device *device,
					   struct device_attribute *attr,
					   char *buf)
{
	struct cxl *adapter = to_cxl_adapter(device);

	return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->tunneled_ops_supported);
}

static ssize_t reset_adapter_store(struct device *device,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct cxl *adapter = to_cxl_adapter(device);
	int rc;
	int val;

	rc = sscanf(buf, "%i", &val);
	if ((rc != 1) || (val != 1 && val != -1))
		return -EINVAL;

	/*
	 * See if we can lock the context mapping that's only allowed
	 * when there are no contexts attached to the adapter. Once
	 * taken this will also prevent any context from getting activated.
	 */
	if (val == 1) {
		rc = cxl_adapter_context_lock(adapter);
		if (rc)
			goto out;

		rc = cxl_ops->adapter_reset(adapter);
		/* In case reset failed release context lock */
		if (rc)
			cxl_adapter_context_unlock(adapter);

	} else if (val == -1) {
		/* Perform a forced adapter reset */
		rc = cxl_ops->adapter_reset(adapter);
	}

out:
	return rc ? rc : count;
}

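/*
 * Illustrative usage, assuming the adapter is card0. Writing 1 resets the
 * adapter only if no contexts are attached; writing -1 forces the reset
 * regardless:
 *   $ echo 1 > /sys/class/cxl/card0/reset
 *   $ echo -1 > /sys/class/cxl/card0/reset
 */
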
static ssize_t load_image_on_perst_show(struct device *device,
					struct device_attribute *attr,
					char *buf)
{
	struct cxl *adapter = to_cxl_adapter(device);

	if (!adapter->perst_loads_image)
		return scnprintf(buf, PAGE_SIZE, "none\n");

	if (adapter->perst_select_user)
		return scnprintf(buf, PAGE_SIZE, "user\n");
	return scnprintf(buf, PAGE_SIZE, "factory\n");
}

static ssize_t load_image_on_perst_store(struct device *device,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct cxl *adapter = to_cxl_adapter(device);
	int rc;

	if (!strncmp(buf, "none", 4))
		adapter->perst_loads_image = false;
	else if (!strncmp(buf, "user", 4)) {
		adapter->perst_select_user = true;
		adapter->perst_loads_image = true;
	} else if (!strncmp(buf, "factory", 7)) {
		adapter->perst_select_user = false;
		adapter->perst_loads_image = true;
	} else
		return -EINVAL;

	if ((rc = cxl_update_image_control(adapter)))
		return rc;

	return count;
}

static ssize_t perst_reloads_same_image_show(struct device *device,
					     struct device_attribute *attr,
					     char *buf)
{
	struct cxl *adapter = to_cxl_adapter(device);

	return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->perst_same_image);
}

static ssize_t perst_reloads_same_image_store(struct device *device,
					      struct device_attribute *attr,
					      const char *buf, size_t count)
{
	struct cxl *adapter = to_cxl_adapter(device);
	int rc;
	int val;

	rc = sscanf(buf, "%i", &val);
	if ((rc != 1) || !(val == 1 || val == 0))
		return -EINVAL;

	adapter->perst_same_image = (val == 1 ? true : false);
	return count;
}

static struct device_attribute adapter_attrs[] = {
	__ATTR_RO(caia_version),
	__ATTR_RO(psl_revision),
	__ATTR_RO(base_image),
	__ATTR_RO(image_loaded),
	__ATTR_RO(psl_timebase_synced),
	__ATTR_RO(tunneled_ops_supported),
	__ATTR_RW(load_image_on_perst),
	__ATTR_RW(perst_reloads_same_image),
	__ATTR(reset, S_IWUSR, NULL, reset_adapter_store),
};

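/*
 * All of the above appear as files in the adapter's sysfs directory,
 * e.g. /sys/class/cxl/card0/ (card0 assumed for illustration):
 *   $ cat /sys/class/cxl/card0/image_loaded
 *   factory
 */
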
/*********  AFU master specific attributes  **********************************/

static ssize_t mmio_size_show_master(struct device *device,
				     struct device_attribute *attr,
				     char *buf)
{
	struct cxl_afu *afu = to_afu_chardev_m(device);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->adapter->ps_size);
}

static ssize_t pp_mmio_off_show(struct device *device,
				struct device_attribute *attr,
				char *buf)
{
	struct cxl_afu *afu = to_afu_chardev_m(device);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->native->pp_offset);
}

static ssize_t pp_mmio_len_show(struct device *device,
				struct device_attribute *attr,
				char *buf)
{
	struct cxl_afu *afu = to_afu_chardev_m(device);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_size);
}

static struct device_attribute afu_master_attrs[] = {
	__ATTR(mmio_size, S_IRUGO, mmio_size_show_master, NULL),
	__ATTR_RO(pp_mmio_off),
	__ATTR_RO(pp_mmio_len),
};

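/*
 * These attributes hang off the AFU master chardev, e.g.
 * /sys/class/cxl/afu0.0m/ (afu0.0m assumed for illustration), alongside
 * the per-AFU attributes exposed on afu0.0 below.
 */
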
/*********  AFU attributes  **************************************************/

static ssize_t mmio_size_show(struct device *device,
			      struct device_attribute *attr,
			      char *buf)
{
	struct cxl_afu *afu = to_cxl_afu(device);

	if (afu->pp_size)
		return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_size);
	return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->adapter->ps_size);
}

static ssize_t reset_store_afu(struct device *device,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct cxl_afu *afu = to_cxl_afu(device);
	int rc;

	/* Not safe to reset if it is currently in use */
	mutex_lock(&afu->contexts_lock);
	if (!idr_is_empty(&afu->contexts_idr)) {
		rc = -EBUSY;
		goto err;
	}

	if ((rc = cxl_ops->afu_reset(afu)))
		goto err;

	rc = count;
err:
	mutex_unlock(&afu->contexts_lock);
	return rc;
}

static ssize_t irqs_min_show(struct device *device,
			     struct device_attribute *attr,
			     char *buf)
{
	struct cxl_afu *afu = to_cxl_afu(device);

	return scnprintf(buf, PAGE_SIZE, "%i\n", afu->pp_irqs);
}

static ssize_t irqs_max_show(struct device *device,
			     struct device_attribute *attr,
			     char *buf)
{
	struct cxl_afu *afu = to_cxl_afu(device);

	return scnprintf(buf, PAGE_SIZE, "%i\n", afu->irqs_max);
}

static ssize_t irqs_max_store(struct device *device,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct cxl_afu *afu = to_cxl_afu(device);
	ssize_t ret;
	int irqs_max;

	ret = sscanf(buf, "%i", &irqs_max);
	if (ret != 1)
		return -EINVAL;

	if (irqs_max < afu->pp_irqs)
		return -EINVAL;

	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (irqs_max > afu->adapter->user_irqs)
			return -EINVAL;
	} else {
		/* pHyp sets a per-AFU limit */
		if (irqs_max > afu->guest->max_ints)
			return -EINVAL;
	}

	afu->irqs_max = irqs_max;
	return count;
}

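/*
 * Illustrative usage (afu0.0 assumed): irqs_max can be raised up to the
 * adapter (native) or pHyp (guest) limit, but never below irqs_min:
 *   $ echo 16 > /sys/class/cxl/afu0.0/irqs_max
 */
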
static ssize_t modes_supported_show(struct device *device,
				    struct device_attribute *attr, char *buf)
{
	struct cxl_afu *afu = to_cxl_afu(device);
	char *p = buf, *end = buf + PAGE_SIZE;

	if (afu->modes_supported & CXL_MODE_DEDICATED)
		p += scnprintf(p, end - p, "dedicated_process\n");
	if (afu->modes_supported & CXL_MODE_DIRECTED)
		p += scnprintf(p, end - p, "afu_directed\n");
	return (p - buf);
}

static ssize_t prefault_mode_show(struct device *device,
				  struct device_attribute *attr,
				  char *buf)
{
	struct cxl_afu *afu = to_cxl_afu(device);

	switch (afu->prefault_mode) {
	case CXL_PREFAULT_WED:
		return scnprintf(buf, PAGE_SIZE, "work_element_descriptor\n");
	case CXL_PREFAULT_ALL:
		return scnprintf(buf, PAGE_SIZE, "all\n");
	default:
		return scnprintf(buf, PAGE_SIZE, "none\n");
	}
}

static ssize_t prefault_mode_store(struct device *device,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct cxl_afu *afu = to_cxl_afu(device);
	enum prefault_modes mode = -1;

	if (!strncmp(buf, "work_element_descriptor", 23))
		mode = CXL_PREFAULT_WED;
	if (!strncmp(buf, "all", 3))
		mode = CXL_PREFAULT_ALL;
	if (!strncmp(buf, "none", 4))
		mode = CXL_PREFAULT_NONE;

	if (mode == -1)
		return -EINVAL;

	afu->prefault_mode = mode;
	return count;
}

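/*
 * Illustrative usage (afu0.0 assumed); the accepted values mirror
 * prefault_mode_show above:
 *   $ echo work_element_descriptor > /sys/class/cxl/afu0.0/prefault_mode
 */
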
static ssize_t mode_show(struct device *device,
			 struct device_attribute *attr,
			 char *buf)
{
	struct cxl_afu *afu = to_cxl_afu(device);

	if (afu->current_mode == CXL_MODE_DEDICATED)
		return scnprintf(buf, PAGE_SIZE, "dedicated_process\n");
	if (afu->current_mode == CXL_MODE_DIRECTED)
		return scnprintf(buf, PAGE_SIZE, "afu_directed\n");
	return scnprintf(buf, PAGE_SIZE, "none\n");
}

static ssize_t mode_store(struct device *device, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct cxl_afu *afu = to_cxl_afu(device);
	int old_mode, mode = -1;
	int rc = -EBUSY;

	/* can't change this if we have a user */
	mutex_lock(&afu->contexts_lock);
	if (!idr_is_empty(&afu->contexts_idr))
		goto err;

	if (!strncmp(buf, "dedicated_process", 17))
		mode = CXL_MODE_DEDICATED;
	if (!strncmp(buf, "afu_directed", 12))
		mode = CXL_MODE_DIRECTED;
	if (!strncmp(buf, "none", 4))
		mode = 0;

	if (mode == -1) {
		rc = -EINVAL;
		goto err;
	}

	/*
	 * afu_deactivate_mode needs to be done outside the lock, prevent
	 * other contexts coming in before we are ready:
	 */
	old_mode = afu->current_mode;
	afu->current_mode = 0;
	afu->num_procs = 0;

	mutex_unlock(&afu->contexts_lock);

	if ((rc = cxl_ops->afu_deactivate_mode(afu, old_mode)))
		return rc;
	if ((rc = cxl_ops->afu_activate_mode(afu, mode)))
		return rc;

	return count;
err:
	mutex_unlock(&afu->contexts_lock);
	return rc;
}

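/*
 * Illustrative usage (afu0.0 assumed): the mode can only be changed while
 * no contexts are attached, otherwise the write fails with EBUSY:
 *   $ echo dedicated_process > /sys/class/cxl/afu0.0/mode
 */
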
static ssize_t api_version_show(struct device *device,
				struct device_attribute *attr,
				char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%i\n", CXL_API_VERSION);
}

static ssize_t api_version_compatible_show(struct device *device,
					   struct device_attribute *attr,
					   char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%i\n", CXL_API_VERSION_COMPATIBLE);
}

static ssize_t afu_eb_read(struct file *filp, struct kobject *kobj,
			   struct bin_attribute *bin_attr, char *buf,
			   loff_t off, size_t count)
{
	struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj));

	return cxl_ops->afu_read_err_buffer(afu, buf, off, count);
}

static struct device_attribute afu_attrs[] = {
	__ATTR_RO(mmio_size),
	__ATTR_RO(irqs_min),
	__ATTR_RW(irqs_max),
	__ATTR_RO(modes_supported),
	__ATTR_RW(mode),
	__ATTR_RW(prefault_mode),
	__ATTR_RO(api_version),
	__ATTR_RO(api_version_compatible),
	__ATTR(reset, S_IWUSR, NULL, reset_store_afu),
};

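/*
 * The per-AFU attributes appear under e.g. /sys/class/cxl/afu0.0/
 * (afu0.0 assumed for illustration):
 *   $ cat /sys/class/cxl/afu0.0/modes_supported
 *   dedicated_process
 *   afu_directed
 */
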
int cxl_sysfs_adapter_add(struct cxl *adapter)
{
	struct device_attribute *dev_attr;
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) {
		dev_attr = &adapter_attrs[i];
		if (cxl_ops->support_attributes(dev_attr->attr.name,
						CXL_ADAPTER_ATTRS)) {
			if ((rc = device_create_file(&adapter->dev, dev_attr)))
				goto err;
		}
	}
	return 0;
err:
	for (i--; i >= 0; i--) {
		dev_attr = &adapter_attrs[i];
		if (cxl_ops->support_attributes(dev_attr->attr.name,
						CXL_ADAPTER_ATTRS))
			device_remove_file(&adapter->dev, dev_attr);
	}
	return rc;
}

void cxl_sysfs_adapter_remove(struct cxl *adapter)
{
	struct device_attribute *dev_attr;
	int i;

	for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) {
		dev_attr = &adapter_attrs[i];
		if (cxl_ops->support_attributes(dev_attr->attr.name,
						CXL_ADAPTER_ATTRS))
			device_remove_file(&adapter->dev, dev_attr);
	}
}

struct afu_config_record {
	struct kobject kobj;
	struct bin_attribute config_attr;
	struct list_head list;
	int cr;
	u16 device;
	u16 vendor;
	u32 class;
};

#define to_cr(obj) container_of(obj, struct afu_config_record, kobj)

static ssize_t vendor_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	struct afu_config_record *cr = to_cr(kobj);

	return scnprintf(buf, PAGE_SIZE, "0x%.4x\n", cr->vendor);
}

static ssize_t device_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	struct afu_config_record *cr = to_cr(kobj);

	return scnprintf(buf, PAGE_SIZE, "0x%.4x\n", cr->device);
}

static ssize_t class_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	struct afu_config_record *cr = to_cr(kobj);

	return scnprintf(buf, PAGE_SIZE, "0x%.6x\n", cr->class);
}

static ssize_t afu_read_config(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr, char *buf,
			       loff_t off, size_t count)
{
	struct afu_config_record *cr = to_cr(kobj);
	struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj->parent));

	u64 i, j, val, rc;

	for (i = 0; i < count;) {
		rc = cxl_ops->afu_cr_read64(afu, cr->cr, off & ~0x7, &val);
		if (rc)
			val = ~0ULL;
		for (j = off & 0x7; j < 8 && i < count; i++, j++, off++)
			buf[i] = (val >> (j * 8)) & 0xff;
	}

	return count;
}

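/*
 * Reads go through aligned 8-byte AFU config-record accesses while
 * honouring arbitrary user offsets and lengths. Illustrative usage (cr0
 * on afu0.0 assumed; the file is root-readable only):
 *   # hexdump -C /sys/class/cxl/afu0.0/cr0/config
 */
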
static struct kobj_attribute vendor_attribute =
	__ATTR_RO(vendor);
static struct kobj_attribute device_attribute =
	__ATTR_RO(device);
static struct kobj_attribute class_attribute =
	__ATTR_RO(class);

static struct attribute *afu_cr_attrs[] = {
	&vendor_attribute.attr,
	&device_attribute.attr,
	&class_attribute.attr,
	NULL,
};

static void release_afu_config_record(struct kobject *kobj)
{
	struct afu_config_record *cr = to_cr(kobj);

	kfree(cr);
}

static struct kobj_type afu_config_record_type = {
	.sysfs_ops = &kobj_sysfs_ops,
	.release = release_afu_config_record,
	.default_attrs = afu_cr_attrs,
};

static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int cr_idx)
{
	struct afu_config_record *cr;
	int rc;

	cr = kzalloc(sizeof(struct afu_config_record), GFP_KERNEL);
	if (!cr)
		return ERR_PTR(-ENOMEM);

	cr->cr = cr_idx;

	rc = cxl_ops->afu_cr_read16(afu, cr_idx, PCI_DEVICE_ID, &cr->device);
	if (rc)
		goto err;
	rc = cxl_ops->afu_cr_read16(afu, cr_idx, PCI_VENDOR_ID, &cr->vendor);
	if (rc)
		goto err;
	rc = cxl_ops->afu_cr_read32(afu, cr_idx, PCI_CLASS_REVISION, &cr->class);
	if (rc)
		goto err;
	cr->class >>= 8;

	/*
	 * Export raw AFU PCIe-like config record. For now this is read only by
	 * root - we can expand that later to be readable by non-root and maybe
	 * even writable provided we have a good use-case. Once we support
	 * exposing AFUs through a virtual PHB they will get that for free from
	 * Linux' PCI infrastructure, but until then it's not clear that we
	 * need it for anything since the main use case is just identifying
	 * AFUs, which can be done via the vendor, device and class attributes.
	 */
	sysfs_bin_attr_init(&cr->config_attr);
	cr->config_attr.attr.name = "config";
	cr->config_attr.attr.mode = S_IRUSR;
	cr->config_attr.size = afu->crs_len;
	cr->config_attr.read = afu_read_config;

	rc = kobject_init_and_add(&cr->kobj, &afu_config_record_type,
				  &afu->dev.kobj, "cr%i", cr->cr);
	if (rc)
		goto err1;

	rc = sysfs_create_bin_file(&cr->kobj, &cr->config_attr);
	if (rc)
		goto err1;

	rc = kobject_uevent(&cr->kobj, KOBJ_ADD);
	if (rc)
		goto err2;

	return cr;
err2:
	sysfs_remove_bin_file(&cr->kobj, &cr->config_attr);
err1:
	kobject_put(&cr->kobj);
	return ERR_PTR(rc);
err:
	kfree(cr);
	return ERR_PTR(rc);
}

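/*
 * On success each config record becomes a kobject directory beneath the
 * AFU device, e.g. /sys/class/cxl/afu0.0/cr0/ (naming assumed for
 * illustration), holding the vendor, device and class attributes plus
 * the raw "config" blob of afu->crs_len bytes.
 */
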
void cxl_sysfs_afu_remove(struct cxl_afu *afu)
{
	struct device_attribute *dev_attr;
	struct afu_config_record *cr, *tmp;
	int i;

	/* remove the err buffer bin attribute */
	if (afu->eb_len)
		device_remove_bin_file(&afu->dev, &afu->attr_eb);

	for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
		dev_attr = &afu_attrs[i];
		if (cxl_ops->support_attributes(dev_attr->attr.name,
						CXL_AFU_ATTRS))
			device_remove_file(&afu->dev, &afu_attrs[i]);
	}

	list_for_each_entry_safe(cr, tmp, &afu->crs, list) {
		sysfs_remove_bin_file(&cr->kobj, &cr->config_attr);
		kobject_put(&cr->kobj);
	}
}

int cxl_sysfs_afu_add(struct cxl_afu *afu)
{
	struct device_attribute *dev_attr;
	struct afu_config_record *cr;
	int i, rc;

	INIT_LIST_HEAD(&afu->crs);

	for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
		dev_attr = &afu_attrs[i];
		if (cxl_ops->support_attributes(dev_attr->attr.name,
						CXL_AFU_ATTRS)) {
			if ((rc = device_create_file(&afu->dev, &afu_attrs[i])))
				goto err;
		}
	}

	/* conditionally create the binary file for the error info buffer */
	if (afu->eb_len) {
		sysfs_attr_init(&afu->attr_eb.attr);

		afu->attr_eb.attr.name = "afu_err_buff";
		afu->attr_eb.attr.mode = S_IRUGO;
		afu->attr_eb.size = afu->eb_len;
		afu->attr_eb.read = afu_eb_read;

		rc = device_create_bin_file(&afu->dev, &afu->attr_eb);
		if (rc) {
			dev_err(&afu->dev,
				"Unable to create eb attr for the afu. Err(%d)\n",
				rc);
			goto err;
		}
	}

	for (i = 0; i < afu->crs_num; i++) {
		cr = cxl_sysfs_afu_new_cr(afu, i);
		if (IS_ERR(cr)) {
			rc = PTR_ERR(cr);
			goto err1;
		}
		list_add(&cr->list, &afu->crs);
	}

	return 0;

err1:
	cxl_sysfs_afu_remove(afu);
	return rc;
err:
	/* reset eb_len as we haven't created the bin attr */
	afu->eb_len = 0;

	for (i--; i >= 0; i--) {
		dev_attr = &afu_attrs[i];
		if (cxl_ops->support_attributes(dev_attr->attr.name,
						CXL_AFU_ATTRS))
			device_remove_file(&afu->dev, &afu_attrs[i]);
	}
	return rc;
}

int cxl_sysfs_afu_m_add(struct cxl_afu *afu)
{
	struct device_attribute *dev_attr;
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) {
		dev_attr = &afu_master_attrs[i];
		if (cxl_ops->support_attributes(dev_attr->attr.name,
						CXL_AFU_MASTER_ATTRS)) {
			if ((rc = device_create_file(afu->chardev_m, &afu_master_attrs[i])))
				goto err;
		}
	}

	return 0;

err:
	for (i--; i >= 0; i--) {
		dev_attr = &afu_master_attrs[i];
		if (cxl_ops->support_attributes(dev_attr->attr.name,
						CXL_AFU_MASTER_ATTRS))
			device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
	}
	return rc;
}

void cxl_sysfs_afu_m_remove(struct cxl_afu *afu)
{
	struct device_attribute *dev_attr;
	int i;

	for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) {
		dev_attr = &afu_master_attrs[i];
		if (cxl_ops->support_attributes(dev_attr->attr.name,
						CXL_AFU_MASTER_ATTRS))
			device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
	}
}