#include <linux/pci.h>
#include <acpi/apei.h>
#include <linux/suspend.h>
++#include <linux/prmt.h>
#include "internal.h"
out_kfree:
kfree(output.pointer);
-- if (status != AE_OK)
-- context->ret.pointer = NULL;
return status;
}
EXPORT_SYMBOL(acpi_run_osc);
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT;
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PCLPI_SUPPORT;
++ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PRM_SUPPORT;
#ifdef CONFIG_ARM64
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_GENERIC_INITIATOR_SUPPORT;
if (ACPI_FAILURE(acpi_run_osc(handle, &context)))
return;
-- capbuf_ret = context.ret.pointer;
-- if (context.ret.length <= OSC_SUPPORT_DWORD) {
-- kfree(context.ret.pointer);
-- return;
-- }
++ kfree(context.ret.pointer);
-- /*
-- * Now run _OSC again with query flag clear and with the caps
-- * supported by both the OS and the platform.
-- */
++ /* Now run _OSC again with query flag clear */
capbuf[OSC_QUERY_DWORD] = 0;
-- capbuf[OSC_SUPPORT_DWORD] = capbuf_ret[OSC_SUPPORT_DWORD];
-- kfree(context.ret.pointer);
if (ACPI_FAILURE(acpi_run_osc(handle, &context)))
return;
capbuf_ret = context.ret.pointer;
-- if (context.ret.length > OSC_SUPPORT_DWORD) {
-- osc_sb_apei_support_acked =
-- capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
-- osc_pc_lpi_support_confirmed =
-- capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT;
-- osc_sb_native_usb4_support_confirmed =
-- capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_NATIVE_USB4_SUPPORT;
-- }
++ osc_sb_apei_support_acked =
++ capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
++ osc_pc_lpi_support_confirmed =
++ capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT;
++ osc_sb_native_usb4_support_confirmed =
++ capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_NATIVE_USB4_SUPPORT;
kfree(context.ret.pointer);
}
static void acpi_bus_decode_usb_osc(const char *msg, u32 bits)
{
-- printk(KERN_INFO PREFIX "%s USB3%c DisplayPort%c PCIe%c XDomain%c\n", msg,
++ pr_info("%s USB3%c DisplayPort%c PCIe%c XDomain%c\n", msg,
(bits & OSC_USB_USB3_TUNNELING) ? '+' : '-',
(bits & OSC_USB_DP_TUNNELING) ? '+' : '-',
(bits & OSC_USB_PCIE_TUNNELING) ? '+' : '-',
return;
if (context.ret.length != sizeof(capbuf)) {
-- printk(KERN_INFO PREFIX "USB4 _OSC: returned invalid length buffer\n");
++ pr_info("USB4 _OSC: returned invalid length buffer\n");
goto out_free;
}
static acpi_status acpi_bus_table_handler(u32 event, void *table, void *context)
{
-- acpi_scan_table_handler(event, table, context);
++ if (event == ACPI_TABLE_EVENT_LOAD)
++ acpi_scan_table_notify();
return acpi_sysfs_table_handler(event, table, context);
}
acpi_kobj = NULL;
}
++ init_prmt();
result = acpi_bus_init();
if (result) {
disable_acpi();
#include <linux/idr.h>
-- #define PREFIX "ACPI: "
--
int early_acpi_osi_init(void);
int acpi_osi_init(void);
acpi_status acpi_os_initialize1(void);
bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent);
acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context);
--void acpi_scan_table_handler(u32 event, void *table, void *context);
++void acpi_scan_table_notify(void);
/* --------------------------------------------------------------------------
Device Node Initialization / Removal
void acpi_power_resources_list_free(struct list_head *list);
int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
struct list_head *list);
-- int acpi_add_power_resource(acpi_handle handle);
++ struct acpi_device *acpi_add_power_resource(acpi_handle handle);
void acpi_power_add_remove_device(struct acpi_device *adev, bool add);
int acpi_power_wakeup_list_init(struct list_head *list, int *system_level);
int acpi_device_sleep_wake(struct acpi_device *dev,
int acpi_power_get_inferred_state(struct acpi_device *device, int *state);
int acpi_power_on_resources(struct acpi_device *device, int state);
int acpi_power_transition(struct acpi_device *device, int state);
--void acpi_turn_off_unused_power_resources(bool init);
++void acpi_turn_off_unused_power_resources(void);
/* --------------------------------------------------------------------------
Device Power Management
-- * - Added processor hotplug support
++ * - Added processor hotplug support
*/
++ #define pr_fmt(fmt) "ACPI: " fmt
++
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <linux/uaccess.h>
-- #define PREFIX "ACPI: "
--
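Dropping the open-coded PREFIX works because pr_fmt() is now defined before the printk helpers are pulled in, and every pr_*() call expands through it. From include/linux/printk.h, roughly:

#define pr_fmt(fmt) fmt                 /* default, overridden by the define above */
#define pr_err(fmt, ...) printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)

so pr_err("Invalid _PTC data\n") still comes out prefixed as "ACPI: Invalid _PTC data".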
/* ignore_tpc:
* 0 -> acpi processor driver doesn't ignore _TPC values
* 1 -> acpi processor driver ignores _TPC values
{
if (acpi_processor_update_tsd_coord())
pr_debug("Assume no T-state coordination\n");
--
-- return;
}
static int acpi_processor_throttling_notifier(unsigned long event, void *data)
{
struct throttling_tstate *p_tstate = data;
struct acpi_processor *pr;
-- unsigned int cpu ;
++ unsigned int cpu;
int target_state;
struct acpi_processor_limit *p_limit;
struct acpi_processor_throttling *p_throttling;
if (pr->throttling_platform_limit > target_state)
target_state = pr->throttling_platform_limit;
if (target_state >= p_throttling->state_count) {
-- printk(KERN_WARNING
-- "Exceed the limit of T-state \n");
++ pr_warn("Exceed the limit of T-state \n");
target_state = p_throttling->state_count - 1;
}
p_tstate->target_state = target_state;
cpu, target_state);
break;
default:
-- printk(KERN_WARNING
-- "Unsupported Throttling notifier event\n");
++ pr_warn("Unsupported Throttling notifier event\n");
break;
}
acpi_status status = 0;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *ptc = NULL;
-- union acpi_object obj = { 0 };
++ union acpi_object obj;
struct acpi_processor_throttling *throttling;
status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
ptc = (union acpi_object *)buffer.pointer;
if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE)
|| (ptc->package.count != 2)) {
-- printk(KERN_ERR PREFIX "Invalid _PTC data\n");
++ pr_err("Invalid _PTC data\n");
result = -EFAULT;
goto end;
}
if ((obj.type != ACPI_TYPE_BUFFER)
|| (obj.buffer.length < sizeof(struct acpi_ptc_register))
|| (obj.buffer.pointer == NULL)) {
-- printk(KERN_ERR PREFIX
-- "Invalid _PTC data (control_register)\n");
++ pr_err("Invalid _PTC data (control_register)\n");
result = -EFAULT;
goto end;
}
if ((obj.type != ACPI_TYPE_BUFFER)
|| (obj.buffer.length < sizeof(struct acpi_ptc_register))
|| (obj.buffer.pointer == NULL)) {
-- printk(KERN_ERR PREFIX "Invalid _PTC data (status_register)\n");
++ pr_err("Invalid _PTC data (status_register)\n");
result = -EFAULT;
goto end;
}
if ((throttling->control_register.bit_width +
throttling->control_register.bit_offset) > 32) {
-- printk(KERN_ERR PREFIX "Invalid _PTC control register\n");
++ pr_err("Invalid _PTC control register\n");
result = -EFAULT;
goto end;
}
if ((throttling->status_register.bit_width +
throttling->status_register.bit_offset) > 32) {
-- printk(KERN_ERR PREFIX "Invalid _PTC status register\n");
++ pr_err("Invalid _PTC status register\n");
result = -EFAULT;
goto end;
}
-- end:
++end:
kfree(buffer.pointer);
return result;
tss = buffer.pointer;
if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) {
-- printk(KERN_ERR PREFIX "Invalid _TSS data\n");
++ pr_err("Invalid _TSS data\n");
result = -EFAULT;
goto end;
}
}
if (!tx->freqpercentage) {
-- printk(KERN_ERR PREFIX
-- "Invalid _TSS data: freq is zero\n");
++ pr_err("Invalid _TSS data: freq is zero\n");
result = -EFAULT;
kfree(pr->throttling.states_tss);
goto end;
}
}
-- end:
++end:
kfree(buffer.pointer);
return result;
tsd = buffer.pointer;
if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) {
-- printk(KERN_ERR PREFIX "Invalid _TSD data\n");
++ pr_err("Invalid _TSD data\n");
result = -EFAULT;
goto end;
}
if (tsd->package.count != 1) {
-- printk(KERN_ERR PREFIX "Invalid _TSD data\n");
++ pr_err("Invalid _TSD data\n");
result = -EFAULT;
goto end;
}
status = acpi_extract_package(&(tsd->package.elements[0]),
&format, &state);
if (ACPI_FAILURE(status)) {
-- printk(KERN_ERR PREFIX "Invalid _TSD data\n");
++ pr_err("Invalid _TSD data\n");
result = -EFAULT;
goto end;
}
if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) {
-- printk(KERN_ERR PREFIX "Unknown _TSD:num_entries\n");
++ pr_err("Unknown _TSD:num_entries\n");
result = -EFAULT;
goto end;
}
if (pdomain->revision != ACPI_TSD_REV0_REVISION) {
-- printk(KERN_ERR PREFIX "Unknown _TSD:revision\n");
++ pr_err("Unknown _TSD:revision\n");
result = -EFAULT;
goto end;
}
pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
}
-- end:
++end:
kfree(buffer.pointer);
return result;
}
if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
!this_cpu_has(X86_FEATURE_ACPI)) {
-- printk(KERN_ERR PREFIX
-- "HARDWARE addr space,NOT supported yet\n");
++ pr_err("HARDWARE addr space,NOT supported yet\n");
} else {
msr_low = 0;
msr_high = 0;
rdmsr_safe(MSR_IA32_THERM_CONTROL,
-- (u32 *)&msr_low , (u32 *) &msr_high);
++ (u32 *)&msr_low, (u32 *) &msr_high);
msr = (msr_high << 32) | msr_low;
*value = (u64) msr;
ret = 0;
if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
!this_cpu_has(X86_FEATURE_ACPI)) {
-- printk(KERN_ERR PREFIX
-- "HARDWARE addr space,NOT supported yet\n");
++ pr_err("HARDWARE addr space,NOT supported yet\n");
} else {
msr = value;
wrmsr_safe(MSR_IA32_THERM_CONTROL,
#else
static int acpi_throttling_rdmsr(u64 *value)
{
-- printk(KERN_ERR PREFIX
-- "HARDWARE addr space,NOT supported yet\n");
++ pr_err("HARDWARE addr space,NOT supported yet\n");
return -1;
}
static int acpi_throttling_wrmsr(u64 value)
{
-- printk(KERN_ERR PREFIX
-- "HARDWARE addr space,NOT supported yet\n");
++ pr_err("HARDWARE addr space,NOT supported yet\n");
return -1;
}
#endif
ret = acpi_throttling_rdmsr(value);
break;
default:
-- printk(KERN_ERR PREFIX "Unknown addr space %d\n",
++ pr_err("Unknown addr space %d\n",
(u32) (throttling->status_register.space_id));
}
return ret;
ret = acpi_throttling_wrmsr(value);
break;
default:
-- printk(KERN_ERR PREFIX "Unknown addr space %d\n",
++ pr_err("Unknown addr space %d\n",
(u32) (throttling->control_register.space_id));
}
return ret;
}
/* TBD: Support duty_cycle values that span bit 4. */
else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
-- printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n");
++ pr_warn("duty_cycle spans bit 4\n");
return -EINVAL;
}
*/
if (acpi_processor_get_throttling_control(pr) ||
acpi_processor_get_throttling_states(pr) ||
-- acpi_processor_get_platform_limit(pr))
-- {
++ acpi_processor_get_platform_limit(pr)) {
pr->throttling.acpi_processor_get_throttling =
&acpi_processor_get_throttling_fadt;
pr->throttling.acpi_processor_set_throttling =
goto end;
}
-- end:
++end:
if (result)
pr->flags.throttling = 0;
* scan.c - support for transforming the ACPI namespace into individual objects
*/
++ #define pr_fmt(fmt) "ACPI: " fmt
++
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
*/
static u64 spcr_uart_addr;
--struct acpi_dep_data {
-- struct list_head node;
-- acpi_handle supplier;
-- acpi_handle consumer;
--};
--
void acpi_scan_lock_acquire(void)
{
mutex_lock(&acpi_scan_lock);
return handle_to_device(handle, get_acpi_device);
}
--void acpi_bus_put_acpi_device(struct acpi_device *adev)
--{
-- acpi_dev_put(adev);
--}
--
static struct acpi_device_bus_id *acpi_device_bus_id_match(const char *dev_id)
{
struct acpi_device_bus_id *acpi_device_bus_id;
return 0;
}
--int acpi_device_add(struct acpi_device *device,
-- void (*release)(struct device *))
++static int acpi_tie_acpi_dev(struct acpi_device *adev)
{
-- struct acpi_device_bus_id *acpi_device_bus_id;
-- int result;
++ acpi_handle handle = adev->handle;
++ acpi_status status;
-- if (device->handle) {
-- acpi_status status;
++ if (!handle)
++ return 0;
-- status = acpi_attach_data(device->handle, acpi_scan_drop_device,
-- device);
-- if (ACPI_FAILURE(status)) {
-- acpi_handle_err(device->handle,
-- "Unable to attach device data\n");
-- return -ENODEV;
-- }
++ status = acpi_attach_data(handle, acpi_scan_drop_device, adev);
++ if (ACPI_FAILURE(status)) {
++ acpi_handle_err(handle, "Unable to attach device data\n");
++ return -ENODEV;
}
++ return 0;
++}
++
++static int __acpi_device_add(struct acpi_device *device,
++ void (*release)(struct device *))
++{
++ struct acpi_device_bus_id *acpi_device_bus_id;
++ int result;
++
/*
* Linkage
* -------
result = acpi_device_set_name(device, acpi_device_bus_id);
if (result) {
++ kfree_const(acpi_device_bus_id->bus_id);
kfree(acpi_device_bus_id);
goto err_unlock;
}
result = acpi_device_setup_files(device);
if (result)
-- printk(KERN_ERR PREFIX "Error creating sysfs interface for device %s\n",
++ pr_err("Error creating sysfs interface for device %s\n",
dev_name(&device->dev));
return 0;
return result;
}
++int acpi_device_add(struct acpi_device *adev, void (*release)(struct device *))
++{
++ int ret;
++
++ ret = acpi_tie_acpi_dev(adev);
++ if (ret)
++ return ret;
++
++ return __acpi_device_add(adev, release);
++}
++
/* --------------------------------------------------------------------------
Device Enumeration
-------------------------------------------------------------------------- */
acpi_get_object_info(handle, &info);
if (!info) {
-- pr_err(PREFIX "%s: Error reading device info\n",
-- __func__);
++ pr_err("%s: Error reading device info\n", __func__);
return;
}
device_initialize(&device->dev);
dev_set_uevent_suppress(&device->dev, true);
acpi_init_coherency(device);
-- /* Assume there are unmet deps to start with. */
-- device->dep_unmet = 1;
++}
++
++static void acpi_scan_dep_init(struct acpi_device *adev)
++{
++ struct acpi_dep_data *dep;
++
++ list_for_each_entry(dep, &acpi_dep_list, node) {
++ if (dep->consumer == adev->handle)
++ adev->dep_unmet++;
++ }
}
void acpi_device_add_finalize(struct acpi_device *device)
}
static int acpi_add_single_object(struct acpi_device **child,
-- acpi_handle handle, int type)
++ acpi_handle handle, int type, bool dep_init)
{
struct acpi_device *device;
++ bool release_dep_lock = false;
int result;
device = kzalloc(sizeof(struct acpi_device), GFP_KERNEL);
* acpi_bus_get_status() and use its quirk handling. Note that
* this must be done before the get power-/wakeup_dev-flags calls.
*/
-- if (type == ACPI_BUS_TYPE_DEVICE || type == ACPI_BUS_TYPE_PROCESSOR)
++ if (type == ACPI_BUS_TYPE_DEVICE || type == ACPI_BUS_TYPE_PROCESSOR) {
++ if (dep_init) {
++ mutex_lock(&acpi_dep_list_lock);
++ /*
++ * Hold the lock until the acpi_tie_acpi_dev() call
++ * below to prevent concurrent acpi_scan_clear_dep()
++ * from deleting a dependency list entry without
++ * updating dep_unmet for the device.
++ */
++ release_dep_lock = true;
++ acpi_scan_dep_init(device);
++ }
acpi_scan_init_status(device);
++ }
acpi_bus_get_power_flags(device);
acpi_bus_get_wakeup_device_flags(device);
-- result = acpi_device_add(device, acpi_device_release);
++ result = acpi_tie_acpi_dev(device);
++
++ if (release_dep_lock)
++ mutex_unlock(&acpi_dep_list_lock);
++
++ if (!result)
++ result = __acpi_device_add(device, acpi_device_release);
++
if (result) {
acpi_device_release(&device->dev);
return result;
return count;
}
--static void acpi_scan_dep_init(struct acpi_device *adev)
--{
-- struct acpi_dep_data *dep;
--
-- adev->dep_unmet = 0;
--
-- mutex_lock(&acpi_dep_list_lock);
--
-- list_for_each_entry(dep, &acpi_dep_list, node) {
-- if (dep->consumer == adev->handle)
-- adev->dep_unmet++;
-- }
--
-- mutex_unlock(&acpi_dep_list_lock);
--}
--
static bool acpi_bus_scan_second_pass;
static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep,
return AE_OK;
}
-- acpi_add_single_object(&device, handle, type);
-- if (!device)
-- return AE_CTRL_DEPTH;
--
-- acpi_scan_init_hotplug(device);
/*
* If check_dep is true at this point, the device has no dependencies,
* or the creation of the device object would have been postponed above.
*/
-- if (check_dep)
-- device->dep_unmet = 0;
-- else
-- acpi_scan_dep_init(device);
++ acpi_add_single_object(&device, handle, type, !check_dep);
++ if (!device)
++ return AE_CTRL_DEPTH;
++
++ acpi_scan_init_hotplug(device);
out:
if (!*adev_p)
device->handler->hotplug.notify_online(device);
}
--void acpi_walk_dep_device_list(acpi_handle handle)
++static int acpi_dev_get_first_consumer_dev_cb(struct acpi_dep_data *dep, void *data)
{
-- struct acpi_dep_data *dep, *tmp;
struct acpi_device *adev;
++ adev = acpi_bus_get_acpi_device(dep->consumer);
++ if (adev) {
++ *(struct acpi_device **)data = adev;
++ return 1;
++ }
++ /* Continue parsing if the device object is not present. */
++ return 0;
++}
++
++struct acpi_scan_clear_dep_work {
++ struct work_struct work;
++ struct acpi_device *adev;
++};
++
++static void acpi_scan_clear_dep_fn(struct work_struct *work)
++{
++ struct acpi_scan_clear_dep_work *cdw;
++
++ cdw = container_of(work, struct acpi_scan_clear_dep_work, work);
++
++ acpi_scan_lock_acquire();
++ acpi_bus_attach(cdw->adev, true);
++ acpi_scan_lock_release();
++
++ acpi_dev_put(cdw->adev);
++ kfree(cdw);
++}
++
++static bool acpi_scan_clear_dep_queue(struct acpi_device *adev)
++{
++ struct acpi_scan_clear_dep_work *cdw;
++
++ if (adev->dep_unmet)
++ return false;
++
++ cdw = kmalloc(sizeof(*cdw), GFP_KERNEL);
++ if (!cdw)
++ return false;
++
++ cdw->adev = adev;
++ INIT_WORK(&cdw->work, acpi_scan_clear_dep_fn);
++ /*
++ * Since the work function may block on the lock until the entire
++ * initial enumeration of devices is complete, put it into the unbound
++ * workqueue.
++ */
++ queue_work(system_unbound_wq, &cdw->work);
++
++ return true;
++}
++
++static int acpi_scan_clear_dep(struct acpi_dep_data *dep, void *data)
++{
++ struct acpi_device *adev = acpi_bus_get_acpi_device(dep->consumer);
++
++ if (adev) {
++ adev->dep_unmet--;
++ if (!acpi_scan_clear_dep_queue(adev))
++ acpi_dev_put(adev);
++ }
++
++ list_del(&dep->node);
++ kfree(dep);
++
++ return 0;
++}
++
++/**
++ * acpi_walk_dep_device_list - Apply a callback to every entry in acpi_dep_list
++ * @handle: The ACPI handle of the supplier device
++ * @callback: Pointer to the callback function to apply
++ * @data: Pointer to some data to pass to the callback
++ *
++ * The return value of the callback determines this function's behaviour. If 0
++ * is returned we continue to iterate over acpi_dep_list. If a positive value
++ * is returned then the loop is broken but this function returns 0. If a
++ * negative value is returned by the callback then the loop is broken and that
++ * value is returned as the final error.
++ */
++static int acpi_walk_dep_device_list(acpi_handle handle,
++ int (*callback)(struct acpi_dep_data *, void *),
++ void *data)
++{
++ struct acpi_dep_data *dep, *tmp;
++ int ret = 0;
++
mutex_lock(&acpi_dep_list_lock);
list_for_each_entry_safe(dep, tmp, &acpi_dep_list, node) {
if (dep->supplier == handle) {
-- acpi_bus_get_device(dep->consumer, &adev);
--
-- if (adev) {
-- adev->dep_unmet--;
-- if (!adev->dep_unmet)
-- acpi_bus_attach(adev, true);
-- }
--
-- list_del(&dep->node);
-- kfree(dep);
++ ret = callback(dep, data);
++ if (ret)
++ break;
}
}
mutex_unlock(&acpi_dep_list_lock);
++
++ return ret > 0 ? 0 : ret;
++}
++
++/**
++ * acpi_dev_clear_dependencies - Inform consumers that the device is now active
++ * @supplier: Pointer to the supplier &struct acpi_device
++ *
++ * Clear dependencies on the given device.
++ */
++void acpi_dev_clear_dependencies(struct acpi_device *supplier)
++{
++ acpi_walk_dep_device_list(supplier->handle, acpi_scan_clear_dep, NULL);
++}
++EXPORT_SYMBOL_GPL(acpi_dev_clear_dependencies);
++
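For context, the intended caller of acpi_dev_clear_dependencies() is the supplier's driver, once the supplier is functional enough for its dependents to be probed. A minimal sketch, with the driver and its init helper made up for illustration:

#include <linux/acpi.h>
#include <linux/platform_device.h>

static int example_supplier_probe(struct platform_device *pdev)
{
	struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
	int ret;

	ret = example_supplier_hw_init(pdev);	/* hypothetical hardware bring-up */
	if (ret)
		return ret;

	/* Devices that list this one in _DEP can now be enumerated and probed. */
	if (adev)
		acpi_dev_clear_dependencies(adev);

	return 0;
}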
++/**
++ * acpi_dev_get_first_consumer_dev - Return ACPI device dependent on @supplier
++ * @supplier: Pointer to the dependee device
++ *
++ * Returns the first &struct acpi_device which declares itself dependent on
++ * @supplier via the _DEP buffer, parsed from the acpi_dep_list.
++ *
++ * The caller is responsible for putting the reference to adev when it is no
++ * longer needed.
++ */
++struct acpi_device *acpi_dev_get_first_consumer_dev(struct acpi_device *supplier)
++{
++ struct acpi_device *adev = NULL;
++
++ acpi_walk_dep_device_list(supplier->handle,
++ acpi_dev_get_first_consumer_dev_cb, &adev);
++
++ return adev;
}
--EXPORT_SYMBOL_GPL(acpi_walk_dep_device_list);
++EXPORT_SYMBOL_GPL(acpi_dev_get_first_consumer_dev);
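The consumer-lookup side is used the other way around, e.g. by a supplier that wants a handle on whichever device depends on it; a sketch, with the reference dropped as the kerneldoc asks:

	struct acpi_device *consumer;

	consumer = acpi_dev_get_first_consumer_dev(supplier);
	if (consumer) {
		/* ... use the consumer, e.g. its handle or fwnode ... */
		acpi_dev_put(consumer);	/* put the reference taken by the lookup */
	}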
/**
* acpi_bus_scan - Add ACPI device node objects in a given namespace scope.
struct acpi_device *device = NULL;
int result;
-- result = acpi_add_single_object(&device, NULL, type);
++ result = acpi_add_single_object(&device, NULL, type, false);
if (result)
return result;
struct acpi_device *device = NULL;
result = acpi_add_single_object(&device, NULL,
-- ACPI_BUS_TYPE_POWER_BUTTON);
++ ACPI_BUS_TYPE_POWER_BUTTON, false);
if (result)
return result;
struct acpi_device *device = NULL;
result = acpi_add_single_object(&device, NULL,
-- ACPI_BUS_TYPE_SLEEP_BUTTON);
++ ACPI_BUS_TYPE_SLEEP_BUTTON, false);
if (result)
return result;
status = acpi_get_table(ACPI_SIG_SPCR, 0,
(struct acpi_table_header **)&spcr_ptr);
if (ACPI_FAILURE(status)) {
-- pr_warn(PREFIX "STAO table present, but SPCR is missing\n");
++ pr_warn("STAO table present, but SPCR is missing\n");
return;
}
(struct acpi_table_header **)&stao_ptr);
if (ACPI_SUCCESS(status)) {
if (stao_ptr->header.length > sizeof(struct acpi_table_stao))
-- pr_info(PREFIX "STAO Name List not yet supported.\n");
++ pr_info("STAO Name List not yet supported.\n");
if (stao_ptr->ignore_uart)
acpi_get_spcr_uart_addr();
}
}
-- acpi_turn_off_unused_power_resources(true);
++ acpi_turn_off_unused_power_resources();
++
acpi_scan_initialized = true;
out:
return count;
}
--struct acpi_table_events_work {
-- struct work_struct work;
-- void *table;
-- u32 event;
--};
--
static void acpi_table_events_fn(struct work_struct *work)
{
-- struct acpi_table_events_work *tew;
++ acpi_scan_lock_acquire();
++ acpi_bus_scan(ACPI_ROOT_OBJECT);
++ acpi_scan_lock_release();
-- tew = container_of(work, struct acpi_table_events_work, work);
--
-- if (tew->event == ACPI_TABLE_EVENT_LOAD) {
-- acpi_scan_lock_acquire();
-- acpi_bus_scan(ACPI_ROOT_OBJECT);
-- acpi_scan_lock_release();
-- }
--
-- kfree(tew);
++ kfree(work);
}
--void acpi_scan_table_handler(u32 event, void *table, void *context)
++void acpi_scan_table_notify(void)
{
-- struct acpi_table_events_work *tew;
++ struct work_struct *work;
if (!acpi_scan_initialized)
return;
-- if (event != ACPI_TABLE_EVENT_LOAD)
-- return;
--
-- tew = kmalloc(sizeof(*tew), GFP_KERNEL);
-- if (!tew)
++ work = kmalloc(sizeof(*work), GFP_KERNEL);
++ if (!work)
return;
-- INIT_WORK(&tew->work, acpi_table_events_fn);
-- tew->table = table;
-- tew->event = event;
--
-- schedule_work(&tew->work);
++ INIT_WORK(work, acpi_table_events_fn);
++ schedule_work(work);
}
int acpi_reconfig_notifier_register(struct notifier_block *nb)
* Copyright (c) 2003 Open Source Development Lab
*/
++ #define pr_fmt(fmt) "ACPI: PM: " fmt
++
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/dmi.h>
* OS can't evaluate the _TTS object correctly. Some warning
* message will be printed. But it won't break anything.
*/
-- printk(KERN_NOTICE "Failure in evaluating _TTS object\n");
++ pr_notice("Failure in evaluating _TTS object\n");
}
}
}
ACPI_FLUSH_CPU_CACHE();
#endif
-- printk(KERN_INFO PREFIX "Preparing to enter system sleep state S%d\n",
-- acpi_state);
++ pr_info("Preparing to enter system sleep state S%d\n", acpi_state);
acpi_enable_wakeup_devices(acpi_state);
acpi_enter_sleep_state_prep(acpi_state);
return 0;
}
/**
-- * acpi_pre_suspend - Enable wakeup devices, "freeze" EC and save NVS.
++ * acpi_pm_pre_suspend - Enable wakeup devices, "freeze" EC and save NVS.
*/
static int acpi_pm_pre_suspend(void)
{
if (acpi_state == ACPI_STATE_S0)
return;
-- printk(KERN_INFO PREFIX "Waking up from system sleep state S%d\n",
-- acpi_state);
++ pr_info("Waking up from system sleep state S%d\n", acpi_state);
acpi_disable_wakeup_devices(acpi_state);
acpi_leave_sleep_state(acpi_state);
*/
static void acpi_pm_end(void)
{
-- acpi_turn_off_unused_power_resources(false);
++ acpi_turn_off_unused_power_resources();
acpi_scan_lock_release();
/*
* This is necessary in case acpi_pm_finish() is not called during a
error = acpi_suspend_lowlevel();
if (error)
return error;
-- pr_info(PREFIX "Low-level resume complete\n");
++ pr_info("Low-level resume complete\n");
pm_set_resume_via_firmware();
break;
}
acpi_leave_sleep_state_prep(ACPI_STATE_S4);
/* Check the hardware signature */
if (facs && s4_hardware_signature != facs->hardware_signature)
-- pr_crit("ACPI: Hardware changed while hibernated, success doubtful!\n");
++ pr_crit("Hardware changed while hibernated, success doubtful!\n");
/* Restore the NVS memory area */
suspend_nvs_restore();
/* Allow EC transactions to happen. */
return;
acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs);
-- if (facs) {
++ if (facs)
s4_hardware_signature = facs->hardware_signature;
-- acpi_put_table((struct acpi_table_header *)facs);
-- }
}
#else /* !CONFIG_HIBERNATION */
static inline void acpi_sleep_hibernate_setup(void) {}
static void acpi_power_off(void)
{
/* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
-- printk(KERN_DEBUG "%s called\n", __func__);
++ pr_debug("%s called\n", __func__);
local_irq_disable();
acpi_enter_sleep_state(ACPI_STATE_S5);
}
if (sleep_states[i])
pos += sprintf(pos, " S%d", i);
}
-- pr_info(PREFIX "(supports%s)\n", supported);
++ pr_info("(supports%s)\n", supported);
/*
* Register the tts_notifier to reboot notifier list so that the _TTS
#define pr_fmt(fmt) "ACPI: " fmt
++#include <linux/acpi.h>
++#include <linux/bitmap.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
--#include <linux/acpi.h>
#include "internal.h"
{
if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED))
return sprintf(buffer, "disable\n");
-- else {
-- if (acpi_gbl_trace_method_name) {
-- if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT)
-- return sprintf(buffer, "method-once\n");
-- else
-- return sprintf(buffer, "method\n");
-- } else
-- return sprintf(buffer, "enable\n");
-- }
-- return 0;
++ if (!acpi_gbl_trace_method_name)
++ return sprintf(buffer, "enable\n");
++ if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT)
++ return sprintf(buffer, "method-once\n");
++
++ return sprintf(buffer, "method\n");
}
module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
}
table_attr->instance++;
if (table_attr->instance > ACPI_MAX_TABLE_INSTANCES) {
-- pr_warn("%4.4s: too many table instances\n",
-- table_attr->name);
++ pr_warn("%4.4s: too many table instances\n", table_attr->name);
return -ERANGE;
}
switch (event) {
case ACPI_TABLE_EVENT_INSTALL:
-- table_attr =
-- kzalloc(sizeof(struct acpi_table_attr), GFP_KERNEL);
++ table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
if (!table_attr)
return AE_NO_MEMORY;
loff_t offset, size_t count)
{
struct acpi_data_attr *data_attr;
-- void __iomem *base;
++ void *base;
ssize_t rc;
data_attr = container_of(bin_attr, struct acpi_data_attr, attr);
kfree(counter_attrs);
}
kfree(all_attrs);
--
-- return;
}
static void gpe_count(u32 gpe_number)
else
all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
COUNT_ERROR].count++;
--
-- return;
}
static void fixed_event_count(u32 event_number)
else
all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
COUNT_ERROR].count++;
--
-- return;
}
static void acpi_global_event_handler(u32 event_type, acpi_handle device,
goto end;
if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER)) {
-- printk(KERN_WARNING PREFIX
-- "Can not change Invalid GPE/Fixed Event status\n");
++ pr_warn("Can not change Invalid GPE/Fixed Event status\n");
return -EINVAL;
}
* the GPE flooding for GPE 00, they need to specify the following boot
* parameter:
* acpi_mask_gpe=0x00
++ * Note, the parameter can be a list (see bitmap_parselist() for the details).
* The masking status can be modified by the following runtime controlling
* interface:
* echo unmask > /sys/firmware/acpi/interrupts/gpe00
static int __init acpi_gpe_set_masked_gpes(char *val)
{
++ int ret;
u8 gpe;
-- if (kstrtou8(val, 0, &gpe))
-- return -EINVAL;
-- set_bit(gpe, acpi_masked_gpes_map);
++ ret = kstrtou8(val, 0, &gpe);
++ if (ret) {
++ ret = bitmap_parselist(val, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX);
++ if (ret)
++ return ret;
++ } else {
++ set_bit(gpe, acpi_masked_gpes_map);
++ }
return 1;
}
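With the kstrtou8() fallback kept in place, the boot parameter accepts both its old single-GPE form and a range list; example values only, and since the list form is parsed by bitmap_parselist(), plain decimal numbers and ranges are the safe choice there:

	acpi_mask_gpe=0x10		/* one GPE, any base kstrtou8() accepts */
	acpi_mask_gpe=16,20-23		/* list form: GPEs 16 and 20 through 23 */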
num_gpes = acpi_current_gpe_count;
num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA;
-- all_attrs = kcalloc(num_counters + 1, sizeof(struct attribute *),
-- GFP_KERNEL);
++ all_attrs = kcalloc(num_counters + 1, sizeof(*all_attrs), GFP_KERNEL);
if (all_attrs == NULL)
return;
-- all_counters = kcalloc(num_counters, sizeof(struct event_counter),
-- GFP_KERNEL);
++ all_counters = kcalloc(num_counters, sizeof(*all_counters), GFP_KERNEL);
if (all_counters == NULL)
goto fail;
if (ACPI_FAILURE(status))
goto fail;
-- counter_attrs = kcalloc(num_counters, sizeof(struct kobj_attribute),
-- GFP_KERNEL);
++ counter_attrs = kcalloc(num_counters, sizeof(*counter_attrs), GFP_KERNEL);
if (counter_attrs == NULL)
goto fail;
fail:
delete_gpe_attr_array();
-- return;
}
static void __exit interrupt_stats_exit(void)
sysfs_remove_group(acpi_kobj, &interrupt_stats_attr_group);
delete_gpe_attr_array();
--
-- return;
}
--static ssize_t
--acpi_show_profile(struct kobject *kobj, struct kobj_attribute *attr,
-- char *buf)
++static ssize_t pm_profile_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
}
--static const struct kobj_attribute pm_profile_attr =
-- __ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL);
++static const struct kobj_attribute pm_profile_attr = __ATTR_RO(pm_profile);
--static ssize_t hotplug_enabled_show(struct kobject *kobj,
-- struct kobj_attribute *attr, char *buf)
++static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
return sprintf(buf, "%d\n", hotplug->enabled);
}
--static ssize_t hotplug_enabled_store(struct kobject *kobj,
-- struct kobj_attribute *attr,
-- const char *buf, size_t size)
++static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
++ const char *buf, size_t size)
{
struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
unsigned int val;
return size;
}
--static struct kobj_attribute hotplug_enabled_attr =
-- __ATTR(enabled, S_IRUGO | S_IWUSR, hotplug_enabled_show,
-- hotplug_enabled_store);
++static struct kobj_attribute hotplug_enabled_attr = __ATTR_RW(enabled);
static struct attribute *hotplug_profile_attrs[] = {
&hotplug_enabled_attr.attr,
return;
err_out:
-- pr_err(PREFIX "Unable to add hotplug profile '%s'\n", name);
++ pr_err("Unable to add hotplug profile '%s'\n", name);
}
static ssize_t force_remove_show(struct kobject *kobj,
return size;
}
--static const struct kobj_attribute force_remove_attr =
-- __ATTR(force_remove, S_IRUGO | S_IWUSR, force_remove_show,
-- force_remove_store);
++static const struct kobj_attribute force_remove_attr = __ATTR_RW(force_remove);
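The handler renames above (acpi_show_profile to pm_profile_show, hotplug_enabled_show/_store to enabled_show/_store) are what let the __ATTR_RO()/__ATTR_RW() helpers be used, since those macros paste the attribute name into the handler names. Their definitions in include/linux/sysfs.h are essentially:

#define __ATTR_RO(_name) {						\
	.attr	= { .name = __stringify(_name), .mode = 0444 },		\
	.show	= _name##_show,						\
}
#define __ATTR_RW(_name) __ATTR(_name, 0644, _name##_show, _name##_store)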
int __init acpi_sysfs_init(void)
{