extern char line_buf[80];
#endif /*ENABLE_DEBUGGER */
- -static unsigned int acpi_irq_irq;
static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
void __iomem *virt;
acpi_physical_address phys;
acpi_size size;
- - struct kref ref;
+ + unsigned long refcount;
};
static LIST_HEAD(acpi_ioremaps);
- -static DEFINE_SPINLOCK(acpi_ioremap_lock);
+ +static DEFINE_MUTEX(acpi_ioremap_lock);
static void __init acpi_osi_setup_late(void);
return NULL;
}
+ +void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
+ +{
+ + struct acpi_ioremap *map;
+ + void __iomem *virt = NULL;
+ +
+ + mutex_lock(&acpi_ioremap_lock);
+ + map = acpi_map_lookup(phys, size);
+ + if (map) {
+ + virt = map->virt + (phys - map->phys);
+ + map->refcount++;
+ + }
+ + mutex_unlock(&acpi_ioremap_lock);
+ + return virt;
+ +}
+ +EXPORT_SYMBOL_GPL(acpi_os_get_iomem);
+ +
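For illustration only (not part of the patch): one way a caller outside this file might use the new export. The function name is made up, and the balancing call to acpi_os_unmap_memory() is an assumption based on the refcounting shown above.

/* Hypothetical caller: read a 32-bit value from a range the ACPI core is
 * expected to have mapped already.  acpi_os_get_iomem() bumps the mapping's
 * refcount, so the reference is dropped again via acpi_os_unmap_memory(). */
static bool example_peek_u32(acpi_physical_address phys, u32 *val)
{
        void __iomem *addr = acpi_os_get_iomem(phys, sizeof(*val));

        if (!addr)
                return false;   /* range is not currently mapped */

        *val = readl(addr);
        acpi_os_unmap_memory(addr, sizeof(*val));
        return true;
}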
/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
void __iomem *__init_refok
acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
- - struct acpi_ioremap *map, *tmp_map;
- - unsigned long flags;
+ + struct acpi_ioremap *map;
void __iomem *virt;
acpi_physical_address pg_off;
acpi_size pg_sz;
if (!acpi_gbl_permanent_mmap)
return __acpi_map_table((unsigned long)phys, size);
+ + mutex_lock(&acpi_ioremap_lock);
+ + /* Check if there's a suitable mapping already. */
+ + map = acpi_map_lookup(phys, size);
+ + if (map) {
+ + map->refcount++;
+ + goto out;
+ + }
+ +
map = kzalloc(sizeof(*map), GFP_KERNEL);
- - if (!map)
+ + if (!map) {
+ + mutex_unlock(&acpi_ioremap_lock);
return NULL;
+ + }
pg_off = round_down(phys, PAGE_SIZE);
pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
virt = acpi_os_ioremap(pg_off, pg_sz);
if (!virt) {
+ + mutex_unlock(&acpi_ioremap_lock);
kfree(map);
return NULL;
}
map->virt = virt;
map->phys = pg_off;
map->size = pg_sz;
- - kref_init(&map->ref);
- -
- - spin_lock_irqsave(&acpi_ioremap_lock, flags);
- - /* Check if page has already been mapped. */
- - tmp_map = acpi_map_lookup(phys, size);
- - if (tmp_map) {
- - kref_get(&tmp_map->ref);
- - spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
- - iounmap(map->virt);
- - kfree(map);
- - return tmp_map->virt + (phys - tmp_map->phys);
- - }
+ + map->refcount = 1;
+ +
list_add_tail_rcu(&map->list, &acpi_ioremaps);
- - spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
+ + out:
+ + mutex_unlock(&acpi_ioremap_lock);
return map->virt + (phys - map->phys);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);
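The RCU list primitives here (list_add_tail_rcu() above, synchronize_rcu() before iounmap() below) exist so that hot paths can look mappings up without taking acpi_ioremap_lock. A minimal sketch of such a reader, modeled on how this file reads physical memory; the function name is illustrative and it assumes acpi_map_lookup() follows the same "mutex or RCU read lock held" rule stated for acpi_map_lookup_virt().

static u32 example_rcu_read32(acpi_physical_address phys)
{
        struct acpi_ioremap *map;
        u32 val = 0;

        rcu_read_lock();
        map = acpi_map_lookup(phys, sizeof(val));
        if (map)
                val = readl(map->virt + (phys - map->phys));
        rcu_read_unlock();      /* past this point the mapping may be torn down */

        return val;
}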
- -static void acpi_kref_del_iomap(struct kref *ref)
+ +static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
{
- - struct acpi_ioremap *map;
+ + if (!--map->refcount)
+ + list_del_rcu(&map->list);
+ +}
- - map = container_of(ref, struct acpi_ioremap, ref);
- - list_del_rcu(&map->list);
+ +static void acpi_os_map_cleanup(struct acpi_ioremap *map)
+ +{
+ + if (!map->refcount) {
+ + synchronize_rcu();
+ + iounmap(map->virt);
+ + kfree(map);
+ + }
}
void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
{
struct acpi_ioremap *map;
- - unsigned long flags;
- - int del;
if (!acpi_gbl_permanent_mmap) {
__acpi_unmap_table(virt, size);
return;
}
- - spin_lock_irqsave(&acpi_ioremap_lock, flags);
+ + mutex_lock(&acpi_ioremap_lock);
map = acpi_map_lookup_virt(virt, size);
if (!map) {
- - spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
- - printk(KERN_ERR PREFIX "%s: bad address %p\n", __func__, virt);
- - dump_stack();
+ + mutex_unlock(&acpi_ioremap_lock);
+ + WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
return;
}
+ + acpi_os_drop_map_ref(map);
+ + mutex_unlock(&acpi_ioremap_lock);
- - del = kref_put(&map->ref, acpi_kref_del_iomap);
- - spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
- -
- - if (!del)
- - return;
- -
- - synchronize_rcu();
- - iounmap(map->virt);
- - kfree(map);
+ + acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
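As a usage illustration of the new semantics (the address and size below are purely hypothetical): mapping an already-mapped range no longer creates and immediately destroys a second ioremap(); it just takes another reference, and the real teardown happens when the last user unmaps.

static void example_map_twice(void)
{
        void __iomem *a, *b;

        a = acpi_os_map_memory(0xfed40000, 0x80);       /* illustrative range */
        b = acpi_os_map_memory(0xfed40000, 0x80);       /* reuses a's mapping */

        if (a && b)
                pr_debug("first byte: %#x\n", readb(b)); /* both alias one ioremap() */

        if (b)
                acpi_os_unmap_memory(b, 0x80);  /* refcount 2 -> 1 */
        if (a)
                acpi_os_unmap_memory(a, 0x80);  /* 1 -> 0: synchronize_rcu() + iounmap() */
}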
__acpi_unmap_table(virt, size);
}
- -int acpi_os_map_generic_address(struct acpi_generic_address *addr)
+ +static int acpi_os_map_generic_address(struct acpi_generic_address *addr)
{
void __iomem *virt;
return 0;
}
- -EXPORT_SYMBOL_GPL(acpi_os_map_generic_address);
- -void acpi_os_unmap_generic_address(struct acpi_generic_address *addr)
+ +static void acpi_os_unmap_generic_address(struct acpi_generic_address *addr)
{
- - void __iomem *virt;
- - unsigned long flags;
- - acpi_size size = addr->bit_width / 8;
+ + struct acpi_ioremap *map;
if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
return;
if (!addr->address || !addr->bit_width)
return;
- - spin_lock_irqsave(&acpi_ioremap_lock, flags);
- - virt = acpi_map_vaddr_lookup(addr->address, size);
- - spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
+ + mutex_lock(&acpi_ioremap_lock);
+ + map = acpi_map_lookup(addr->address, addr->bit_width / 8);
+ + if (!map) {
+ + mutex_unlock(&acpi_ioremap_lock);
+ + return;
+ + }
+ + acpi_os_drop_map_ref(map);
+ + mutex_unlock(&acpi_ioremap_lock);
- - acpi_os_unmap_memory(virt, size);
+ + acpi_os_map_cleanup(map);
}
- -EXPORT_SYMBOL_GPL(acpi_os_unmap_generic_address);
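With both helpers now static, their callers are confined to this file. A rough sketch of the kind of pairing involved; which FADT register block is actually mapped is an assumption, and the function names are illustrative.

static int __init example_map_fadt_block(void)
{
        /* Map one generic-address register block from the FADT... */
        return acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
}

static void example_unmap_fadt_block(void)
{
        /* ...and drop the reference again at teardown. */
        acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
}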
#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_irq_stats_init();
/*
- - * Ignore the GSI from the core, and use the value in our copy of the
- - * FADT. It may not be the same if an interrupt source override exists
- - * for the SCI.
+ + * ACPI interrupts different from the SCI in our copy of the FADT are
+ + * not supported.
*/
- - gsi = acpi_gbl_FADT.sci_interrupt;
+ + if (gsi != acpi_gbl_FADT.sci_interrupt)
+ + return AE_BAD_PARAMETER;
+ +
+ + if (acpi_irq_handler)
+ + return AE_ALREADY_ACQUIRED;
+ +
if (acpi_gsi_to_irq(gsi, &irq) < 0) {
printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
gsi);
acpi_irq_context = context;
if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
+ + acpi_irq_handler = NULL;
return AE_NOT_ACQUIRED;
}
- - acpi_irq_irq = irq;
return AE_OK;
}
acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
{
- - if (irq) {
- - free_irq(irq, acpi_irq);
- - acpi_irq_handler = NULL;
- - acpi_irq_irq = 0;
- - }
+ + if (irq != acpi_gbl_FADT.sci_interrupt)
+ + return AE_BAD_PARAMETER;
+ +
+ + free_irq(irq, acpi_irq);
+ + acpi_irq_handler = NULL;
return AE_OK;
}
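For illustration (handler and function names are made up): with the checks added above, only the FADT's SCI GSI is accepted, a second install returns AE_ALREADY_ACQUIRED, and removal must name the same GSI, as acpi_os_terminate() now does.

static u32 example_sci_handler(void *context)
{
        /* ... service the SCI ... */
        return ACPI_INTERRUPT_HANDLED;
}

static void example_sci_setup_teardown(void)
{
        acpi_status status;

        status = acpi_os_install_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
                                                   example_sci_handler, NULL);
        if (ACPI_FAILURE(status))
                return;

        /* A second install here would fail with AE_ALREADY_ACQUIRED. */

        acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
                                         example_sci_handler);
}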
EXPORT_SYMBOL(acpi_os_wait_events_complete);
------/*
------ * Deallocate the memory for a spinlock.
------ */
------void acpi_os_delete_lock(acpi_spinlock handle)
------{
------ return;
------}
------
acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
{
}
EXPORT_SYMBOL(acpi_resources_are_enforced);
++++++/*
++++++ * Create and initialize a spinlock.
++++++ */
++++++acpi_status
++++++acpi_os_create_lock(acpi_spinlock *out_handle)
++++++{
++++++ spinlock_t *lock;
++++++
++++++ lock = ACPI_ALLOCATE(sizeof(spinlock_t));
++++++ if (!lock)
++++++ return AE_NO_MEMORY;
++++++ spin_lock_init(lock);
++++++ *out_handle = lock;
++++++
++++++ return AE_OK;
++++++}
++++++
++++++/*
++++++ * Deallocate the memory for a spinlock.
++++++ */
++++++void acpi_os_delete_lock(acpi_spinlock handle)
++++++{
++++++ ACPI_FREE(handle);
++++++}
++++++
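A quick sketch of the full lifecycle these two new primitives complete, together with the acquire/release helpers already defined below in this file (the function name is illustrative):

static acpi_status example_use_osl_lock(void)
{
        acpi_spinlock lock;
        acpi_cpu_flags flags;
        acpi_status status;

        status = acpi_os_create_lock(&lock);
        if (ACPI_FAILURE(status))
                return status;

        flags = acpi_os_acquire_lock(lock);
        /* ... touch state shared with ACPICA ... */
        acpi_os_release_lock(lock, flags);

        acpi_os_delete_lock(lock);
        return AE_OK;
}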
/*
* Acquire a spinlock.
*
acpi_status __init acpi_os_initialize1(void)
{
- - kacpid_wq = create_workqueue("kacpid");
- - kacpi_notify_wq = create_workqueue("kacpi_notify");
- - kacpi_hotplug_wq = create_workqueue("kacpi_hotplug");
+ + kacpid_wq = alloc_workqueue("kacpid", 0, 1);
+ + kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
+ + kacpi_hotplug_wq = alloc_workqueue("kacpi_hotplug", 0, 1);
BUG_ON(!kacpid_wq);
BUG_ON(!kacpi_notify_wq);
BUG_ON(!kacpi_hotplug_wq);
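For context, a sketch of how work is typically fed to these queues elsewhere in this file; acpi_os_execute() wraps the ACPICA callback in a work item roughly like this (struct and function names below are illustrative):

struct example_dpc {
        struct work_struct work;
        acpi_osd_exec_callback function;
        void *context;
};

static void example_dpc_run(struct work_struct *work)
{
        struct example_dpc *dpc = container_of(work, struct example_dpc, work);

        dpc->function(dpc->context);
        kfree(dpc);
}

static acpi_status example_queue_on_kacpid(acpi_osd_exec_callback fn, void *ctx)
{
        struct example_dpc *dpc;

        dpc = kzalloc(sizeof(*dpc), GFP_ATOMIC);
        if (!dpc)
                return AE_NO_MEMORY;

        dpc->function = fn;
        dpc->context = ctx;
        INIT_WORK(&dpc->work, example_dpc_run);

        if (!queue_work(kacpid_wq, &dpc->work)) {
                kfree(dpc);     /* queueing failed; do not leak the wrapper */
                return AE_ERROR;
        }
        return AE_OK;
}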
acpi_status acpi_os_terminate(void)
{
if (acpi_irq_handler) {
- - acpi_os_remove_interrupt_handler(acpi_irq_irq,
+ + acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
acpi_irq_handler);
}