select RTC_LIB
select SYS_SUPPORTS_APM_EMULATION
select HAVE_OPROFILE
+ select HAVE_ARCH_KGDB
select HAVE_KPROBES if (!XIP_KERNEL)
select HAVE_KRETPROBES if (HAVE_KPROBES)
select HAVE_FTRACE if (!XIP_KERNEL)
select HAVE_DYNAMIC_FTRACE if (HAVE_FTRACE)
+ select HAVE_GENERIC_DMA_COHERENT
help
The ARM series is a line of low-power-consumption RISC chip designs
licensed by ARM Ltd and targeted at embedded applications and
config ARCH_AAEC2000
bool "Agilent AAEC-2000 based"
select ARM_AMBA
+ select HAVE_CLK
help
This enables support for systems based on the Agilent AAEC-2000
config ARCH_INTEGRATOR
bool "ARM Ltd. Integrator family"
select ARM_AMBA
+ select HAVE_CLK
select ICST525
help
Support for ARM's Integrator platform.
config ARCH_REALVIEW
bool "ARM Ltd. RealView family"
select ARM_AMBA
+ select HAVE_CLK
select ICST307
select GENERIC_TIME
select GENERIC_CLOCKEVENTS
bool "ARM Ltd. Versatile family"
select ARM_AMBA
select ARM_VIC
+ select HAVE_CLK
select ICST307
select GENERIC_TIME
select GENERIC_CLOCKEVENTS
select ARM_AMBA
select ARM_VIC
select GENERIC_GPIO
- select HAVE_GPIO_LIB
+ select HAVE_CLK
+ select ARCH_REQUIRE_GPIOLIB
help
This enables support for the Cirrus EP93xx series of CPUs.
select PLAT_IOP
select PCI
select GENERIC_GPIO
- select HAVE_GPIO_LIB
+ select ARCH_REQUIRE_GPIOLIB
help
Support for Intel's 80219 and IOP32X (XScale) family of
processors.
select PLAT_IOP
select PCI
select GENERIC_GPIO
- select HAVE_GPIO_LIB
+ select ARCH_REQUIRE_GPIOLIB
help
Support for Intel's IOP33X (XScale) family of processors.
select GENERIC_GPIO
select GENERIC_TIME
select GENERIC_CLOCKEVENTS
+ select HAVE_CLK
help
Say Y here if you intend to run this kernel on a NetSilicon NS9xxx
System.
select GENERIC_CLOCKEVENTS
select ARCH_MTD_XIP
select GENERIC_GPIO
- select HAVE_GPIO_LIB
+ select ARCH_REQUIRE_GPIOLIB
help
Support for Freescale MXC/iMX-based family of processors
config ARCH_PNX4008
bool "Philips Nexperia PNX4008 Mobile"
+ select HAVE_CLK
help
This enables support for Philips PNX4008 mobile platform.
depends on MMU
select ARCH_MTD_XIP
select GENERIC_GPIO
- select HAVE_GPIO_LIB
+ select HAVE_CLK
+ select ARCH_REQUIRE_GPIOLIB
select GENERIC_TIME
select GENERIC_CLOCKEVENTS
select TICK_ONESHOT
select GENERIC_GPIO
select GENERIC_TIME
select GENERIC_CLOCKEVENTS
+ select HAVE_CLK
select TICK_ONESHOT
- select HAVE_GPIO_LIB
+ select ARCH_REQUIRE_GPIOLIB
help
Support for StrongARM 11x0 based boards.
config ARCH_S3C2410
bool "Samsung S3C2410, S3C2412, S3C2413, S3C2440, S3C2442, S3C2443"
select GENERIC_GPIO
+ select HAVE_CLK
help
Samsung S3C2410X CPU based systems, such as the Simtec Electronics
BAST (<http://www.simtec.co.uk/products/EB110ITX/>), the IPAQ 1940 or
select GENERIC_TIME
select GENERIC_CLOCKEVENTS
select GENERIC_GPIO
+ select HAVE_CLK
help
Support for TI's DaVinci platform.
config ARCH_OMAP
bool "TI OMAP"
select GENERIC_GPIO
- select HAVE_GPIO_LIB
+ select HAVE_CLK
+ select ARCH_REQUIRE_GPIOLIB
select GENERIC_TIME
select GENERIC_CLOCKEVENTS
help
config SUPERH
def_bool y
select EMBEDDED
+ select HAVE_CLK
select HAVE_IDE
select HAVE_OPROFILE
+ select HAVE_GENERIC_DMA_COHERENT
help
The SuperH is a RISC processor targeted for use in embedded systems
and consumer electronics; it was also used in the Sega Dreamcast
Select RTS7751R2D if configuring for a Renesas Technology
Sales SH-Graphics board.
+ config SH_RSK7203
+ bool "RSK7203"
+ depends on CPU_SUBTYPE_SH7203
+
config SH_SDK7780
bool "SDK7780R3"
depends on CPU_SUBTYPE_SH7780
select SYS_SUPPORTS_PCI
select IO_TRAPPED
+ config SH_SH7785LCR
+ bool "SH7785LCR"
+ depends on CPU_SUBTYPE_SH7785
+ select SYS_SUPPORTS_PCI
+ select IO_TRAPPED
+
+ config SH_SH7785LCR_29BIT_PHYSMAPS
+ bool "SH7785LCR 29bit physmaps"
+ depends on SH_SH7785LCR
+ default y
+ help
+ This board has two physical memory maps. The map can be changed with
+ DIP switch (S2-5). If you set DIP switch S2-5 = ON, you can access
+ all on-board devices in 29-bit address mode.
+
config SH_MIGOR
bool "Migo-R"
depends on CPU_SUBTYPE_SH7722
Select Migo-R if configuring for the SH7722 Migo-R platform
by Renesas System Solutions Asia Pte. Ltd.
+ config SH_AP325RXA
+ bool "AP-325RXA"
+ depends on CPU_SUBTYPE_SH7723
+ help
+ Renesas "AP-325RXA" support.
+ Compatible with ALGO SYSTEM CO., LTD. "AP-320A"
+
+ config SH_SH7763RDP
+ bool "SH7763RDP"
+ depends on CPU_SUBTYPE_SH7763
+ help
+ Select SH7763RDP if configuring for a Renesas SH7763
+ evaluation board.
+
config SH_EDOSK7705
bool "EDOSK7705"
depends on CPU_SUBTYPE_SH7705
source "arch/sh/boards/renesas/rts7751r2d/Kconfig"
source "arch/sh/boards/renesas/r7780rp/Kconfig"
source "arch/sh/boards/renesas/sdk7780/Kconfig"
+ source "arch/sh/boards/renesas/migor/Kconfig"
source "arch/sh/boards/magicpanelr2/Kconfig"
menu "Timer and clock configuration"
* for more details.
*/
#include <linux/mm.h>
+ #include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <asm/addrspace.h>
dma_addr_t *dma_handle, gfp_t gfp)
{
void *ret, *ret_nocache;
- struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
int order = get_order(size);
- if (mem) {
- int page = bitmap_find_free_region(mem->bitmap, mem->size,
- order);
- if (page >= 0) {
- *dma_handle = mem->device_base + (page << PAGE_SHIFT);
- ret = mem->virt_base + (page << PAGE_SHIFT);
- memset(ret, 0, size);
- return ret;
- }
- if (mem->flags & DMA_MEMORY_EXCLUSIVE)
- return NULL;
- }
+ if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
+ return ret;
ret = (void *)__get_free_pages(gfp, order);
if (!ret)
struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
int order = get_order(size);
- if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
- int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
-
- bitmap_release_region(mem->bitmap, page, order);
- } else {
+ if (!dma_release_from_coherent(dev, order, vaddr)) {
WARN_ON(irqs_disabled()); /* for portability */
BUG_ON(mem && mem->flags & DMA_MEMORY_EXCLUSIVE);
free_pages((unsigned long)phys_to_virt(dma_handle), order);
}
EXPORT_SYMBOL(dma_free_coherent);
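With the open-coded pool handling gone, drivers that declare per-device
coherent memory keep working unchanged through the generic helpers. A
minimal usage sketch — the device, addresses and sizes below are made up
for illustration, not taken from this patch:

	#include <linux/dma-mapping.h>

	static int example_probe(struct device *dev)
	{
		dma_addr_t handle;
		void *vaddr;

		/* hand the device a dedicated 64KiB coherent region
		 * (hypothetical bus/device address) */
		if (!dma_declare_coherent_memory(dev, 0xa0000000, 0xa0000000,
						 0x10000, DMA_MEMORY_MAP))
			return -ENOMEM;

		/* dma_alloc_coherent() now tries the declared pool first,
		 * via dma_alloc_from_coherent(), before falling back to
		 * the page allocator */
		vaddr = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
		if (!vaddr)
			return -ENOMEM;

		dma_free_coherent(dev, PAGE_SIZE, vaddr, handle);
		dma_release_declared_memory(dev);
		return 0;
	}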
-int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
- dma_addr_t device_addr, size_t size, int flags)
-{
- void __iomem *mem_base = NULL;
- int pages = size >> PAGE_SHIFT;
- int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
-
- if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
- goto out;
- if (!size)
- goto out;
- if (dev->dma_mem)
- goto out;
-
- /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
-
- mem_base = ioremap_nocache(bus_addr, size);
- if (!mem_base)
- goto out;
-
- dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
- if (!dev->dma_mem)
- goto out;
- dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
- if (!dev->dma_mem->bitmap)
- goto free1_out;
-
- dev->dma_mem->virt_base = mem_base;
- dev->dma_mem->device_base = device_addr;
- dev->dma_mem->size = pages;
- dev->dma_mem->flags = flags;
-
- if (flags & DMA_MEMORY_MAP)
- return DMA_MEMORY_MAP;
-
- return DMA_MEMORY_IO;
-
- free1_out:
- kfree(dev->dma_mem);
- out:
- if (mem_base)
- iounmap(mem_base);
- return 0;
-}
-EXPORT_SYMBOL(dma_declare_coherent_memory);
-
-void dma_release_declared_memory(struct device *dev)
-{
- struct dma_coherent_mem *mem = dev->dma_mem;
-
- if (!mem)
- return;
- dev->dma_mem = NULL;
- iounmap(mem->virt_base);
- kfree(mem->bitmap);
- kfree(mem);
-}
-EXPORT_SYMBOL(dma_release_declared_memory);
-
-void *dma_mark_declared_memory_occupied(struct device *dev,
- dma_addr_t device_addr, size_t size)
-{
- struct dma_coherent_mem *mem = dev->dma_mem;
- int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
- int pos, err;
-
- if (!mem)
- return ERR_PTR(-EINVAL);
-
- pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
- err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
- if (err != 0)
- return ERR_PTR(err);
- return mem->virt_base + (pos << PAGE_SHIFT);
-}
-EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
-
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
}
}
EXPORT_SYMBOL(dma_cache_sync);
+
+ int platform_resource_setup_memory(struct platform_device *pdev,
+ char *name, unsigned long memsize)
+ {
+ struct resource *r;
+ dma_addr_t dma_handle;
+ void *buf;
+
+ r = pdev->resource + pdev->num_resources - 1;
+ if (r->flags) {
+ pr_warning("%s: unable to find empty space for resource\n",
+ name);
+ return -EINVAL;
+ }
+
+ buf = dma_alloc_coherent(NULL, memsize, &dma_handle, GFP_KERNEL);
+ if (!buf) {
+ pr_warning("%s: unable to allocate memory\n", name);
+ return -ENOMEM;
+ }
+
+ memset(buf, 0, memsize);
+
+ r->flags = IORESOURCE_MEM;
+ r->start = dma_handle;
+ r->end = r->start + memsize - 1;
+ r->name = name;
+ return 0;
+ }
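The helper above claims the last, still-empty resource slot of a platform
device and backs it with zeroed coherent memory. A hypothetical board-file
sketch — device name, register window and size are illustrative only:

	static struct resource example_lcdc_resources[] = {
		[0] = {
			.start	= 0xfe940000,	/* register window */
			.end	= 0xfe941fff,
			.flags	= IORESOURCE_MEM,
		},
		[1] = {
			/* left empty; filled in by
			 * platform_resource_setup_memory() */
		},
	};

	static struct platform_device example_lcdc_device = {
		.name		= "example_lcdc",
		.num_resources	= ARRAY_SIZE(example_lcdc_resources),
		.resource	= example_lcdc_resources,
	};

	static int __init example_devices_setup(void)
	{
		/* reserve 4MiB of coherent memory as the framebuffer */
		platform_resource_setup_memory(&example_lcdc_device,
					       "example_fb", 4 << 20);
		return platform_device_register(&example_lcdc_device);
	}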
select HAVE_UNSTABLE_SCHED_CLOCK
select HAVE_IDE
select HAVE_OPROFILE
+ select HAVE_IOREMAP_PROT
+ select HAVE_GET_USER_PAGES_FAST
select HAVE_KPROBES
+ select ARCH_WANT_OPTIONAL_GPIOLIB
select HAVE_KRETPROBES
select HAVE_DYNAMIC_FTRACE
select HAVE_FTRACE
select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
select HAVE_ARCH_KGDB if !X86_VOYAGER
+ select HAVE_GENERIC_DMA_COHERENT if X86_32
+ select HAVE_EFFICIENT_UNALIGNED_ACCESS
config ARCH_DEFCONFIG
string
endif
- config X86_RDC321X
- bool "RDC R-321x SoC"
- depends on X86_32
- select M486
- select X86_REBOOTFIXUPS
- select GENERIC_GPIO
- select LEDS_CLASS
- select LEDS_GPIO
- select NEW_LEDS
- help
- This option is needed for RDC R-321x system-on-chip, also known
- as R-8610-(G).
- If you don't have one of these chips, you should say N here.
-
config X86_VSMP
bool "Support for ScaleMP vSMP"
select PARAVIRT
A kernel compiled for the Visual Workstation will run on general
PCs as well. See <file:Documentation/sgi-visws.txt> for details.
+ config X86_RDC321X
+ bool "RDC R-321x SoC"
+ depends on X86_32
+ select M486
+ select X86_REBOOTFIXUPS
+ help
+ This option is needed for RDC R-321x system-on-chip, also known
+ as R-8610-(G).
+ If you don't have one of these chips, you should say N here.
+
config SCHED_NO_NO_OMIT_FRAME_POINTER
def_bool y
prompt "Single-depth WCHAN output"
config MEMTEST
bool "Memtest"
- depends on X86_64
help
This option adds a kernel parameter 'memtest', which allows memtest
to be set.
(CONFIG_RELOCATABLE=y).
For more details see Documentation/kdump/kdump.txt
+ config KEXEC_JUMP
+ bool "kexec jump (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ depends on KEXEC && HIBERNATION && X86_32
+ help
+ Jump between the original kernel and the kexec'd kernel, and
+ invoke code in physical address mode via KEXEC.
+
config PHYSICAL_START
hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
default "0x1000000" if X86_NUMAQ
#include <asm/proto.h>
#include <asm/dma.h>
- #include <asm/gart.h>
+ #include <asm/iommu.h>
#include <asm/calgary.h>
#include <asm/amd_iommu.h>
- int forbid_dac __read_mostly;
- EXPORT_SYMBOL(forbid_dac);
+ static int forbid_dac __read_mostly;
- const struct dma_mapping_ops *dma_ops;
+ struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
static int iommu_sac_force __read_mostly;
* The order of these functions is important for
* fall-back/fail-over reasons
*/
- #ifdef CONFIG_GART_IOMMU
gart_iommu_hole_init();
- #endif
- #ifdef CONFIG_CALGARY_IOMMU
detect_calgary();
- #endif
detect_intel_iommu();
amd_iommu_detect();
- #ifdef CONFIG_SWIOTLB
pci_swiotlb_init();
- #endif
}
#endif
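The #ifdefs can come out because the IOMMU headers now carry empty inline
stubs when the corresponding option is configured off. A sketch of the
pattern, assuming asm/calgary.h follows the usual stub convention:

	#ifdef CONFIG_CALGARY_IOMMU
	extern void detect_calgary(void);
	extern void calgary_iommu_init(void);
	#else
	static inline void detect_calgary(void) { }
	static inline void calgary_iommu_init(void) { }
	#endif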
swiotlb = 1;
#endif
- #ifdef CONFIG_GART_IOMMU
gart_parse_options(p);
- #endif
#ifdef CONFIG_CALGARY_IOMMU
if (!strncmp(p, "calgary", 7))
}
early_param("iommu", iommu_setup);
-#ifdef CONFIG_X86_32
-int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
- dma_addr_t device_addr, size_t size, int flags)
-{
- void __iomem *mem_base = NULL;
- int pages = size >> PAGE_SHIFT;
- int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
-
- if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
- goto out;
- if (!size)
- goto out;
- if (dev->dma_mem)
- goto out;
-
- /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
-
- mem_base = ioremap(bus_addr, size);
- if (!mem_base)
- goto out;
-
- dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
- if (!dev->dma_mem)
- goto out;
- dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
- if (!dev->dma_mem->bitmap)
- goto free1_out;
-
- dev->dma_mem->virt_base = mem_base;
- dev->dma_mem->device_base = device_addr;
- dev->dma_mem->size = pages;
- dev->dma_mem->flags = flags;
-
- if (flags & DMA_MEMORY_MAP)
- return DMA_MEMORY_MAP;
-
- return DMA_MEMORY_IO;
-
- free1_out:
- kfree(dev->dma_mem);
- out:
- if (mem_base)
- iounmap(mem_base);
- return 0;
-}
-EXPORT_SYMBOL(dma_declare_coherent_memory);
-
-void dma_release_declared_memory(struct device *dev)
-{
- struct dma_coherent_mem *mem = dev->dma_mem;
-
- if (!mem)
- return;
- dev->dma_mem = NULL;
- iounmap(mem->virt_base);
- kfree(mem->bitmap);
- kfree(mem);
-}
-EXPORT_SYMBOL(dma_release_declared_memory);
-
-void *dma_mark_declared_memory_occupied(struct device *dev,
- dma_addr_t device_addr, size_t size)
-{
- struct dma_coherent_mem *mem = dev->dma_mem;
- int pos, err;
- int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1);
-
- pages >>= PAGE_SHIFT;
-
- if (!mem)
- return ERR_PTR(-EINVAL);
-
- pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
- err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
- if (err != 0)
- return ERR_PTR(err);
- return mem->virt_base + (pos << PAGE_SHIFT);
-}
-EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
-
-static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size,
- dma_addr_t *dma_handle, void **ret)
-{
- struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
- int order = get_order(size);
-
- if (mem) {
- int page = bitmap_find_free_region(mem->bitmap, mem->size,
- order);
- if (page >= 0) {
- *dma_handle = mem->device_base + (page << PAGE_SHIFT);
- *ret = mem->virt_base + (page << PAGE_SHIFT);
- memset(*ret, 0, size);
- }
- if (mem->flags & DMA_MEMORY_EXCLUSIVE)
- *ret = NULL;
- }
- return (mem != NULL);
-}
-
-static int dma_release_coherent(struct device *dev, int order, void *vaddr)
-{
- struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
-
- if (mem && vaddr >= mem->virt_base && vaddr <
- (mem->virt_base + (mem->size << PAGE_SHIFT))) {
- int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
-
- bitmap_release_region(mem->bitmap, page, order);
- return 1;
- }
- return 0;
-}
-#else
-#define dma_alloc_from_coherent_mem(dev, size, handle, ret) (0)
-#define dma_release_coherent(dev, order, vaddr) (0)
-#endif /* CONFIG_X86_32 */
-
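Everything deleted above moves, near-verbatim, into the new
kernel/dma-coherent.c, built when an architecture selects
HAVE_GENERIC_DMA_COHERENT. Its core is the pool allocator the arch code
now calls first — a sketch reconstructed from the removed x86 helper,
renamed to the generic entry point:

	int dma_alloc_from_coherent(struct device *dev, ssize_t size,
				    dma_addr_t *dma_handle, void **ret)
	{
		struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
		int order = get_order(size);

		if (mem) {
			int page = bitmap_find_free_region(mem->bitmap,
							   mem->size, order);
			if (page >= 0) {
				*dma_handle = mem->device_base +
					      (page << PAGE_SHIFT);
				*ret = mem->virt_base + (page << PAGE_SHIFT);
				memset(*ret, 0, size);
			}
			if (mem->flags & DMA_MEMORY_EXCLUSIVE)
				*ret = NULL;
		}
		/* nonzero tells the caller a per-device pool exists and
		 * *ret is the answer; it must not fall back to the page
		 * allocator */
		return (mem != NULL);
	}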
int dma_supported(struct device *dev, u64 mask)
{
+ struct dma_mapping_ops *ops = get_dma_ops(dev);
+
#ifdef CONFIG_PCI
if (mask > 0xffffffff && forbid_dac > 0) {
- printk(KERN_INFO "PCI: Disallowing DAC for device %s\n",
- dev->bus_id);
+ dev_info(dev, "PCI: Disallowing DAC for device\n");
return 0;
}
#endif
- if (dma_ops->dma_supported)
- return dma_ops->dma_supported(dev, mask);
+ if (ops->dma_supported)
+ return ops->dma_supported(dev, mask);
/* Copied from i386. Doesn't make much sense, because it will
only work for pci_alloc_coherent.
type. Normally this doesn't make any difference, but gives
more gentle handling of IOMMU overflow. */
if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
- printk(KERN_INFO "%s: Force SAC with mask %Lx\n",
- dev->bus_id, mask);
+ dev_info(dev, "Force SAC with mask %Lx\n", mask);
return 0;
}
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp)
{
+ struct dma_mapping_ops *ops = get_dma_ops(dev);
void *memory = NULL;
struct page *page;
unsigned long dma_mask = 0;
/* ignore region specifiers */
gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
- if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory))
+ if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
return memory;
if (!dev) {
/* Let low level make its own zone decisions */
gfp &= ~(GFP_DMA32|GFP_DMA);
- if (dma_ops->alloc_coherent)
- return dma_ops->alloc_coherent(dev, size,
+ if (ops->alloc_coherent)
+ return ops->alloc_coherent(dev, size,
dma_handle, gfp);
return NULL;
}
}
}
- if (dma_ops->alloc_coherent) {
+ if (ops->alloc_coherent) {
free_pages((unsigned long)memory, get_order(size));
gfp &= ~(GFP_DMA|GFP_DMA32);
- return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
+ return ops->alloc_coherent(dev, size, dma_handle, gfp);
}
- if (dma_ops->map_simple) {
- *dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
+ if (ops->map_simple) {
+ *dma_handle = ops->map_simple(dev, virt_to_phys(memory),
size,
PCI_DMA_BIDIRECTIONAL);
if (*dma_handle != bad_dma_address)
void dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t bus)
{
+ struct dma_mapping_ops *ops = get_dma_ops(dev);
+
int order = get_order(size);
WARN_ON(irqs_disabled()); /* for portability */
- if (dma_release_coherent(dev, order, vaddr))
+ if (dma_release_from_coherent(dev, order, vaddr))
return;
- if (dma_ops->unmap_single)
- dma_ops->unmap_single(dev, bus, size, 0);
+ if (ops->unmap_single)
+ ops->unmap_single(dev, bus, size, 0);
free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL(dma_free_coherent);
static int __init pci_iommu_init(void)
{
- #ifdef CONFIG_CALGARY_IOMMU
calgary_iommu_init();
- #endif
intel_iommu_init();
amd_iommu_init();
- #ifdef CONFIG_GART_IOMMU
gart_iommu_init();
- #endif
no_iommu_init();
return 0;
#include <linux/scatterlist.h>
+#include <asm-generic/dma-coherent.h>
+
/*
* DMA-consistent mapping functions. These allocate/free a region of
* uncached, unwrite-buffered mapped memory space for use with DMA
/*
* DMA errors are defined by all-bits-set in the DMA address.
*/
- static inline int dma_mapping_error(dma_addr_t dma_addr)
+ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return dma_addr == ~0;
}
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#ifdef CONFIG_PCI
+#include <asm-generic/dma-coherent.h>
+
void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag);
}
static inline int
- dma_mapping_error(dma_addr_t dma_addr)
+ dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return 0;
}
#include <linux/scatterlist.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
+#include <asm-generic/dma-coherent.h>
extern struct bus_type pci_bus_type;
return L1_CACHE_BYTES;
}
- static inline int dma_mapping_error(dma_addr_t dma_addr)
+ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return dma_addr == 0;
}
extern int iommu_merge;
extern struct device fallback_dev;
extern int panic_on_overflow;
- extern int forbid_dac;
extern int force_iommu;
struct dma_mapping_ops {
- int (*mapping_error)(dma_addr_t dma_addr);
+ int (*mapping_error)(struct device *dev,
+ dma_addr_t dma_addr);
void* (*alloc_coherent)(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp);
void (*free_coherent)(struct device *dev, size_t size,
int is_phys;
};
- extern const struct dma_mapping_ops *dma_ops;
+ extern struct dma_mapping_ops *dma_ops;
- static inline int dma_mapping_error(dma_addr_t dma_addr)
+ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
- if (dma_ops->mapping_error)
- return dma_ops->mapping_error(dma_addr);
+ #ifdef CONFIG_X86_32
+ return dma_ops;
+ #else
+ if (unlikely(!dev) || !dev->archdata.dma_ops)
+ return dma_ops;
+ else
+ return dev->archdata.dma_ops;
+ #endif
+ }
+
+ /* Make sure we keep the same behaviour */
+ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+ {
+ #ifdef CONFIG_X86_32
+ return 0;
+ #else
+ struct dma_mapping_ops *ops = get_dma_ops(dev);
+ if (ops->mapping_error)
+ return ops->mapping_error(dev, dma_addr);
return (dma_addr == bad_dma_address);
+ #endif
}
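Callers of dma_mapping_error() gain a device argument so the check can go
through the per-device ops that get_dma_ops() resolves
(dev->archdata.dma_ops on x86_64). A minimal caller sketch; the function
and buffer names are illustrative:

	static int example_map(struct device *dev, void *ptr, size_t len)
	{
		dma_addr_t bus = dma_map_single(dev, ptr, len, DMA_TO_DEVICE);

		/* consults ops->mapping_error for this device, if set */
		if (dma_mapping_error(dev, bus))
			return -EIO;

		dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
		return 0;
	}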
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
dma_map_single(struct device *hwdev, void *ptr, size_t size,
int direction)
{
+ struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
BUG_ON(!valid_dma_direction(direction));
- return dma_ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
+ return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}
static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
int direction)
{
+ struct dma_mapping_ops *ops = get_dma_ops(dev);
+
BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->unmap_single)
- dma_ops->unmap_single(dev, addr, size, direction);
+ if (ops->unmap_single)
+ ops->unmap_single(dev, addr, size, direction);
}
static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
int nents, int direction)
{
+ struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
BUG_ON(!valid_dma_direction(direction));
- return dma_ops->map_sg(hwdev, sg, nents, direction);
+ return ops->map_sg(hwdev, sg, nents, direction);
}
static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
int direction)
{
+ struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->unmap_sg)
- dma_ops->unmap_sg(hwdev, sg, nents, direction);
+ if (ops->unmap_sg)
+ ops->unmap_sg(hwdev, sg, nents, direction);
}
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
size_t size, int direction)
{
+ struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->sync_single_for_cpu)
- dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
- direction);
+ if (ops->sync_single_for_cpu)
+ ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
flush_write_buffers();
}
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
size_t size, int direction)
{
+ struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->sync_single_for_device)
- dma_ops->sync_single_for_device(hwdev, dma_handle, size,
- direction);
+ if (ops->sync_single_for_device)
+ ops->sync_single_for_device(hwdev, dma_handle, size, direction);
flush_write_buffers();
}
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
unsigned long offset, size_t size, int direction)
{
- BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->sync_single_range_for_cpu)
- dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
- size, direction);
+ struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+ BUG_ON(!valid_dma_direction(direction));
+ if (ops->sync_single_range_for_cpu)
+ ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
+ size, direction);
flush_write_buffers();
}
unsigned long offset, size_t size,
int direction)
{
- BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->sync_single_range_for_device)
- dma_ops->sync_single_range_for_device(hwdev, dma_handle,
- offset, size, direction);
+ struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+ BUG_ON(!valid_dma_direction(direction));
+ if (ops->sync_single_range_for_device)
+ ops->sync_single_range_for_device(hwdev, dma_handle,
+ offset, size, direction);
flush_write_buffers();
}
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
int nelems, int direction)
{
+ struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->sync_sg_for_cpu)
- dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
+ if (ops->sync_sg_for_cpu)
+ ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
flush_write_buffers();
}
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
int nelems, int direction)
{
+ struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->sync_sg_for_device)
- dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
+ if (ops->sync_sg_for_device)
+ ops->sync_sg_for_device(hwdev, sg, nelems, direction);
flush_write_buffers();
}
size_t offset, size_t size,
int direction)
{
+ struct dma_mapping_ops *ops = get_dma_ops(dev);
+
BUG_ON(!valid_dma_direction(direction));
- return dma_ops->map_single(dev, page_to_phys(page)+offset,
- size, direction);
+ return ops->map_single(dev, page_to_phys(page) + offset,
+ size, direction);
}
static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
#define dma_is_consistent(d, h) (1)
-#ifdef CONFIG_X86_32
-# define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
-struct dma_coherent_mem {
- void *virt_base;
- u32 device_base;
- int size;
- int flags;
- unsigned long *bitmap;
-};
-
-extern int
-dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
- dma_addr_t device_addr, size_t size, int flags);
-
-extern void
-dma_release_declared_memory(struct device *dev);
-
-extern void *
-dma_mark_declared_memory_occupied(struct device *dev,
- dma_addr_t device_addr, size_t size);
-#endif /* CONFIG_X86_32 */
+#include <asm-generic/dma-coherent.h>
#endif
process and its parent. Note that this file format is incompatible
with previous v0/v1/v2 file formats, so you will need updated tools
for processing it. A preliminary version of these tools is available
- at <http://www.physik3.uni-rostock.de/tim/kernel/utils/acct/>.
+ at <http://www.gnu.org/software/acct/>.
config TASKSTATS
bool "Export task/process statistics through netlink (EXPERIMENTAL)"
default n
depends on NAMESPACES && EXPERIMENTAL
help
- Suport process id namespaces. This allows having multiple
+ Support process id namespaces. This allows having multiple
processes with the same pid as long as they are in different
pid namespaces. This is a building block of containers.
endmenu # General setup
+config HAVE_GENERIC_DMA_COHERENT
+ bool
+ default n
+
config SLABINFO
bool
depends on PROC_FS
help
Without this option you will not be able to unload any
modules (note that some modules may not be unloadable
- anyway), which makes your kernel slightly smaller and
- simpler. If unsure, say Y.
+ anyway), which makes your kernel smaller, faster
+ and simpler. If unsure, say Y.
config MODULE_FORCE_UNLOAD
bool "Forced module unloading"
will be created for all modules. If unsure, say N.
config KMOD
- bool "Automatic kernel module loading"
+ def_bool y
depends on MODULES
help
- Normally when you have selected some parts of the kernel to
- be created as kernel modules, you must load them (using the
- "modprobe" command) before you can use them. If you say Y
- here, some parts of the kernel will be able to load modules
- automatically: when a part of the kernel needs a module, it
- runs modprobe with the appropriate arguments, thereby
- loading the module if it is available. If unsure, say Y.
+ This is being removed soon. These days, CONFIG_MODULES
+ implies CONFIG_KMOD, so use that instead.
config STOP_MACHINE
bool
# Makefile for the linux kernel.
#
- obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
+ obj-y = sched.o fork.o exec_domain.o panic.o printk.o \
cpu.o exit.o itimer.o time.o softirq.o resource.o \
sysctl.o capability.o ptrace.o timer.o user.o \
signal.o sys.o kmod.o workqueue.o pid.o \
hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
notifier.o ksysfs.o pm_qos_params.o sched_clock.o
+ CFLAGS_REMOVE_sched.o = -mno-spe
+
ifdef CONFIG_FTRACE
# Do not trace debug files and internal ftrace files
CFLAGS_REMOVE_lockdep.o = -pg
CFLAGS_REMOVE_sched.o = -mno-spe -pg
endif
+ obj-$(CONFIG_PROFILING) += profile.o
obj-$(CONFIG_SYSCTL_SYSCALL_CHECK) += sysctl_check.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-y += time/
obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o
obj-$(CONFIG_MARKERS) += marker.o
obj-$(CONFIG_LATENCYTOP) += latencytop.o
+obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
obj-$(CONFIG_FTRACE) += trace/
obj-$(CONFIG_TRACING) += trace/
obj-$(CONFIG_SMP) += sched_cpupri.o