static struct cryp_platform_data u8500_cryp1_platform_data = {
.mem_to_engine = {
- .dir = STEDMA40_MEM_TO_PERIPH,
+ .dir = DMA_MEM_TO_DEV,
.dev_type = DB8500_DMA_DEV48_CAC1,
.mode = STEDMA40_MODE_LOGICAL,
},
.engine_to_mem = {
- .dir = STEDMA40_PERIPH_TO_MEM,
+ .dir = DMA_DEV_TO_MEM,
.dev_type = DB8500_DMA_DEV48_CAC1,
.mode = STEDMA40_MODE_LOGICAL,
}
};
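For reference, the driver-private ste_dma40 directions used here map one-for-one onto the generic enum dma_transfer_direction values from <linux/dmaengine.h>; the pairs below are exactly the substitutions made in these hunks.

/*
 *   STEDMA40_MEM_TO_PERIPH    ->  DMA_MEM_TO_DEV
 *   STEDMA40_PERIPH_TO_MEM    ->  DMA_DEV_TO_MEM
 *   STEDMA40_MEM_TO_MEM       ->  DMA_MEM_TO_MEM
 *   STEDMA40_PERIPH_TO_PERIPH ->  DMA_DEV_TO_DEV
 */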
static struct stedma40_chan_cfg u8500_hash_dma_cfg_tx = {
- .dir = STEDMA40_MEM_TO_PERIPH,
+ .dir = DMA_MEM_TO_DEV,
.dev_type = DB8500_DMA_DEV50_HAC1_TX,
.mode = STEDMA40_MODE_LOGICAL,
};
#ifdef CONFIG_STE_DMA40
static struct stedma40_chan_cfg ssp0_dma_cfg_rx = {
.mode = STEDMA40_MODE_LOGICAL,
- .dir = STEDMA40_PERIPH_TO_MEM,
+ .dir = DMA_DEV_TO_MEM,
.dev_type = DB8500_DMA_DEV8_SSP0,
};
static struct stedma40_chan_cfg ssp0_dma_cfg_tx = {
.mode = STEDMA40_MODE_LOGICAL,
- .dir = STEDMA40_MEM_TO_PERIPH,
+ .dir = DMA_MEM_TO_DEV,
.dev_type = DB8500_DMA_DEV8_SSP0,
};
#endif
#ifdef CONFIG_STE_DMA40
static struct stedma40_chan_cfg uart0_dma_cfg_rx = {
.mode = STEDMA40_MODE_LOGICAL,
- .dir = STEDMA40_PERIPH_TO_MEM,
+ .dir = DMA_DEV_TO_MEM,
.dev_type = DB8500_DMA_DEV13_UART0,
};
static struct stedma40_chan_cfg uart0_dma_cfg_tx = {
.mode = STEDMA40_MODE_LOGICAL,
- .dir = STEDMA40_MEM_TO_PERIPH,
+ .dir = DMA_MEM_TO_DEV,
.dev_type = DB8500_DMA_DEV13_UART0,
};
static struct stedma40_chan_cfg uart1_dma_cfg_rx = {
.mode = STEDMA40_MODE_LOGICAL,
- .dir = STEDMA40_PERIPH_TO_MEM,
+ .dir = DMA_DEV_TO_MEM,
.dev_type = DB8500_DMA_DEV12_UART1,
};
static struct stedma40_chan_cfg uart1_dma_cfg_tx = {
.mode = STEDMA40_MODE_LOGICAL,
- .dir = STEDMA40_MEM_TO_PERIPH,
+ .dir = DMA_MEM_TO_DEV,
.dev_type = DB8500_DMA_DEV12_UART1,
};
static struct stedma40_chan_cfg uart2_dma_cfg_rx = {
.mode = STEDMA40_MODE_LOGICAL,
- .dir = STEDMA40_PERIPH_TO_MEM,
+ .dir = DMA_DEV_TO_MEM,
.dev_type = DB8500_DMA_DEV11_UART2,
};
static struct stedma40_chan_cfg uart2_dma_cfg_tx = {
.mode = STEDMA40_MODE_LOGICAL,
- .dir = STEDMA40_MEM_TO_PERIPH,
+ .dir = DMA_MEM_TO_DEV,
.dev_type = DB8500_DMA_DEV11_UART2,
};
#endif
sdi0_reg_info.gpios[0].gpio = GPIO_SDMMC_1V8_3V_SEL;
mop500_pinmaps_init();
- parent = u8500_init_devices(&ab8500_platdata);
+ parent = u8500_init_devices();
for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++)
mop500_platform_devs[i]->dev.parent = parent;
sdi0_reg_info.gpios[0].gpio = SNOWBALL_SDMMC_1V8_3V_GPIO;
snowball_pinmaps_init();
- parent = u8500_init_devices(&ab8500_platdata);
+ parent = u8500_init_devices();
for (i = 0; i < ARRAY_SIZE(snowball_platform_devs); i++)
snowball_platform_devs[i]->dev.parent = parent;
sdi0_reg_info.gpios[0].gpio = HREFV60_SDMMC_1V8_3V_GPIO;
hrefv60_pinmaps_init();
- parent = u8500_init_devices(&ab8500_platdata);
+ parent = u8500_init_devices();
for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++)
mop500_platform_devs[i]->dev.parent = parent;
#define MAX_LCLA_ALLOC_ATTEMPTS 256
/* Bit markings for allocation map */
- #define D40_ALLOC_FREE (1 << 31)
- #define D40_ALLOC_PHY (1 << 30)
+ #define D40_ALLOC_FREE BIT(31)
+ #define D40_ALLOC_PHY BIT(30)
#define D40_ALLOC_LOG_FREE 0
+ #define D40_MEMCPY_MAX_CHANS 8
+
/* Reserved event lines for memcpy only. */
#define DB8500_DMA_MEMCPY_EV_0 51
#define DB8500_DMA_MEMCPY_EV_1 56
/* Default configuration for physical memcpy */
struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
.mode = STEDMA40_MODE_PHYSICAL,
- .dir = STEDMA40_MEM_TO_MEM,
+ .dir = DMA_MEM_TO_MEM,
- .src_info.data_width = STEDMA40_BYTE_WIDTH,
+ .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
.src_info.psize = STEDMA40_PSIZE_PHY_1,
.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
- .dst_info.data_width = STEDMA40_BYTE_WIDTH,
+ .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
.dst_info.psize = STEDMA40_PSIZE_PHY_1,
.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};
/* Default configuration for logical memcpy */
struct stedma40_chan_cfg dma40_memcpy_conf_log = {
.mode = STEDMA40_MODE_LOGICAL,
- .dir = STEDMA40_MEM_TO_MEM,
+ .dir = DMA_MEM_TO_MEM,
- .src_info.data_width = STEDMA40_BYTE_WIDTH,
+ .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
.src_info.psize = STEDMA40_PSIZE_LOG_1,
.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
- .dst_info.data_width = STEDMA40_BYTE_WIDTH,
+ .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
.dst_info.psize = STEDMA40_PSIZE_LOG_1,
.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};
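A note on the width change in these two structs (and on the `1 << data_width` removals below): the generic dma_slave_buswidth values are byte counts, whereas the old STEDMA40_*_WIDTH values were log2 exponents, so shifts by the width collapse into plain multiplications.

/*
 *   DMA_SLAVE_BUSWIDTH_1_BYTE  == 1
 *   DMA_SLAVE_BUSWIDTH_2_BYTES == 2
 *   DMA_SLAVE_BUSWIDTH_4_BYTES == 4
 *   DMA_SLAVE_BUSWIDTH_8_BYTES == 8
 *
 *   old: bytes per element = 1 << data_width   (width stored as log2)
 *   new: bytes per element = data_width        (width stored in bytes)
 */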
* @phy_start: Physical memory start of the DMA registers.
* @phy_size: Size of the DMA register map.
* @irq: The IRQ number.
+ * @num_memcpy_chans: The number of channels used for memcpy (mem-to-mem
+ * transfers).
* @num_phy_chans: The number of physical channels. Read from HW. This
* is the number of available channels for this driver, not counting "Secure
* mode" allocated physical channels.
phys_addr_t phy_start;
resource_size_t phy_size;
int irq;
+ int num_memcpy_chans;
int num_phy_chans;
int num_log_chans;
struct device_dma_parameters dma_parms;
* that uses linked lists.
*/
if (!(chan->phy_chan->use_soft_lli &&
- chan->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM))
+ chan->dma_cfg.dir == DMA_DEV_TO_MEM))
curr_lcla = d40_lcla_alloc_one(chan, desc);
first_lcla = curr_lcla;
/*
* The dma only supports transmitting packages up to
- * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of
- * dma elements required to send the entire sg list
+ * STEDMA40_MAX_SEG_SIZE * data_width, where data_width is given in bytes.
+ *
+ * Calculate the total number of dma elements required to send the entire sg list.
*/
static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
{
int dmalen;
u32 max_w = max(data_width1, data_width2);
u32 min_w = min(data_width1, data_width2);
- u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);
+ u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w);
if (seg_max > STEDMA40_MAX_SEG_SIZE)
- seg_max -= (1 << max_w);
+ seg_max -= max_w;
- if (!IS_ALIGNED(size, 1 << max_w))
+ if (!IS_ALIGNED(size, max_w))
return -EINVAL;
if (size <= seg_max)
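A worked example of the new arithmetic, assuming STEDMA40_MAX_SEG_SIZE is the 16-bit element-count limit 0xFFFF (illustrative only); the tail of d40_size_2_dmalen() is not shown in this hunk and is assumed to do a ceiling division of size by seg_max.

/*
 *   src width 1 byte, dst width 4 bytes  ->  min_w = 1, max_w = 4
 *
 *   seg_max = ALIGN(0xFFFF * 1, 4) = 0x10000
 *   0x10000 > 0xFFFF, so seg_max -= 4    ->  0xFFFC bytes per element
 *
 *   A 200000-byte (4-byte aligned) sg entry then needs
 *   ceil(200000 / 0xFFFC) = 4 dma elements; a size such as 200001 fails
 *   the IS_ALIGNED(size, max_w) check and returns -EINVAL.
 */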
u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
/* Enable event line connected to device (or memcpy) */
- if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
- (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
+ if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
+ (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
__d40_config_set_event(d40c, event_type, event,
D40_CHAN_REG_SSLNK);
- if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM)
+ if (d40c->dma_cfg.dir != DMA_DEV_TO_MEM)
__d40_config_set_event(d40c, event_type, event,
D40_CHAN_REG_SDLNK);
}
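Spelled out per direction, directly from the two tests above:

/*
 *   DMA_DEV_TO_MEM : event enabled on SSLNK only (source side)
 *   DMA_DEV_TO_DEV : event enabled on both SSLNK and SDLNK
 *   DMA_MEM_TO_DEV : event enabled on SDLNK only (destination side)
 *   DMA_MEM_TO_MEM : event enabled on SDLNK only (memcpy uses dst event lines)
 */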
>> D40_SREG_ELEM_PHY_ECNT_POS;
}
- return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
+ return num_elt * d40c->dma_cfg.dst_info.data_width;
}
static bool d40_tx_is_linked(struct d40_chan *d40c)
return;
}
- if (d40_queue_start(d40c) == NULL)
+ if (d40_queue_start(d40c) == NULL) {
d40c->busy = false;
- pm_runtime_mark_last_busy(d40c->base->dev);
- pm_runtime_put_autosuspend(d40c->base->dev);
+
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
+ }
d40_desc_remove(d40d);
d40_desc_done(d40c, d40d);
}
/* ACK interrupt */
- writel(1 << idx, base->virtbase + il[row].clr);
+ writel(BIT(idx), base->virtbase + il[row].clr);
spin_lock(&d40c->lock);
res = -EINVAL;
}
- if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
+ if (conf->dir == DMA_DEV_TO_DEV) {
/*
* DMAC HW supports it. Will be added to this driver,
* in case any dma client requires it.
}
if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
- (1 << conf->src_info.data_width) !=
+ conf->src_info.data_width !=
d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
- (1 << conf->dst_info.data_width)) {
+ conf->dst_info.data_width) {
/*
* The DMAC hardware only supports
* src (burst x width) == dst (burst x width)
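A numeric illustration of the constraint being checked; the burst lengths are plausible values for d40_psize_2_burst_size(), not taken from this hunk.

/*
 *   src: 1-byte width, burst 16  ->  16 bytes per burst
 *   dst: 4-byte width, burst  4  ->  16 bytes per burst     accepted
 *
 *   src: 2-byte width, burst  8  ->  16 bytes per burst
 *   dst: 4-byte width, burst  8  ->  32 bytes per burst     rejected
 */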
if (phy->allocated_src == D40_ALLOC_FREE)
phy->allocated_src = D40_ALLOC_LOG_FREE;
- if (!(phy->allocated_src & (1 << log_event_line))) {
- phy->allocated_src |= 1 << log_event_line;
+ if (!(phy->allocated_src & BIT(log_event_line))) {
+ phy->allocated_src |= BIT(log_event_line);
goto found;
} else
goto not_found;
if (phy->allocated_dst == D40_ALLOC_FREE)
phy->allocated_dst = D40_ALLOC_LOG_FREE;
- if (!(phy->allocated_dst & (1 << log_event_line))) {
- phy->allocated_dst |= 1 << log_event_line;
+ if (!(phy->allocated_dst & BIT(log_event_line))) {
+ phy->allocated_dst |= BIT(log_event_line);
goto found;
} else
goto not_found;
/* Logical channel */
if (is_src) {
- phy->allocated_src &= ~(1 << log_event_line);
+ phy->allocated_src &= ~BIT(log_event_line);
if (phy->allocated_src == D40_ALLOC_LOG_FREE)
phy->allocated_src = D40_ALLOC_FREE;
} else {
- phy->allocated_dst &= ~(1 << log_event_line);
+ phy->allocated_dst &= ~BIT(log_event_line);
if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
phy->allocated_dst = D40_ALLOC_FREE;
}
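A minimal sketch of the allocation words manipulated above, using only the D40_ALLOC_* markings and the set/clear logic from this hunk; the names are stand-ins, and the D40_ALLOC_PHY path (a half channel claimed by a physical-mode channel) is handled elsewhere in the driver and omitted here.

#include <stdbool.h>
#include <stdint.h>

#define ALLOC_FREE	(1u << 31)	/* mirrors D40_ALLOC_FREE, BIT(31) */
#define ALLOC_LOG_FREE	0u		/* mirrors D40_ALLOC_LOG_FREE */

/* 'alloc' models phy->allocated_src or phy->allocated_dst: bits 0..29
 * record which logical event lines are in use on that half channel. */
static bool log_event_alloc(uint32_t *alloc, unsigned int event_line)
{
	if (*alloc == ALLOC_FREE)		/* first logical user of this half */
		*alloc = ALLOC_LOG_FREE;
	if (*alloc & (1u << event_line))	/* event line already taken */
		return false;
	*alloc |= 1u << event_line;
	return true;
}

static void log_event_free(uint32_t *alloc, unsigned int event_line)
{
	*alloc &= ~(1u << event_line);
	if (*alloc == ALLOC_LOG_FREE)		/* last logical user released */
		*alloc = ALLOC_FREE;
}

int main(void)
{
	uint32_t src_alloc = ALLOC_FREE;

	log_event_alloc(&src_alloc, 13);	/* claim an arbitrary event line */
	log_event_free(&src_alloc, 13);		/* word returns to ALLOC_FREE */
	return 0;
}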
phys = d40c->base->phy_res;
num_phy_chans = d40c->base->num_phy_chans;
- if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
+ if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
log_num = 2 * dev_type;
is_src = true;
- } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
- d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
+ } else if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
+ d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
/* dst event lines are used for logical memcpy */
log_num = 2 * dev_type + 1;
is_src = false;
event_line = D40_TYPE_TO_EVENT(dev_type);
if (!is_log) {
- if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
+ if (d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
/* Find physical half channel */
if (d40c->dma_cfg.use_fixed_channel) {
i = d40c->dma_cfg.phy_channel;
return -EINVAL;
}
- if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
- d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
+ if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
+ d40c->dma_cfg.dir == DMA_MEM_TO_MEM)
is_src = false;
- else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
+ else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
is_src = true;
else {
chan_err(d40c, "Unknown direction\n");
goto _exit;
}
- if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
- d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
+ if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
+ d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
status = readl(chanbase + D40_CHAN_REG_SDLNK);
- } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
+ } else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
status = readl(chanbase + D40_CHAN_REG_SSLNK);
} else {
chan_err(d40c, "Unknown direction\n");
u32 rtreg;
u32 event = D40_TYPE_TO_EVENT(dev_type);
u32 group = D40_TYPE_TO_GROUP(dev_type);
- u32 bit = 1 << event;
+ u32 bit = BIT(event);
u32 prioreg;
struct d40_gen_dmac *dmac = &d40c->base->gen_dmac;
if (d40c->base->rev < 3)
return;
- if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
- (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
+ if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
+ (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
__d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true);
- if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) ||
- (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
+ if ((d40c->dma_cfg.dir == DMA_MEM_TO_DEV) ||
+ (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
__d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false);
}
switch (D40_DT_FLAGS_DIR(flags)) {
case 0:
- cfg.dir = STEDMA40_MEM_TO_PERIPH;
+ cfg.dir = DMA_MEM_TO_DEV;
cfg.dst_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
break;
case 1:
- cfg.dir = STEDMA40_PERIPH_TO_MEM;
+ cfg.dir = DMA_DEV_TO_MEM;
cfg.src_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
break;
}
d40_set_prio_realtime(d40c);
if (chan_is_logical(d40c)) {
- if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
+ if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
d40c->lcpa = d40c->base->lcpa_base +
d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE;
else
static int
dma40_config_to_halfchannel(struct d40_chan *d40c,
struct stedma40_half_channel_info *info,
- enum dma_slave_buswidth width,
u32 maxburst)
{
- enum stedma40_periph_data_width addr_width;
int psize;
- switch (width) {
- case DMA_SLAVE_BUSWIDTH_1_BYTE:
- addr_width = STEDMA40_BYTE_WIDTH;
- break;
- case DMA_SLAVE_BUSWIDTH_2_BYTES:
- addr_width = STEDMA40_HALFWORD_WIDTH;
- break;
- case DMA_SLAVE_BUSWIDTH_4_BYTES:
- addr_width = STEDMA40_WORD_WIDTH;
- break;
- case DMA_SLAVE_BUSWIDTH_8_BYTES:
- addr_width = STEDMA40_DOUBLEWORD_WIDTH;
- break;
- default:
- dev_err(d40c->base->dev,
- "illegal peripheral address width "
- "requested (%d)\n",
- width);
- return -EINVAL;
- }
-
if (chan_is_logical(d40c)) {
if (maxburst >= 16)
psize = STEDMA40_PSIZE_LOG_16;
psize = STEDMA40_PSIZE_PHY_1;
}
- info->data_width = addr_width;
info->psize = psize;
info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;
if (config->direction == DMA_DEV_TO_MEM) {
config_addr = config->src_addr;
- if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
+ if (cfg->dir != DMA_DEV_TO_MEM)
dev_dbg(d40c->base->dev,
"channel was not configured for peripheral "
"to memory transfer (%d) overriding\n",
cfg->dir);
- cfg->dir = STEDMA40_PERIPH_TO_MEM;
+ cfg->dir = DMA_DEV_TO_MEM;
/* Configure the memory side */
if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
} else if (config->direction == DMA_MEM_TO_DEV) {
config_addr = config->dst_addr;
- if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
+ if (cfg->dir != DMA_MEM_TO_DEV)
dev_dbg(d40c->base->dev,
"channel was not configured for memory "
"to peripheral transfer (%d) overriding\n",
cfg->dir);
- cfg->dir = STEDMA40_MEM_TO_PERIPH;
+ cfg->dir = DMA_MEM_TO_DEV;
/* Configure the memory side */
if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;
}
+ /* Only valid widths are 1, 2, 4 and 8 bytes. */
+ if (src_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
+ src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
+ dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
+ dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
+ !is_power_of_2(src_addr_width) ||
+ !is_power_of_2(dst_addr_width))
+ return -EINVAL;
+
+ cfg->src_info.data_width = src_addr_width;
+ cfg->dst_info.data_width = dst_addr_width;
+
ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
- src_addr_width,
src_maxburst);
if (ret)
return ret;
ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
- dst_addr_width,
dst_maxburst);
if (ret)
return ret;
}
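For context, a hedged sketch of the client side that feeds this path: a peripheral driver fills a standard struct dma_slave_config (whose widths are what the new check above validates) and passes it in via dmaengine_slave_config(). The channel, address and burst values are placeholders, chosen so that burst x width matches on both sides.

#include <linux/dmaengine.h>

/* chan comes from an earlier channel request; dev_addr is the peripheral
 * FIFO address - both are placeholders in this sketch. */
static int example_setup_rx(struct dma_chan *chan, dma_addr_t dev_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= dev_addr,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
		.src_maxburst	= 16,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 4,
	};

	/* Lands in the runtime-config handling shown above, which in turn
	 * calls dma40_config_to_halfchannel() for each side. */
	return dmaengine_slave_config(chan, &cfg);
}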
d40_chan_init(base, &base->dma_memcpy, base->log_chans,
- base->num_log_chans, ARRAY_SIZE(dma40_memcpy_channels));
+ base->num_log_chans, base->num_memcpy_chans);
dma_cap_zero(base->dma_memcpy.cap_mask);
dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
struct d40_base *base = NULL;
int num_log_chans = 0;
int num_phy_chans;
+ int num_memcpy_chans;
int clk_ret = -EINVAL;
int i;
u32 pid;
else
num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
+ /* The number of channels used for memcpy */
+ if (plat_data->num_of_memcpy_chans)
+ num_memcpy_chans = plat_data->num_of_memcpy_chans;
+ else
+ num_memcpy_chans = ARRAY_SIZE(dma40_memcpy_channels);
+
num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY;
dev_info(&pdev->dev,
rev, res->start, num_phy_chans, num_log_chans);
base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
- (num_phy_chans + num_log_chans + ARRAY_SIZE(dma40_memcpy_channels)) *
+ (num_phy_chans + num_log_chans + num_memcpy_chans) *
sizeof(struct d40_chan), GFP_KERNEL);
if (base == NULL) {
base->rev = rev;
base->clk = clk;
+ base->num_memcpy_chans = num_memcpy_chans;
base->num_phy_chans = num_phy_chans;
base->num_log_chans = num_log_chans;
base->phy_start = res->start;
struct device_node *np)
{
struct stedma40_platform_data *pdata;
-
- /*
- * FIXME: Fill in this routine as more support is added.
- * First platform enabled (u8500) doens't need any extra
- * properties to run, so this is fairly sparce currently.
- */
+ int num_phy = 0, num_memcpy = 0, num_disabled = 0;
+ const __be32 *list;
pdata = devm_kzalloc(&pdev->dev,
sizeof(struct stedma40_platform_data),
if (!pdata)
return -ENOMEM;
+ /* If absent this value will be obtained from h/w. */
+ of_property_read_u32(np, "dma-channels", &num_phy);
+ if (num_phy > 0)
+ pdata->num_of_phy_chans = num_phy;
+
+ list = of_get_property(np, "memcpy-channels", &num_memcpy);
+ num_memcpy /= sizeof(*list);
+
+ if (num_memcpy > D40_MEMCPY_MAX_CHANS || num_memcpy <= 0) {
+ d40_err(&pdev->dev,
+ "Invalid number of memcpy channels specified (%d)\n",
+ num_memcpy);
+ return -EINVAL;
+ }
+ pdata->num_of_memcpy_chans = num_memcpy;
+
+ of_property_read_u32_array(np, "memcpy-channels",
+ dma40_memcpy_channels,
+ num_memcpy);
+
+ list = of_get_property(np, "disabled-channels", &num_disabled);
+ num_disabled /= sizeof(*list);
+
+ if (num_disabled >= STEDMA40_MAX_PHYS || num_disabled < 0) {
+ d40_err(&pdev->dev,
+ "Invalid number of disabled channels specified (%d)\n",
+ num_disabled);
+ return -EINVAL;
+ }
+
+ of_property_read_u32_array(np, "disabled-channels",
+ pdata->disabled_channels,
+ num_disabled);
+ pdata->disabled_channels[num_disabled] = -1;
+
pdev->dev.platform_data = pdata;
return 0;
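Summary of the devicetree properties this parsing routine consumes (a reading of the code above, not quoted from a binding document):

/*
 *   dma-channels      : u32, number of physical channels; optional - when
 *                       absent the count is read back from the controller
 *                       in probe.
 *   memcpy-channels   : array of u32 event lines reserved for memcpy;
 *                       effectively required here (1..D40_MEMCPY_MAX_CHANS
 *                       entries), copied into dma40_memcpy_channels[].
 *   disabled-channels : array of u32 physical channels that must not be
 *                       used; optional, fewer than STEDMA40_MAX_PHYS
 *                       entries, stored -1 terminated in the platform data.
 */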