*
* FIXME! dispatch queue is not a queue at all!
*/
-static void as_work_handler(void *data)
+static void as_work_handler(struct work_struct *work)
{
- struct request_queue *q = data;
+ struct as_data *ad = container_of(work, struct as_data, antic_work);
+ struct request_queue *q = ad->q;
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
/*
* initialize elevator private data (as_data).
*/
- static void *as_init_queue(request_queue_t *q, elevator_t *e)
+ static void *as_init_queue(request_queue_t *q)
{
struct as_data *ad;
ad->antic_timer.function = as_antic_timeout;
ad->antic_timer.data = (unsigned long)q;
init_timer(&ad->antic_timer);
- INIT_WORK(&ad->antic_work, as_work_handler, q);
+ INIT_WORK(&ad->antic_work, as_work_handler);
INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]);
INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
}
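For reference, the workqueue API change applied throughout this patch: the
callback now receives the work_struct itself and recovers its context with
container_of(), instead of a void * cookie stored at INIT_WORK() time. A
minimal sketch with illustrative names (my_ctx/my_handler are not from the
tree):

	/* before: context passed as an opaque pointer */
	static void my_handler(void *data)
	{
		struct my_ctx *ctx = data;
		/* ... */
	}
	/* INIT_WORK(&ctx->work, my_handler, ctx); */

	/* after: context recovered from the embedded work_struct */
	static void my_handler(struct work_struct *work)
	{
		struct my_ctx *ctx = container_of(work, struct my_ctx, work);
		/* ... */
	}
	/* INIT_WORK(&ctx->work, my_handler); */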
static void
- cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
- struct request *rq)
+ cfq_update_io_seektime(struct cfq_io_context *cic, struct request *rq)
{
sector_t sdist;
u64 total;
}
cfq_update_io_thinktime(cfqd, cic);
- cfq_update_io_seektime(cfqd, cic, rq);
+ cfq_update_io_seektime(cic, rq);
cfq_update_idle_window(cfqd, cfqq, cic);
cic->last_queue = jiffies;
/*
* queue lock held here
*/
- static void cfq_put_request(request_queue_t *q, struct request *rq)
+ static void cfq_put_request(struct request *rq)
{
struct cfq_queue *cfqq = RQ_CFQQ(rq);
return 1;
}
-static void cfq_kick_queue(void *data)
+static void cfq_kick_queue(struct work_struct *work)
{
- request_queue_t *q = data;
+ struct cfq_data *cfqd =
+ container_of(work, struct cfq_data, unplug_work);
+ request_queue_t *q = cfqd->queue;
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
kfree(cfqd);
}
- static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
+ static void *cfq_init_queue(request_queue_t *q)
{
struct cfq_data *cfqd;
int i;
cfqd->idle_class_timer.function = cfq_idle_class_timer;
cfqd->idle_class_timer.data = (unsigned long) cfqd;
- INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q);
+ INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
cfqd->cfq_quantum = cfq_quantum;
cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
*/
#include <scsi/scsi_cmnd.h>
-static void blk_unplug_work(void *data);
+static void blk_unplug_work(struct work_struct *work);
static void blk_unplug_timeout(unsigned long data);
static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
static void init_request_from_bio(struct request *req, struct bio *bio);
if (q->unplug_delay == 0)
q->unplug_delay = 1;
- INIT_WORK(&q->unplug_work, blk_unplug_work, q);
+ INIT_WORK(&q->unplug_work, blk_unplug_work);
q->unplug_timer.function = blk_unplug_timeout;
q->unplug_timer.data = (unsigned long)q;
}
}
-static void blk_unplug_work(void *data)
+static void blk_unplug_work(struct work_struct *work)
{
- request_queue_t *q = data;
+ request_queue_t *q = container_of(work, request_queue_t, unplug_work);
blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
q->rq.count[READ] + q->rq.count[WRITE]);
EXPORT_SYMBOL(blk_insert_request);
+ static int __blk_rq_unmap_user(struct bio *bio)
+ {
+ int ret = 0;
+
+ if (bio) {
+ if (bio_flagged(bio, BIO_USER_MAPPED))
+ bio_unmap_user(bio);
+ else
+ ret = bio_uncopy_user(bio);
+ }
+
+ return ret;
+ }
+
+ static int __blk_rq_map_user(request_queue_t *q, struct request *rq,
+ void __user *ubuf, unsigned int len)
+ {
+ unsigned long uaddr;
+ struct bio *bio, *orig_bio;
+ int reading, ret;
+
+ reading = rq_data_dir(rq) == READ;
+
+ /*
+ * if alignment requirement is satisfied, map in user pages for
+ * direct dma. else, set up kernel bounce buffers
+ */
+ uaddr = (unsigned long) ubuf;
+ if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
+ bio = bio_map_user(q, NULL, uaddr, len, reading);
+ else
+ bio = bio_copy_user(q, uaddr, len, reading);
+
+ if (IS_ERR(bio)) {
+ return PTR_ERR(bio);
+ }
+
+ orig_bio = bio;
+ blk_queue_bounce(q, &bio);
+ /*
+ * We link the bounce buffer in and could have to traverse it
+ * later so we have to get a ref to prevent it from being freed
+ */
+ bio_get(bio);
+
+ /*
+ * for most (all? don't know of any) queues we could
+ * skip grabbing the queue lock here. only drivers with
+ * funky private ->back_merge_fn() function could be
+ * problematic.
+ */
+ spin_lock_irq(q->queue_lock);
+ if (!rq->bio)
+ blk_rq_bio_prep(q, rq, bio);
+ else if (!q->back_merge_fn(q, rq, bio)) {
+ ret = -EINVAL;
+ spin_unlock_irq(q->queue_lock);
+ goto unmap_bio;
+ } else {
+ rq->biotail->bi_next = bio;
+ rq->biotail = bio;
+
+ rq->nr_sectors += bio_sectors(bio);
+ rq->hard_nr_sectors = rq->nr_sectors;
+ rq->data_len += bio->bi_size;
+ }
+ spin_unlock_irq(q->queue_lock);
+
+ return bio->bi_size;
+
+ unmap_bio:
+ /* if it was bounced we must call the end io function */
+ bio_endio(bio, bio->bi_size, 0);
+ __blk_rq_unmap_user(orig_bio);
+ bio_put(bio);
+ return ret;
+ }
+
/**
* blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
* @q: request queue where request should be inserted
* unmapping.
*/
int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
- unsigned int len)
+ unsigned long len)
{
- unsigned long uaddr;
- struct bio *bio;
- int reading;
+ unsigned long bytes_read = 0;
+ int ret;
if (len > (q->max_hw_sectors << 9))
return -EINVAL;
if (!len || !ubuf)
return -EINVAL;
- reading = rq_data_dir(rq) == READ;
+ while (bytes_read != len) {
+ unsigned long map_len, end, start;
- /*
- * if alignment requirement is satisfied, map in user pages for
- * direct dma. else, set up kernel bounce buffers
- */
- uaddr = (unsigned long) ubuf;
- if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
- bio = bio_map_user(q, NULL, uaddr, len, reading);
- else
- bio = bio_copy_user(q, uaddr, len, reading);
+ map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
+ end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
+ >> PAGE_SHIFT;
+ start = (unsigned long)ubuf >> PAGE_SHIFT;
- if (!IS_ERR(bio)) {
- rq->bio = rq->biotail = bio;
- blk_rq_bio_prep(q, rq, bio);
+ /*
+ * A bad offset could cause us to require BIO_MAX_PAGES + 1
+ * pages. If this happens we just lower the requested
+ * mapping len by a page so that we can fit
+ */
+ if (end - start > BIO_MAX_PAGES)
+ map_len -= PAGE_SIZE;
- rq->buffer = rq->data = NULL;
- rq->data_len = len;
- return 0;
+ ret = __blk_rq_map_user(q, rq, ubuf, map_len);
+ if (ret < 0)
+ goto unmap_rq;
+ bytes_read += ret;
+ ubuf += ret;
}
- /*
- * bio is the err-ptr
- */
- return PTR_ERR(bio);
+ rq->buffer = rq->data = NULL;
+ return 0;
+ unmap_rq:
+ blk_rq_unmap_user(rq);
+ return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
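A hedged usage sketch (caller names such as bd_disk are assumptions, error
handling abbreviated; relies on this era's blk_get_request()/blk_execute_rq()
API): with the new loop, a buffer larger than one bio's worth, e.g. 1 MB
against a 256 KB BIO_MAX_SIZE, is mapped as a chain of bios on the request
and torn down with a single blk_rq_unmap_user(rq):

	struct request *rq = blk_get_request(q, READ, __GFP_WAIT);
	int ret = blk_rq_map_user(q, rq, ubuf, len);	/* may chain several bios */
	if (!ret) {
		ret = blk_execute_rq(q, bd_disk, rq, 0);
		blk_rq_unmap_user(rq);			/* walks the rq->bio chain */
	}
	blk_put_request(rq);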
* unmapping.
*/
int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
- struct sg_iovec *iov, int iov_count)
+ struct sg_iovec *iov, int iov_count, unsigned int len)
{
struct bio *bio;
if (IS_ERR(bio))
return PTR_ERR(bio);
- rq->bio = rq->biotail = bio;
+ if (bio->bi_size != len) {
+ bio_endio(bio, bio->bi_size, 0);
+ bio_unmap_user(bio);
+ return -EINVAL;
+ }
+
+ bio_get(bio);
blk_rq_bio_prep(q, rq, bio);
rq->buffer = rq->data = NULL;
- rq->data_len = bio->bi_size;
return 0;
}
/**
* blk_rq_unmap_user - unmap a request with user data
- * @bio: bio to be unmapped
- * @ulen: length of user buffer
+ * @rq: rq to be unmapped
*
* Description:
- * Unmap a bio previously mapped by blk_rq_map_user().
+ * Unmap a rq previously mapped by blk_rq_map_user().
+ * rq->bio must be set to the original head of the request.
*/
- int blk_rq_unmap_user(struct bio *bio, unsigned int ulen)
+ int blk_rq_unmap_user(struct request *rq)
{
- int ret = 0;
+ struct bio *bio, *mapped_bio;
- if (bio) {
- if (bio_flagged(bio, BIO_USER_MAPPED))
- bio_unmap_user(bio);
+ while ((bio = rq->bio)) {
+ if (bio_flagged(bio, BIO_BOUNCED))
+ mapped_bio = bio->bi_private;
else
- ret = bio_uncopy_user(bio);
- }
+ mapped_bio = bio;
+ __blk_rq_unmap_user(mapped_bio);
+ rq->bio = bio->bi_next;
+ bio_put(bio);
+ }
return 0;
}
if (rq_data_dir(rq) == WRITE)
bio->bi_rw |= (1 << BIO_RW);
- rq->bio = rq->biotail = bio;
blk_rq_bio_prep(q, rq, bio);
-
rq->buffer = rq->data = NULL;
- rq->data_len = len;
return 0;
}
rq->hard_cur_sectors = rq->current_nr_sectors;
rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
rq->buffer = bio_data(bio);
+ rq->data_len = bio->bi_size;
rq->bio = rq->biotail = bio;
}
static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
{
+ struct ata_port *ap = qc->ap;
struct scsi_cmnd *cmd = qc->scsicmd;
u8 *cdb = cmd->cmnd;
int need_sense = (qc->err_mask != 0);
* schedule EH_REVALIDATE operation to update the IDENTIFY DEVICE
* cache
*/
- if (!need_sense && (qc->tf.command == ATA_CMD_SET_FEATURES) &&
+ if (ap->ops->error_handler &&
+ !need_sense && (qc->tf.command == ATA_CMD_SET_FEATURES) &&
((qc->tf.feature == SETFEATURES_WC_ON) ||
(qc->tf.feature == SETFEATURES_WC_OFF))) {
- qc->ap->eh_info.action |= ATA_EH_REVALIDATE;
- ata_port_schedule_eh(qc->ap);
+ ap->eh_info.action |= ATA_EH_REVALIDATE;
+ ata_port_schedule_eh(ap);
}
/* For ATA pass thru (SAT) commands, generate a sense block if
}
}
- if (need_sense && !qc->ap->ops->error_handler)
- ata_dump_status(qc->ap->id, &qc->result_tf);
+ if (need_sense && !ap->ops->error_handler)
+ ata_dump_status(ap->id, &qc->result_tf);
qc->scsidone(cmd);
/**
* ata_scsi_hotplug - SCSI part of hotplug
- * @data: Pointer to ATA port to perform SCSI hotplug on
+ * @work: Pointer to the hotplug work item embedded in the ATA port
*
* Perform SCSI part of hotplug. It's executed from a separate
* workqueue after EH completes. This is necessary because SCSI
* LOCKING:
* Kernel thread context (may sleep).
*/
-void ata_scsi_hotplug(void *data)
+void ata_scsi_hotplug(struct work_struct *work)
{
- struct ata_port *ap = data;
+ struct ata_port *ap =
+ container_of(work, struct ata_port, hotplug_task.work);
int i;
if (ap->pflags & ATA_PFLAG_UNLOADING) {
/**
* ata_scsi_dev_rescan - initiate scsi_rescan_device()
- * @data: Pointer to ATA port to perform scsi_rescan_device()
+ * @work: Pointer to the rescan work item embedded in the ATA port
*
* After ATA pass thru (SAT) commands are executed successfully,
* libata need to propagate the changes to SCSI layer. This
* LOCKING:
* Kernel thread context (may sleep).
*/
-void ata_scsi_dev_rescan(void *data)
+void ata_scsi_dev_rescan(struct work_struct *work)
{
- struct ata_port *ap = data;
+ struct ata_port *ap =
+ container_of(work, struct ata_port, scsi_rescan_task);
struct ata_device *dev;
unsigned int i;
* @ap: ATA port to which the command is being sent
*
* RETURNS:
- * Zero.
+ * Return value from __ata_scsi_queuecmd() if @cmd can be queued,
+ * 0 otherwise.
*/
int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *),
struct ata_port *ap)
{
+ int rc = 0;
+
ata_scsi_dump_cdb(ap, cmd);
if (likely(ata_scsi_dev_enabled(ap->device)))
- __ata_scsi_queuecmd(cmd, done, ap->device);
+ rc = __ata_scsi_queuecmd(cmd, done, ap->device);
else {
cmd->result = (DID_BAD_TARGET << 16);
done(cmd);
}
- return 0;
+ return rc;
}
EXPORT_SYMBOL_GPL(ata_sas_queuecmd);
static unsigned int ip_cnt;
-static void rekey_seq_generator(void *private_);
+static void rekey_seq_generator(struct work_struct *work);
-static DECLARE_WORK(rekey_work, rekey_seq_generator, NULL);
+static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator);
/*
* Lock avoidance:
* happen, and even if that happens only a not perfectly compliant
* ISN is generated, nothing fatal.
*/
-static void rekey_seq_generator(void *private_)
+static void rekey_seq_generator(struct work_struct *work)
{
struct keydata *keyptr = &ip_keydata[1 ^ (ip_cnt & 1)];
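DECLARE_WORK() loses its data argument in the same API change, and work that
is ever queued with a timeout now needs the separate DECLARE_DELAYED_WORK().
A sketch of the self-rearming idiom (illustrative names, interval arbitrary):

	static void my_rekey(struct work_struct *work);
	static DECLARE_DELAYED_WORK(my_work, my_rekey);

	static void my_rekey(struct work_struct *work)
	{
		/* ... refresh key material ... */
		schedule_delayed_work(&my_work, 5 * HZ);	/* re-arm */
	}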
late_initcall(seqgen_init);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
- __u32 secure_tcpv6_sequence_number(__u32 *saddr, __u32 *daddr,
- __u16 sport, __u16 dport)
+ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
+ __be16 sport, __be16 dport)
{
struct timeval tv;
__u32 seq;
*/
memcpy(hash, saddr, 16);
- hash[4]=(sport << 16) + dport;
+ hash[4]=((__force u16)sport << 16) + (__force u16)dport;
memcpy(&hash[5],keyptr->secret,sizeof(__u32) * 7);
- seq = twothirdsMD4Transform(daddr, hash) & HASH_MASK;
+ seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK;
seq += keyptr->count;
do_gettimeofday(&tv);
/* The code below is shamelessly stolen from secure_tcp_sequence_number().
*/
- __u32 secure_ip_id(__u32 daddr)
+ __u32 secure_ip_id(__be32 daddr)
{
struct keydata *keyptr;
__u32 hash[4];
* The dest ip address is placed in the starting vector,
* which is then hashed with random data.
*/
- hash[0] = daddr;
+ hash[0] = (__force __u32)daddr;
hash[1] = keyptr->secret[9];
hash[2] = keyptr->secret[10];
hash[3] = keyptr->secret[11];
#ifdef CONFIG_INET
- __u32 secure_tcp_sequence_number(__u32 saddr, __u32 daddr,
- __u16 sport, __u16 dport)
+ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
+ __be16 sport, __be16 dport)
{
struct timeval tv;
__u32 seq;
* Note that the words are placed into the starting vector, which is
* then mixed with a partial MD4 over random data.
*/
- hash[0]=saddr;
- hash[1]=daddr;
- hash[2]=(sport << 16) + dport;
+ hash[0]=(__force u32)saddr;
+ hash[1]=(__force u32)daddr;
+ hash[2]=((__force u16)sport << 16) + (__force u16)dport;
hash[3]=keyptr->secret[11];
seq = half_md4_transform(hash, keyptr->secret) & HASH_MASK;
EXPORT_SYMBOL(secure_tcp_sequence_number);
/* Generate secure starting point for ephemeral IPV4 transport port search */
- u32 secure_ipv4_port_ephemeral(__u32 saddr, __u32 daddr, __u16 dport)
+ u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
{
struct keydata *keyptr = get_keyptr();
u32 hash[4];
* Pick a unique starting offset for each ephemeral port search
* (saddr, daddr, dport) and 48bits of random data.
*/
- hash[0] = saddr;
- hash[1] = daddr;
- hash[2] = dport ^ keyptr->secret[10];
+ hash[0] = (__force u32)saddr;
+ hash[1] = (__force u32)daddr;
+ hash[2] = (__force u32)dport ^ keyptr->secret[10];
hash[3] = keyptr->secret[11];
return half_md4_transform(hash, keyptr->secret);
}
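The __be32/__be16 conversions are sparse endianness annotations; __force is
used where a big-endian value only feeds a hash, so its byte order is
irrelevant. A hedged illustration of the warning being silenced:

	__be16 sport = htons(1234);
	u32 h;

	/* h = sport << 16;  -- sparse: restricted __be16 degrades to int */
	h = (__force u16)sport << 16;	/* explicit: hash input only */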
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
- u32 secure_ipv6_port_ephemeral(const __u32 *saddr, const __u32 *daddr, __u16 dport)
+ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, __be16 dport)
{
struct keydata *keyptr = get_keyptr();
u32 hash[12];
memcpy(hash, saddr, 16);
- hash[4] = dport;
+ hash[4] = (__force u32)dport;
memcpy(&hash[5],keyptr->secret,sizeof(__u32) * 7);
- return twothirdsMD4Transform(daddr, hash);
+ return twothirdsMD4Transform((const __u32 *)daddr, hash);
}
#endif
* bit's 32-47 increase every key exchange
* 0-31 hash(source, dest)
*/
- u64 secure_dccp_sequence_number(__u32 saddr, __u32 daddr,
- __u16 sport, __u16 dport)
+ u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
+ __be16 sport, __be16 dport)
{
struct timeval tv;
u64 seq;
__u32 hash[4];
struct keydata *keyptr = get_keyptr();
- hash[0] = saddr;
- hash[1] = daddr;
- hash[2] = (sport << 16) + dport;
+ hash[0] = (__force u32)saddr;
+ hash[1] = (__force u32)daddr;
+ hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
hash[3] = keyptr->secret[11];
seq = half_md4_transform(hash, keyptr->secret);
* drain on it), and uses halfMD4Transform within the second. We
* also mix it with jiffies and the PID:
*/
- return secure_ip_id(current->pid + jiffies);
+ return secure_ip_id((__force __be32)(current->pid + jiffies));
}
/*
schedule_work(&chip->work);
}
-static void timeout_work(void *ptr)
+static void timeout_work(struct work_struct *work)
{
- struct tpm_chip *chip = ptr;
+ struct tpm_chip *chip = container_of(work, struct tpm_chip, work);
down(&chip->buffer_mutex);
atomic_set(&chip->data_pending, 0);
init_MUTEX(&chip->tpm_mutex);
INIT_LIST_HEAD(&chip->list);
- INIT_WORK(&chip->work, timeout_work, chip);
+ INIT_WORK(&chip->work, timeout_work);
init_timer(&chip->user_read_timer);
chip->user_read_timer.function = user_reader_timeout;
scnprintf(devname, DEVNAME_SIZE, "%s%d", "tpm", chip->dev_num);
chip->vendor.miscdev.name = devname;
- chip->vendor.miscdev.dev = dev;
+ chip->vendor.miscdev.parent = dev;
chip->dev = get_device(dev);
if (misc_register(&chip->vendor.miscdev)) {
/**
* do_tty_hangup - actual handler for hangup events
- * @data: tty device
 * @work: work item carrying the tty structure
*
* This can be called by the "eventd" kernel thread. That is process
* synchronous but doesn't hold any locks, so we need to make sure we
* tasklist_lock to walk task list for hangup event
*
*/
-static void do_tty_hangup(void *data)
+static void do_tty_hangup(struct work_struct *work)
{
- struct tty_struct *tty = (struct tty_struct *) data;
+ struct tty_struct *tty =
+ container_of(work, struct tty_struct, hangup_work);
struct file * cons_filp = NULL;
struct file *filp, *f = NULL;
struct task_struct *p;
printk(KERN_DEBUG "%s vhangup...\n", tty_name(tty, buf));
#endif
- do_tty_hangup((void *) tty);
+ do_tty_hangup(&tty->hangup_work);
}
EXPORT_SYMBOL(tty_vhangup);
* Nasty bug: do_SAK is being called in interrupt context. This can
* deadlock. We punt it up to process context. AKPM - 16Mar2001
*/
-static void __do_SAK(void *arg)
+static void __do_SAK(struct work_struct *work)
{
+ struct tty_struct *tty =
+ container_of(work, struct tty_struct, SAK_work);
#ifdef TTY_SOFT_SAK
tty_hangup(tty);
#else
- struct tty_struct *tty = arg;
struct task_struct *g, *p;
int session;
int i;
{
if (!tty)
return;
- PREPARE_WORK(&tty->SAK_work, __do_SAK, tty);
+ PREPARE_WORK(&tty->SAK_work, __do_SAK);
schedule_work(&tty->SAK_work);
}
/**
* flush_to_ldisc
- * @private_: tty structure passed from work queue.
 * @work: work item embedded in the tty buffer structure
*
* This routine is called out of the software interrupt to flush data
* from the buffer chain to the line discipline.
* receive_buf method is single threaded for each tty instance.
*/
-static void flush_to_ldisc(void *private_)
+static void flush_to_ldisc(struct work_struct *work)
{
- struct tty_struct *tty = (struct tty_struct *) private_;
+ struct tty_struct *tty =
+ container_of(work, struct tty_struct, buf.work.work);
unsigned long flags;
struct tty_ldisc *disc;
struct tty_buffer *tbuf, *head;
spin_unlock_irqrestore(&tty->buf.lock, flags);
if (tty->low_latency)
- flush_to_ldisc((void *) tty);
+ flush_to_ldisc(&tty->buf.work.work);
else
schedule_delayed_work(&tty->buf.work, 1);
}
tty->overrun_time = jiffies;
tty->buf.head = tty->buf.tail = NULL;
tty_buffer_init(tty);
- INIT_WORK(&tty->buf.work, flush_to_ldisc, tty);
+ INIT_DELAYED_WORK(&tty->buf.work, flush_to_ldisc);
init_MUTEX(&tty->buf.pty_sem);
mutex_init(&tty->termios_mutex);
init_waitqueue_head(&tty->write_wait);
init_waitqueue_head(&tty->read_wait);
- INIT_WORK(&tty->hangup_work, do_tty_hangup, tty);
+ INIT_WORK(&tty->hangup_work, do_tty_hangup);
mutex_init(&tty->atomic_read_lock);
mutex_init(&tty->atomic_write_lock);
spin_lock_init(&tty->read_lock);
INIT_LIST_HEAD(&tty->tty_files);
- INIT_WORK(&tty->SAK_work, NULL, NULL);
+ INIT_WORK(&tty->SAK_work, NULL);
}
/*
* This field is optional, if there is no known struct device
* for this tty device it can be set to NULL safely.
*
- * Returns a pointer to the class device (or ERR_PTR(-EFOO) on error).
+ * Returns a pointer to the struct device for this tty device
+ * (or ERR_PTR(-EFOO) on error).
*
* This call is required to be made to register an individual tty device
* if the tty driver's flags have the TTY_DRIVER_DYNAMIC_DEV bit set. If
* Locking: ??
*/
- struct class_device *tty_register_device(struct tty_driver *driver,
- unsigned index, struct device *device)
+ struct device *tty_register_device(struct tty_driver *driver, unsigned index,
+ struct device *device)
{
char name[64];
dev_t dev = MKDEV(driver->major, driver->minor_start) + index;
else
tty_line_name(driver, index, name);
- return class_device_create(tty_class, NULL, dev, device, "%s", name);
+ return device_create(tty_class, device, dev, name);
}
/**
void tty_unregister_device(struct tty_driver *driver, unsigned index)
{
- class_device_destroy(tty_class, MKDEV(driver->major, driver->minor_start) + index);
+ device_destroy(tty_class, MKDEV(driver->major, driver->minor_start) + index);
}
EXPORT_SYMBOL(tty_register_device);
if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) ||
register_chrdev_region(MKDEV(TTYAUX_MAJOR, 0), 1, "/dev/tty") < 0)
panic("Couldn't register /dev/tty driver\n");
- class_device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 0), NULL, "tty");
+ device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 0), "tty");
cdev_init(&console_cdev, &console_fops);
if (cdev_add(&console_cdev, MKDEV(TTYAUX_MAJOR, 1), 1) ||
register_chrdev_region(MKDEV(TTYAUX_MAJOR, 1), 1, "/dev/console") < 0)
panic("Couldn't register /dev/console driver\n");
- class_device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 1), NULL, "console");
+ device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 1), "console");
#ifdef CONFIG_UNIX98_PTYS
cdev_init(&ptmx_cdev, &ptmx_fops);
if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
register_chrdev_region(MKDEV(TTYAUX_MAJOR, 2), 1, "/dev/ptmx") < 0)
panic("Couldn't register /dev/ptmx driver\n");
- class_device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 2), NULL, "ptmx");
+ device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 2), "ptmx");
#endif
#ifdef CONFIG_VT
if (cdev_add(&vc0_cdev, MKDEV(TTY_MAJOR, 0), 1) ||
register_chrdev_region(MKDEV(TTY_MAJOR, 0), 1, "/dev/vc/0") < 0)
panic("Couldn't register /dev/tty0 driver\n");
- class_device_create(tty_class, NULL, MKDEV(TTY_MAJOR, 0), NULL, "tty0");
+ device_create(tty_class, NULL, MKDEV(TTY_MAJOR, 0), "tty0");
vty_init();
#endif
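These hunks are part of the class_device -> struct device migration; as I
recall the signatures of this kernel generation (device_create() only grew a
drvdata argument much later), the two creation helpers differ roughly as:

	struct class_device *class_device_create(struct class *cls,
			struct class_device *parent, dev_t devt,
			struct device *device, const char *fmt, ...);

	struct device *device_create(struct class *cls, struct device *parent,
			dev_t devt, const char *fmt, ...);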
struct con_driver {
const struct consw *con;
const char *desc;
- struct class_device *class_dev;
+ struct device *dev;
int node;
int first;
int last;
static void set_vesa_blanking(char __user *p);
static void set_cursor(struct vc_data *vc);
static void hide_cursor(struct vc_data *vc);
-static void console_callback(void *ignored);
+static void console_callback(struct work_struct *ignored);
static void blank_screen_t(unsigned long dummy);
static void set_palette(struct vc_data *vc);
static int blankinterval = 10*60*HZ;
static int vesa_off_interval;
-static DECLARE_WORK(console_work, console_callback, NULL);
+static DECLARE_WORK(console_work, console_callback);
/*
* fg_console is the current virtual console,
* with other console code and prevention of re-entrancy is
* ensured with console_sem.
*/
-static void console_callback(void *ignored)
+static void console_callback(struct work_struct *ignored)
{
acquire_console_sem();
}
#endif /* CONFIG_VT_HW_CONSOLE_BINDING */
- static ssize_t store_bind(struct class_device *class_device,
+ static ssize_t store_bind(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct con_driver *con = class_get_devdata(class_device);
+ struct con_driver *con = dev_get_drvdata(dev);
int bind = simple_strtoul(buf, NULL, 0);
if (bind)
return count;
}
- static ssize_t show_bind(struct class_device *class_device, char *buf)
+ static ssize_t show_bind(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- struct con_driver *con = class_get_devdata(class_device);
+ struct con_driver *con = dev_get_drvdata(dev);
int bind = con_is_bound(con->con);
return snprintf(buf, PAGE_SIZE, "%i\n", bind);
}
- static ssize_t show_name(struct class_device *class_device, char *buf)
+ static ssize_t show_name(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- struct con_driver *con = class_get_devdata(class_device);
+ struct con_driver *con = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%s %s\n",
(con->flag & CON_DRIVER_FLAG_MODULE) ? "(M)" : "(S)",
}
- static struct class_device_attribute class_device_attrs[] = {
+ static struct device_attribute device_attrs[] = {
__ATTR(bind, S_IRUGO|S_IWUSR, show_bind, store_bind),
__ATTR(name, S_IRUGO, show_name, NULL),
};
- static int vtconsole_init_class_device(struct con_driver *con)
+ static int vtconsole_init_device(struct con_driver *con)
{
int i;
int error = 0;
con->flag |= CON_DRIVER_FLAG_ATTR;
- class_set_devdata(con->class_dev, con);
- for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++) {
- error = class_device_create_file(con->class_dev,
- &class_device_attrs[i]);
+ dev_set_drvdata(con->dev, con);
+ for (i = 0; i < ARRAY_SIZE(device_attrs); i++) {
+ error = device_create_file(con->dev, &device_attrs[i]);
if (error)
break;
}
if (error) {
while (--i >= 0)
- class_device_remove_file(con->class_dev,
- &class_device_attrs[i]);
+ device_remove_file(con->dev, &device_attrs[i]);
con->flag &= ~CON_DRIVER_FLAG_ATTR;
}
return error;
}
- static void vtconsole_deinit_class_device(struct con_driver *con)
+ static void vtconsole_deinit_device(struct con_driver *con)
{
int i;
if (con->flag & CON_DRIVER_FLAG_ATTR) {
- for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++)
- class_device_remove_file(con->class_dev,
- &class_device_attrs[i]);
+ for (i = 0; i < ARRAY_SIZE(device_attrs); i++)
+ device_remove_file(con->dev, &device_attrs[i]);
con->flag &= ~CON_DRIVER_FLAG_ATTR;
}
}
if (retval)
goto err;
- con_driver->class_dev = class_device_create(vtconsole_class, NULL,
- MKDEV(0, con_driver->node),
- NULL, "vtcon%i",
- con_driver->node);
+ con_driver->dev = device_create(vtconsole_class, NULL,
+ MKDEV(0, con_driver->node),
+ "vtcon%i", con_driver->node);
- if (IS_ERR(con_driver->class_dev)) {
- printk(KERN_WARNING "Unable to create class_device for %s; "
+ if (IS_ERR(con_driver->dev)) {
+ printk(KERN_WARNING "Unable to create device for %s; "
"errno = %ld\n", con_driver->desc,
- PTR_ERR(con_driver->class_dev));
- con_driver->class_dev = NULL;
+ PTR_ERR(con_driver->dev));
+ con_driver->dev = NULL;
} else {
- vtconsole_init_class_device(con_driver);
+ vtconsole_init_device(con_driver);
}
err:
if (con_driver->con == csw &&
con_driver->flag & CON_DRIVER_FLAG_MODULE) {
- vtconsole_deinit_class_device(con_driver);
- class_device_destroy(vtconsole_class,
- MKDEV(0, con_driver->node));
+ vtconsole_deinit_device(con_driver);
+ device_destroy(vtconsole_class,
+ MKDEV(0, con_driver->node));
con_driver->con = NULL;
con_driver->desc = NULL;
- con_driver->class_dev = NULL;
+ con_driver->dev = NULL;
con_driver->node = 0;
con_driver->flag = 0;
con_driver->first = 0;
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
struct con_driver *con = &registered_con_driver[i];
- if (con->con && !con->class_dev) {
- con->class_dev =
- class_device_create(vtconsole_class, NULL,
- MKDEV(0, con->node), NULL,
- "vtcon%i", con->node);
+ if (con->con && !con->dev) {
+ con->dev = device_create(vtconsole_class, NULL,
+ MKDEV(0, con->node),
+ "vtcon%i", con->node);
- if (IS_ERR(con->class_dev)) {
+ if (IS_ERR(con->dev)) {
printk(KERN_WARNING "Unable to create "
- "class_device for %s; errno = %ld\n",
- con->desc, PTR_ERR(con->class_dev));
- con->class_dev = NULL;
+ "device for %s; errno = %ld\n",
+ con->desc, PTR_ERR(con->dev));
+ con->dev = NULL;
} else {
- vtconsole_init_class_device(con);
+ vtconsole_init_device(con);
}
}
}
int status;
};
-static void process_req(void *data);
+static void process_req(struct work_struct *work);
static DEFINE_MUTEX(lock);
static LIST_HEAD(req_list);
-static DECLARE_WORK(work, process_req, NULL);
+static DECLARE_DELAYED_WORK(work, process_req);
static struct workqueue_struct *addr_wq;
void rdma_addr_register_client(struct rdma_addr_client *client)
mutex_lock(&lock);
list_for_each_entry_reverse(temp_req, &req_list, list) {
- if (time_after(req->timeout, temp_req->timeout))
+ if (time_after_eq(req->timeout, temp_req->timeout))
break;
}
return ret;
}
-static void process_req(void *data)
+static void process_req(struct work_struct *work)
{
struct addr_req *req, *temp_req;
struct sockaddr_in *src_in, *dst_in;
mutex_lock(&lock);
list_for_each_entry_safe(req, temp_req, &req_list, list) {
- if (req->status) {
+ if (req->status == -ENODATA) {
src_in = (struct sockaddr_in *) &req->src_addr;
dst_in = (struct sockaddr_in *) &req->dst_addr;
req->status = addr_resolve_remote(src_in, dst_in,
req->addr);
+ if (req->status && time_after_eq(jiffies, req->timeout))
+ req->status = -ETIMEDOUT;
+ else if (req->status == -ENODATA)
+ continue;
}
- if (req->status && time_after(jiffies, req->timeout))
- req->status = -ETIMEDOUT;
- else if (req->status == -ENODATA)
- continue;
-
- list_del(&req->list);
- list_add_tail(&req->list, &done_list);
+ list_move_tail(&req->list, &done_list);
}
if (!list_empty(&req_list)) {
if (req->addr == addr) {
req->status = -ECANCELED;
req->timeout = jiffies;
- list_del(&req->list);
- list_add(&req->list, &req_list);
+ list_move(&req->list, &req_list);
set_timeout(req->timeout);
break;
}
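list_move() and list_move_tail() are existing list.h helpers that fold the
del + add pair into one call, so the substitution is behavior-preserving:

	/* before */
	list_del(&req->list);
	list_add_tail(&req->list, &done_list);

	/* after: equivalent */
	list_move_tail(&req->list, &done_list);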
};
struct cm_work {
- struct work_struct work;
+ struct delayed_work work;
struct list_head list;
struct cm_port *port;
struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */
__be32 rq_psn;
int timeout_ms;
enum ib_mtu path_mtu;
+ __be16 pkey;
u8 private_data_len;
u8 max_cm_retries;
u8 peer_to_peer;
u8 responder_resources;
u8 initiator_depth;
- u8 local_ack_timeout;
u8 retry_count;
u8 rnr_retry_count;
u8 service_timeout;
atomic_t work_count;
};
-static void cm_work_handler(void *data);
+static void cm_work_handler(struct work_struct *work);
static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
if (!private_data || !private_data_len)
return NULL;
- data = kmalloc(private_data_len, GFP_KERNEL);
+ data = kmemdup(private_data, private_data_len, GFP_KERNEL);
if (!data)
return ERR_PTR(-ENOMEM);
- memcpy(data, private_data, private_data_len);
return data;
}
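kmemdup() (new in this kernel series) is the allocate-and-copy helper these
hunks switch to; it is essentially:

	void *kmemdup(const void *src, size_t len, gfp_t gfp)
	{
		void *p = kmalloc(len, gfp);
		if (p)
			memcpy(p, src, len);
		return p;
	}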
return ERR_PTR(-ENOMEM);
timewait_info->work.local_id = local_id;
- INIT_WORK(&timewait_info->work.work, cm_work_handler,
- &timewait_info->work);
+ INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
return timewait_info;
}
* timewait before notifying the user that we've exited timewait.
*/
cm_id_priv->id.state = IB_CM_TIMEWAIT;
- wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout);
+ wait_time = cm_convert_to_ms(cm_id_priv->av.packet_life_time + 1);
queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
msecs_to_jiffies(wait_time));
cm_id_priv->timewait_info = NULL;
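The +1 works because IB timeouts are log2-encoded (4.096 us * 2^n), so
packet_life_time + 1 denotes twice the one-way packet lifetime, i.e. a
round-trip bound, which is what the retired local_ack_timeout field
expressed:

	/* t(n) = 4.096 us * 2^n
	 * t(packet_life_time + 1) = 2 * t(packet_life_time)
	 */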
cm_id_priv->responder_resources = param->responder_resources;
cm_id_priv->retry_count = param->retry_count;
cm_id_priv->path_mtu = param->primary_path->mtu;
+ cm_id_priv->pkey = param->primary_path->pkey;
cm_id_priv->qp_type = param->qp_type;
ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
- cm_id_priv->local_ack_timeout =
- cm_req_get_primary_local_ack_timeout(req_msg);
spin_lock_irqsave(&cm_id_priv->lock, flags);
ret = ib_post_send_mad(cm_id_priv->msg, NULL);
cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
+ cm_id_priv->pkey = req_msg->pkey;
cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
- cm_id_priv->local_ack_timeout =
- cm_req_get_primary_local_ack_timeout(req_msg);
cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
unsigned long flags;
int ret;
- /* See comment in ib_cm_establish about lookup. */
+ /* See comment in cm_establish about lookup. */
cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
if (!cm_id_priv)
return -EINVAL;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state != IB_CM_ESTABLISHED ||
- cm_id->lap_state != IB_CM_LAP_IDLE) {
+ (cm_id->lap_state != IB_CM_LAP_UNINIT &&
+ cm_id->lap_state != IB_CM_LAP_IDLE)) {
ret = -EINVAL;
goto out;
}
+ ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
+ if (ret)
+ goto out;
+
ret = cm_alloc_msg(cm_id_priv, &msg);
if (ret)
goto out;
}
EXPORT_SYMBOL(ib_send_cm_lap);
- static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
+ static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
+ struct ib_sa_path_rec *path,
struct cm_lap_msg *lap_msg)
{
memset(path, 0, sizeof *path);
path->hop_limit = lap_msg->alt_hop_limit;
path->traffic_class = cm_lap_get_traffic_class(lap_msg);
path->reversible = 1;
- /* pkey is same as in REQ */
+ path->pkey = cm_id_priv->pkey;
path->sl = cm_lap_get_sl(lap_msg);
path->mtu_selector = IB_SA_EQ;
- /* mtu is same as in REQ */
+ path->mtu = cm_id_priv->path_mtu;
path->rate_selector = IB_SA_EQ;
path->rate = cm_lap_get_packet_rate(lap_msg);
path->packet_life_time_selector = IB_SA_EQ;
param = &work->cm_event.param.lap_rcvd;
param->alternate_path = &work->path[0];
- cm_format_path_from_lap(param->alternate_path, lap_msg);
+ cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
work->cm_event.private_data = &lap_msg->private_data;
spin_lock_irqsave(&cm_id_priv->lock, flags);
goto unlock;
switch (cm_id_priv->id.lap_state) {
+ case IB_CM_LAP_UNINIT:
case IB_CM_LAP_IDLE:
break;
case IB_CM_MRA_LAP_SENT:
cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
cm_id_priv->tid = lap_msg->hdr.tid;
+ cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
+ work->mad_recv_wc->recv_buf.grh,
+ &cm_id_priv->av);
+ cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
}
}
-static void cm_work_handler(void *data)
+static void cm_work_handler(struct work_struct *_work)
{
- struct cm_work *work = data;
+ struct cm_work *work = container_of(_work, struct cm_work, work.work);
int ret;
switch (work->cm_event.event) {
cm_free_work(work);
}
- int ib_cm_establish(struct ib_cm_id *cm_id)
+ static int cm_establish(struct ib_cm_id *cm_id)
{
struct cm_id_private *cm_id_priv;
struct cm_work *work;
* we need to find the cm_id once we're in the context of the
* worker thread, rather than holding a reference on it.
*/
- INIT_WORK(&work->work, cm_work_handler, work);
+ INIT_DELAYED_WORK(&work->work, cm_work_handler);
work->local_id = cm_id->local_id;
work->remote_id = cm_id->remote_id;
work->mad_recv_wc = NULL;
work->cm_event.event = IB_CM_USER_ESTABLISHED;
- queue_work(cm.wq, &work->work);
+ queue_delayed_work(cm.wq, &work->work, 0);
out:
return ret;
}
- EXPORT_SYMBOL(ib_cm_establish);
+
+ static int cm_migrate(struct ib_cm_id *cm_id)
+ {
+ struct cm_id_private *cm_id_priv;
+ unsigned long flags;
+ int ret = 0;
+
+ cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ if (cm_id->state == IB_CM_ESTABLISHED &&
+ (cm_id->lap_state == IB_CM_LAP_UNINIT ||
+ cm_id->lap_state == IB_CM_LAP_IDLE)) {
+ cm_id->lap_state = IB_CM_LAP_IDLE;
+ cm_id_priv->av = cm_id_priv->alt_av;
+ } else
+ ret = -EINVAL;
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+
+ return ret;
+ }
+
+ int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
+ {
+ int ret;
+
+ switch (event) {
+ case IB_EVENT_COMM_EST:
+ ret = cm_establish(cm_id);
+ break;
+ case IB_EVENT_PATH_MIG:
+ ret = cm_migrate(cm_id);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+ }
+ EXPORT_SYMBOL(ib_cm_notify);
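A hedged sketch of the intended caller: a consumer's QP event handler
forwards COMM_EST and PATH_MIG events so the CM state machine stays in sync
(handler shape illustrative, not from this patch):

	static void my_qp_event_handler(struct ib_event *event, void *context)
	{
		struct ib_cm_id *cm_id = context;

		switch (event->event) {
		case IB_EVENT_COMM_EST:
		case IB_EVENT_PATH_MIG:
			ib_cm_notify(cm_id, event->event);
			break;
		default:
			break;
		}
	}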
static void cm_recv_handler(struct ib_mad_agent *mad_agent,
struct ib_mad_recv_wc *mad_recv_wc)
return;
}
- INIT_WORK(&work->work, cm_work_handler, work);
+ INIT_DELAYED_WORK(&work->work, cm_work_handler);
work->cm_event.event = event;
work->mad_recv_wc = mad_recv_wc;
work->port = (struct cm_port *)mad_agent->context;
- queue_work(cm.wq, &work->work);
+ queue_delayed_work(cm.wq, &work->work, 0);
}
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
case IB_CM_ESTABLISHED:
*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
IB_QP_PKEY_INDEX | IB_QP_PORT;
- qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE;
+ qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
if (cm_id_priv->responder_resources)
qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
IB_ACCESS_REMOTE_ATOMIC;
if (cm_id_priv->alt_av.ah_attr.dlid) {
*qp_attr_mask |= IB_QP_ALT_PATH;
qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
+ qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
+ qp_attr->alt_timeout =
+ cm_id_priv->alt_av.packet_life_time + 1;
qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
}
ret = 0;
case IB_CM_REP_SENT:
case IB_CM_MRA_REP_RCVD:
case IB_CM_ESTABLISHED:
- *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
- qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
- if (cm_id_priv->qp_type == IB_QPT_RC) {
- *qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
- IB_QP_RNR_RETRY |
- IB_QP_MAX_QP_RD_ATOMIC;
- qp_attr->timeout = cm_id_priv->local_ack_timeout;
- qp_attr->retry_cnt = cm_id_priv->retry_count;
- qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
- qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
- }
- if (cm_id_priv->alt_av.ah_attr.dlid) {
- *qp_attr_mask |= IB_QP_PATH_MIG_STATE;
+ if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
+ *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
+ qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
+ if (cm_id_priv->qp_type == IB_QPT_RC) {
+ *qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
+ IB_QP_RNR_RETRY |
+ IB_QP_MAX_QP_RD_ATOMIC;
+ qp_attr->timeout =
+ cm_id_priv->av.packet_life_time + 1;
+ qp_attr->retry_cnt = cm_id_priv->retry_count;
+ qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
+ qp_attr->max_rd_atomic =
+ cm_id_priv->initiator_depth;
+ }
+ if (cm_id_priv->alt_av.ah_attr.dlid) {
+ *qp_attr_mask |= IB_QP_PATH_MIG_STATE;
+ qp_attr->path_mig_state = IB_MIG_REARM;
+ }
+ } else {
+ *qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
+ qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
+ qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
+ qp_attr->alt_timeout =
+ cm_id_priv->alt_av.packet_life_time + 1;
+ qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
qp_attr->path_mig_state = IB_MIG_REARM;
}
ret = 0;
return ret;
qp_attr.qp_state = IB_QPS_INIT;
- qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
+ qp_attr.qp_access_flags = 0;
qp_attr.port_num = id_priv->id.port_num;
return ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_ACCESS_FLAGS |
IB_QP_PKEY_INDEX | IB_QP_PORT);
mutex_lock(&lock);
ret = cma_acquire_dev(conn_id);
mutex_unlock(&lock);
- if (ret) {
- ret = -ENODEV;
- cma_exch(conn_id, CMA_DESTROYING);
- cma_release_remove(conn_id);
- rdma_destroy_id(&conn_id->id);
- goto out;
- }
+ if (ret)
+ goto release_conn_id;
conn_id->cm_id.ib = cm_id;
cm_id->context = conn_id;
ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0,
ib_event->private_data + offset,
IB_CM_REQ_PRIVATE_DATA_SIZE - offset);
- if (ret) {
- /* Destroy the CM ID by returning a non-zero value. */
- conn_id->cm_id.ib = NULL;
- cma_exch(conn_id, CMA_DESTROYING);
- cma_release_remove(conn_id);
- rdma_destroy_id(&conn_id->id);
- }
+ if (!ret)
+ goto out;
+
+ /* Destroy the CM ID by returning a non-zero value. */
+ conn_id->cm_id.ib = NULL;
+
+ release_conn_id:
+ cma_exch(conn_id, CMA_DESTROYING);
+ cma_release_remove(conn_id);
+ rdma_destroy_id(&conn_id->id);
+
out:
cma_release_remove(listen_id);
return ret;
return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}
-static void cma_work_handler(void *data)
+static void cma_work_handler(struct work_struct *_work)
{
- struct cma_work *work = data;
+ struct cma_work *work = container_of(_work, struct cma_work, work);
struct rdma_id_private *id_priv = work->id;
int destroy = 0;
return -ENOMEM;
work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler, work);
+ INIT_WORK(&work->work, cma_work_handler);
work->old_state = CMA_ROUTE_QUERY;
work->new_state = CMA_ROUTE_RESOLVED;
work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
return -ENOMEM;
work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler, work);
+ INIT_WORK(&work->work, cma_work_handler);
work->old_state = CMA_ROUTE_QUERY;
work->new_state = CMA_ROUTE_RESOLVED;
work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
u8 p;
mutex_lock(&lock);
+ if (list_empty(&dev_list)) {
+ ret = -ENODEV;
+ goto out;
+ }
list_for_each_entry(cma_dev, &dev_list, list)
for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
- if (!ib_query_port (cma_dev->device, p, &port_attr) &&
+ if (!ib_query_port(cma_dev->device, p, &port_attr) &&
port_attr.state == IB_PORT_ACTIVE)
goto port_found;
- if (!list_empty(&dev_list)) {
- p = 1;
- cma_dev = list_entry(dev_list.next, struct cma_device, list);
- } else {
- ret = -ENODEV;
- goto out;
- }
+ p = 1;
+ cma_dev = list_entry(dev_list.next, struct cma_device, list);
port_found:
ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
}
work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler, work);
+ INIT_WORK(&work->work, cma_work_handler);
work->old_state = CMA_ADDR_QUERY;
work->new_state = CMA_ADDR_RESOLVED;
work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
cma_dev->device = device;
cma_dev->node_guid = device->node_guid;
- if (!cma_dev->node_guid)
- goto err;
init_completion(&cma_dev->comp);
atomic_set(&cma_dev->refcount, 1);
list_for_each_entry(id_priv, &listen_any_list, list)
cma_listen_on_dev(id_priv, cma_dev);
mutex_unlock(&lock);
- return;
- err:
- kfree(cma_dev);
}
static int cma_remove_id_dev(struct rdma_id_private *id_priv)
* 1) in the event upcall, cm_event_handler(), for a listening cm_id. If
* the backlog is exceeded, then no more connection request events will
* be processed. cm_event_handler() returns -ENOMEM in this case. Its up
- * to the provider to reject the connectino request.
+ * to the provider to reject the connection request.
* 2) in the connection request workqueue handler, cm_conn_req_handler().
* If work elements cannot be allocated for the new connect request cm_id,
* then IWCM will call the provider reject method. This is ok since
}
/*
- * Save private data from incoming connection requests in the
- * cm_id_priv so the low level driver doesn't have to. Adjust
+ * Save private data from incoming connection requests to
+ * iw_cm_event, so the low level driver doesn't have to. Adjust
* the event ptr to point to the local copy.
*/
- static int copy_private_data(struct iwcm_id_private *cm_id_priv,
- struct iw_cm_event *event)
+ static int copy_private_data(struct iw_cm_event *event)
{
void *p;
- p = kmalloc(event->private_data_len, GFP_ATOMIC);
+ p = kmemdup(event->private_data, event->private_data_len, GFP_ATOMIC);
if (!p)
return -ENOMEM;
- memcpy(p, event->private_data, event->private_data_len);
event->private_data = p;
return 0;
}
/*
- * Release a reference on cm_id. If the last reference is being removed
- * and iw_destroy_cm_id is waiting, wake up the waiting thread.
+ * Release a reference on cm_id. If the last reference is being
+ * released, enable the waiting thread (in iw_destroy_cm_id) to
+ * get woken up, and return 1 if a thread is already waiting.
*/
static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
{
/*
* CM_ID <-- CLOSING
*
- * Block if a passive or active connection is currenlty being processed. Then
+ * Block if a passive or active connection is currently being processed. Then
* process the event as follows:
* - If we are ESTABLISHED, move to CLOSING and modify the QP state
* based on the abrupt flag
{
struct iwcm_id_private *cm_id_priv;
unsigned long flags;
- int ret = 0;
+ int ret;
cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
struct iwcm_id_private *cm_id_priv;
- int ret = 0;
+ int ret;
unsigned long flags;
struct ib_qp *qp;
spin_lock_irqsave(&listen_id_priv->lock, flags);
if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
spin_unlock_irqrestore(&listen_id_priv->lock, flags);
- return;
+ goto out;
}
spin_unlock_irqrestore(&listen_id_priv->lock, flags);
listen_id_priv->id.context);
/* If the cm_id could not be created, ignore the request */
if (IS_ERR(cm_id))
- return;
+ goto out;
cm_id->provider_data = iw_event->provider_data;
cm_id->local_addr = iw_event->local_addr;
if (ret) {
iw_cm_reject(cm_id, NULL, 0);
iw_destroy_cm_id(cm_id);
- return;
+ goto out;
}
/* Call the client CM handler */
kfree(cm_id);
}
+ out:
if (iw_event->private_data_len)
kfree(iw_event->private_data);
}
struct iw_cm_event *iw_event)
{
unsigned long flags;
- int ret = 0;
+ int ret;
spin_lock_irqsave(&cm_id_priv->lock, flags);
struct iw_cm_event *iw_event)
{
unsigned long flags;
- int ret = 0;
+ int ret;
spin_lock_irqsave(&cm_id_priv->lock, flags);
/*
* thread asleep on the destroy_comp list vs. an object destroyed
* here synchronously when the last reference is removed.
*/
-static void cm_work_handler(void *arg)
+static void cm_work_handler(struct work_struct *_work)
{
- struct iwcm_work *work = arg;
+ struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
+ struct iw_cm_event levent;
struct iwcm_id_private *cm_id_priv = work->cm_id;
unsigned long flags;
int empty;
struct iwcm_work, list);
list_del_init(&work->list);
empty = list_empty(&cm_id_priv->work_list);
- lwork = *work;
+ levent = work->event;
put_work(work);
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- ret = process_event(cm_id_priv, &work->event);
+ ret = process_event(cm_id_priv, &levent);
if (ret) {
set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
destroy_cm_id(&cm_id_priv->id);
goto out;
}
- INIT_WORK(&work->work, cm_work_handler, work);
+ INIT_WORK(&work->work, cm_work_handler);
work->cm_id = cm_id_priv;
work->event = *iw_event;
if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
work->event.private_data_len) {
- ret = copy_private_data(cm_id_priv, &work->event);
+ ret = copy_private_data(&work->event);
if (ret) {
put_work(work);
goto out;
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");
- static kmem_cache_t *ib_mad_cache;
+ static struct kmem_cache *ib_mad_cache;
static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
-static void timeout_sends(void *data);
-static void local_completions(void *data);
+static void timeout_sends(struct work_struct *work);
+static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
struct ib_mad_agent_private *agent_priv,
u8 mgmt_class);
INIT_LIST_HEAD(&mad_agent_priv->wait_list);
INIT_LIST_HEAD(&mad_agent_priv->done_list);
INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
- INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv);
+ INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
INIT_LIST_HEAD(&mad_agent_priv->local_list);
- INIT_WORK(&mad_agent_priv->local_work, local_completions,
- mad_agent_priv);
+ INIT_WORK(&mad_agent_priv->local_work, local_completions);
atomic_set(&mad_agent_priv->refcount, 1);
init_completion(&mad_agent_priv->comp);
/*
* IB MAD completion callback
*/
-static void ib_mad_completion_handler(void *data)
+static void ib_mad_completion_handler(struct work_struct *work)
{
struct ib_mad_port_private *port_priv;
struct ib_wc wc;
- port_priv = (struct ib_mad_port_private *)data;
+ port_priv = container_of(work, struct ib_mad_port_private, work);
ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
}
EXPORT_SYMBOL(ib_cancel_mad);
-static void local_completions(void *data)
+static void local_completions(struct work_struct *work)
{
struct ib_mad_agent_private *mad_agent_priv;
struct ib_mad_local_private *local;
struct ib_wc wc;
struct ib_mad_send_wc mad_send_wc;
- mad_agent_priv = (struct ib_mad_agent_private *)data;
+ mad_agent_priv =
+ container_of(work, struct ib_mad_agent_private, local_work);
spin_lock_irqsave(&mad_agent_priv->lock, flags);
while (!list_empty(&mad_agent_priv->local_list)) {
return ret;
}
-static void timeout_sends(void *data)
+static void timeout_sends(struct work_struct *work)
{
struct ib_mad_agent_private *mad_agent_priv;
struct ib_mad_send_wr_private *mad_send_wr;
struct ib_mad_send_wc mad_send_wc;
unsigned long flags, delay;
- mad_agent_priv = (struct ib_mad_agent_private *)data;
+ mad_agent_priv = container_of(work, struct ib_mad_agent_private,
+ timed_work.work);
mad_send_wc.vendor_err = 0;
spin_lock_irqsave(&mad_agent_priv->lock, flags);
ret = -ENOMEM;
goto error8;
}
- INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv);
+ INIT_WORK(&port_priv->work, ib_mad_completion_handler);
spin_lock_irqsave(&ib_mad_port_list_lock, flags);
list_add_tail(&port_priv->port_list, &ib_mad_port_list);
struct list_head multicast_list;
struct rb_root multicast_tree;
- struct work_struct pkey_task;
- struct work_struct mcast_task;
+ struct delayed_work pkey_task;
+ struct delayed_work mcast_task;
struct work_struct flush_task;
struct work_struct restart_task;
- struct work_struct ah_reap_task;
+ struct delayed_work ah_reap_task;
struct ib_device *ca;
u8 port;
}
struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neigh);
- void ipoib_neigh_free(struct ipoib_neigh *neigh);
+ void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh);
extern struct workqueue_struct *ipoib_workqueue;
void ipoib_send(struct net_device *dev, struct sk_buff *skb,
struct ipoib_ah *address, u32 qpn);
-void ipoib_reap_ah(void *dev_ptr);
+void ipoib_reap_ah(struct work_struct *work);
void ipoib_flush_paths(struct net_device *dev);
struct ipoib_dev_priv *ipoib_intf_alloc(const char *format);
int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
-void ipoib_ib_dev_flush(void *dev);
+void ipoib_ib_dev_flush(struct work_struct *work);
void ipoib_ib_dev_cleanup(struct net_device *dev);
int ipoib_ib_dev_open(struct net_device *dev);
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
void ipoib_dev_cleanup(struct net_device *dev);
-void ipoib_mcast_join_task(void *dev_ptr);
+void ipoib_mcast_join_task(struct work_struct *work);
void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb);
-void ipoib_mcast_restart_task(void *dev_ptr);
+void ipoib_mcast_restart_task(struct work_struct *work);
int ipoib_mcast_start_thread(struct net_device *dev);
int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey);
int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey);
-void ipoib_pkey_poll(void *dev);
+void ipoib_pkey_poll(struct work_struct *work);
int ipoib_pkey_dev_delay_open(struct net_device *dev);
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
if (neigh->ah)
ipoib_put_ah(neigh->ah);
- ipoib_neigh_free(neigh);
+ ipoib_neigh_free(dev, neigh);
}
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb->dst->neighbour->ha));
} else {
neigh->ah = NULL;
- __skb_queue_tail(&neigh->queue, skb);
if (!path->query && path_rec_start(dev, path))
goto err_list;
+
+ __skb_queue_tail(&neigh->queue, skb);
}
spin_unlock(&priv->lock);
list_del(&neigh->list);
err_path:
- ipoib_neigh_free(neigh);
+ ipoib_neigh_free(dev, neigh);
++priv->stats.tx_dropped;
dev_kfree_skb_any(skb);
*/
ipoib_put_ah(neigh->ah);
list_del(&neigh->list);
- ipoib_neigh_free(neigh);
+ ipoib_neigh_free(dev, neigh);
spin_unlock(&priv->lock);
ipoib_path_lookup(skb, dev);
goto out;
if (neigh->ah)
ah = neigh->ah;
list_del(&neigh->list);
- ipoib_neigh_free(neigh);
+ ipoib_neigh_free(n->dev, neigh);
}
spin_unlock_irqrestore(&priv->lock, flags);
return neigh;
}
- void ipoib_neigh_free(struct ipoib_neigh *neigh)
+ void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
{
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct sk_buff *skb;
*to_ipoib_neigh(neigh->neighbour) = NULL;
+ while ((skb = __skb_dequeue(&neigh->queue))) {
+ ++priv->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ }
kfree(neigh);
}
INIT_LIST_HEAD(&priv->dead_ahs);
INIT_LIST_HEAD(&priv->multicast_list);
- INIT_WORK(&priv->pkey_task, ipoib_pkey_poll, priv->dev);
- INIT_WORK(&priv->mcast_task, ipoib_mcast_join_task, priv->dev);
- INIT_WORK(&priv->flush_task, ipoib_ib_dev_flush, priv->dev);
- INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task, priv->dev);
- INIT_WORK(&priv->ah_reap_task, ipoib_reap_ah, priv->dev);
+ INIT_DELAYED_WORK(&priv->pkey_task, ipoib_pkey_poll);
+ INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
+ INIT_WORK(&priv->flush_task, ipoib_ib_dev_flush);
+ INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
+ INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
}
struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
*/
if (neigh->ah)
ipoib_put_ah(neigh->ah);
- ipoib_neigh_free(neigh);
+ ipoib_neigh_free(dev, neigh);
}
spin_unlock_irqrestore(&priv->lock, flags);
mcast->backoff = 1;
mutex_lock(&mcast_mutex);
if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_work(ipoib_workqueue, &priv->mcast_task);
+ queue_delayed_work(ipoib_workqueue,
+ &priv->mcast_task, 0);
mutex_unlock(&mcast_mutex);
complete(&mcast->done);
return;
if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) {
if (status == -ETIMEDOUT)
- queue_work(ipoib_workqueue, &priv->mcast_task);
+ queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
+ 0);
else
queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
mcast->backoff * HZ);
mcast->query_id = ret;
}
-void ipoib_mcast_join_task(void *dev_ptr)
+void ipoib_mcast_join_task(struct work_struct *work)
{
- struct net_device *dev = dev_ptr;
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv =
+ container_of(work, struct ipoib_dev_priv, mcast_task.work);
+ struct net_device *dev = priv->dev;
if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
return;
mutex_lock(&mcast_mutex);
if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_work(ipoib_workqueue, &priv->mcast_task);
+ queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
mutex_unlock(&mcast_mutex);
spin_lock_irq(&priv->lock);
}
}
-void ipoib_mcast_restart_task(void *dev_ptr)
+void ipoib_mcast_restart_task(struct work_struct *work)
{
- struct net_device *dev = dev_ptr;
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv =
+ container_of(work, struct ipoib_dev_priv, restart_task);
+ struct net_device *dev = priv->dev;
struct dev_mc_list *mclist;
struct ipoib_mcast *mcast, *tmcast;
LIST_HEAD(remove_list);
wait_for_completion(&target->done);
}
-static void srp_remove_work(void *target_ptr)
+static void srp_remove_work(struct work_struct *work)
{
- struct srp_target_port *target = target_ptr;
+ struct srp_target_port *target =
+ container_of(work, struct srp_target_port, work);
spin_lock_irq(target->scsi_host->host_lock);
if (target->state != SRP_TARGET_DEAD) {
spin_lock_irq(target->scsi_host->host_lock);
if (target->state == SRP_TARGET_CONNECTING) {
target->state = SRP_TARGET_DEAD;
- INIT_WORK(&target->work, srp_remove_work, target);
+ INIT_WORK(&target->work, srp_remove_work);
schedule_work(&target->work);
}
spin_unlock_irq(target->scsi_host->host_lock);
break;
}
- target->status = srp_alloc_iu_bufs(target);
- if (target->status)
- break;
+ if (!target->rx_ring[0]) {
+ target->status = srp_alloc_iu_bufs(target);
+ if (target->status)
+ break;
+ }
qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
if (!qp_attr) {
if (!target_host)
return -ENOMEM;
- target_host->max_lun = SRP_MAX_LUN;
+ target_host->max_lun = SRP_MAX_LUN;
+ target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
target = host_to_target(target_host);
struct dvbt_set_parameters_msg param;
struct dvbt_get_status_msg status;
- struct work_struct query_work;
+ struct delayed_work query_work;
wait_queue_head_t poll_wq;
int pending_fe_events;
#ifdef ENABLE_RC
struct input_dev *rc_input_dev;
char phys[64];
- struct work_struct rc_query_work;
+ struct delayed_work rc_query_work;
int rc_input_event;
u32 rc_last_code;
unsigned long last_event_jiffies;
int i;
for (i=0; i<STREAM_URB_COUNT; i++)
- if (cinergyt2->stream_urb[i])
- usb_free_urb(cinergyt2->stream_urb[i]);
+ usb_free_urb(cinergyt2->stream_urb[i]);
usb_buffer_free(cinergyt2->udev, STREAM_URB_COUNT*STREAM_BUF_SIZE,
cinergyt2->streambuf, cinergyt2->streambuf_dmahandle);
cinergyt2_control_stream_transfer(cinergyt2, 0);
for (i=0; i<STREAM_URB_COUNT; i++)
- if (cinergyt2->stream_urb[i])
- usb_kill_urb(cinergyt2->stream_urb[i]);
+ usb_kill_urb(cinergyt2->stream_urb[i]);
}
static int cinergyt2_start_stream_xfer (struct cinergyt2 *cinergyt2)
#ifdef ENABLE_RC
-static void cinergyt2_query_rc (void *data)
+static void cinergyt2_query_rc (struct work_struct *work)
{
- struct cinergyt2 *cinergyt2 = data;
+ struct cinergyt2 *cinergyt2 =
+ container_of(work, struct cinergyt2, rc_query_work.work);
char buf[1] = { CINERGYT2_EP1_GET_RC_EVENTS };
struct cinergyt2_rc_event rc_events[12];
int n, len, i;
strlcat(cinergyt2->phys, "/input0", sizeof(cinergyt2->phys));
cinergyt2->rc_input_event = KEY_MAX;
cinergyt2->rc_last_code = ~0;
- INIT_WORK(&cinergyt2->rc_query_work, cinergyt2_query_rc, cinergyt2);
+ INIT_DELAYED_WORK(&cinergyt2->rc_query_work, cinergyt2_query_rc);
input_dev->name = DRIVER_NAME " remote control";
input_dev->phys = cinergyt2->phys;
#endif /* ENABLE_RC */
-static void cinergyt2_query (void *data)
+static void cinergyt2_query (struct work_struct *work)
{
- struct cinergyt2 *cinergyt2 = (struct cinergyt2 *) data;
+ struct cinergyt2 *cinergyt2 =
+ container_of(work, struct cinergyt2, query_work.work);
char cmd [] = { CINERGYT2_EP1_GET_TUNER_STATUS };
struct dvbt_get_status_msg *s = &cinergyt2->status;
uint8_t lock_bits;
mutex_init(&cinergyt2->sem);
init_waitqueue_head (&cinergyt2->poll_wq);
- INIT_WORK(&cinergyt2->query_work, cinergyt2_query, cinergyt2);
+ INIT_DELAYED_WORK(&cinergyt2->query_work, cinergyt2_query);
cinergyt2->udev = interface_to_usbdev(intf);
cinergyt2->param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
if (rd_blocks > s->block_count)
rd_blocks = s->block_count;
- if (!rd_blocks)
+ if (!rd_blocks) {
+ spin_unlock_irqrestore(&s->lock, flags);
return;
+ }
for (i = 0; i < rd_blocks; i++) {
if (block_to_user_buf(s, buf_ptr)) {
schedule_work(&s->work);
}
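
The spin_unlock_irqrestore() added to the !rd_blocks path above closes an early return that previously left s->lock held with interrupts disabled.
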
-static void saa6588_work(void *data)
+static void saa6588_work(struct work_struct *work)
{
- struct saa6588 *s = (struct saa6588 *)data;
+ struct saa6588 *s = container_of(work, struct saa6588, work);
saa6588_i2c_poll(s);
mod_timer(&s->timer, jiffies + msecs_to_jiffies(20));
saa6588_configure(s);
/* start polling via eventd */
- INIT_WORK(&s->work, saa6588_work, s);
+ INIT_WORK(&s->work, saa6588_work);
init_timer(&s->timer);
s->timer.function = saa6588_timer;
s->timer.data = (unsigned long)s;
* Copyright (C) 2003-2004 Russell King, All Rights Reserved.
* SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
* SD support Copyright (C) 2005 Pierre Ossman, All Rights Reserved.
+ * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
return err;
/*
- * Default bus width is 1 bit.
- */
- host->ios.bus_width = MMC_BUS_WIDTH_1;
-
- /*
- * We can only change the bus width of the selected
- * card so therefore we have to put the handling
+ * We can only change the bus width of SD cards when
+ * they are selected so we have to put the handling
* here.
+ *
+ * The card is in 1 bit mode by default so
+ * we only need to change if it supports the
+ * wider version.
*/
- if (host->caps & MMC_CAP_4_BIT_DATA) {
+ if (mmc_card_sd(card) &&
+ (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) {
+
/*
- * The card is in 1 bit mode by default so
- * we only need to change if it supports the
- * wider version.
- */
- if (mmc_card_sd(card) &&
- (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) {
+ * Default bus width is 1 bit.
+ */
+ host->ios.bus_width = MMC_BUS_WIDTH_1;
+
+ if (host->caps & MMC_CAP_4_BIT_DATA) {
struct mmc_command cmd;
cmd.opcode = SD_APP_SET_BUS_WIDTH;
cmd.arg = SD_BUS_WIDTH_4;
static inline void mmc_delay(unsigned int ms)
{
- if (ms < HZ / 1000) {
- yield();
+ if (ms < 1000 / HZ) {
+ cond_resched();
mdelay(ms);
} else {
- msleep_interruptible (ms);
+ msleep(ms);
}
}
}
}
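
The old mmc_delay() threshold, HZ / 1000, rounds to zero for any HZ below 1000 and so almost never selected the busy-wait path; 1000 / HZ is the intended value, the length of one jiffy in milliseconds. At HZ=250, for instance, a jiffy is 4 ms, so mmc_delay(3) now spins via mdelay() instead of letting msleep() round the sleep up to a whole jiffy or more. Switching from msleep_interruptible() to msleep() also keeps a pending signal from cutting a card-mandated delay short.
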
+ static void mmc_process_ext_csds(struct mmc_host *host)
+ {
+ int err;
+ struct mmc_card *card;
+
+ struct mmc_request mrq;
+ struct mmc_command cmd;
+ struct mmc_data data;
+
+ struct scatterlist sg;
+
+ /*
+ * As the ext_csd is so large and mostly unused, we don't store the
+ * raw block in mmc_card.
+ */
+ u8 *ext_csd;
+ ext_csd = kmalloc(512, GFP_KERNEL);
+ if (!ext_csd) {
+ printk("%s: could not allocate a buffer to receive the ext_csd."
+ "mmc v4 cards will be treated as v3.\n",
+ mmc_hostname(host));
+ return;
+ }
+
+ list_for_each_entry(card, &host->cards, node) {
+ if (card->state & (MMC_STATE_DEAD|MMC_STATE_PRESENT))
+ continue;
+ if (mmc_card_sd(card))
+ continue;
+ if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
+ continue;
+
+ err = mmc_select_card(host, card);
+ if (err != MMC_ERR_NONE) {
+ mmc_card_set_dead(card);
+ continue;
+ }
+
+ memset(&cmd, 0, sizeof(struct mmc_command));
+
+ cmd.opcode = MMC_SEND_EXT_CSD;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ memset(&data, 0, sizeof(struct mmc_data));
+
+ mmc_set_data_timeout(&data, card, 0);
+
+ data.blksz = 512;
+ data.blocks = 1;
+ data.flags = MMC_DATA_READ;
+ data.sg = &sg;
+ data.sg_len = 1;
+
+ memset(&mrq, 0, sizeof(struct mmc_request));
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+
+ sg_init_one(&sg, ext_csd, 512);
+
+ mmc_wait_for_req(host, &mrq);
+
+ if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) {
+ mmc_card_set_dead(card);
+ continue;
+ }
+
+ switch (ext_csd[EXT_CSD_CARD_TYPE]) {
+ case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
+ card->ext_csd.hs_max_dtr = 52000000;
+ break;
+ case EXT_CSD_CARD_TYPE_26:
+ card->ext_csd.hs_max_dtr = 26000000;
+ break;
+ default:
+ /* MMC v4 spec says this cannot happen */
+ printk("%s: card is mmc v4 but doesn't support "
+ "any high-speed modes.\n",
+ mmc_hostname(card->host));
+ mmc_card_set_bad(card);
+ continue;
+ }
+
+ /* Activate highspeed support. */
+ cmd.opcode = MMC_SWITCH;
+ cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
+ (EXT_CSD_HS_TIMING << 16) |
+ (1 << 8) |
+ EXT_CSD_CMD_SET_NORMAL;
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+
+ err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
+ if (err != MMC_ERR_NONE) {
+ printk("%s: failed to switch card to mmc v4 "
+ "high-speed mode.\n",
+ mmc_hostname(card->host));
+ continue;
+ }
+
+ mmc_card_set_highspeed(card);
+
+ /* Check for host support for wide-bus modes. */
+ if (!(host->caps & MMC_CAP_4_BIT_DATA)) {
+ continue;
+ }
+
+ /* Activate 4-bit support. */
+ cmd.opcode = MMC_SWITCH;
+ cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
+ (EXT_CSD_BUS_WIDTH << 16) |
+ (EXT_CSD_BUS_WIDTH_4 << 8) |
+ EXT_CSD_CMD_SET_NORMAL;
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+
+ err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
+ if (err != MMC_ERR_NONE) {
+ printk("%s: failed to switch card to "
+ "mmc v4 4-bit bus mode.\n",
+ mmc_hostname(card->host));
+ continue;
+ }
+
+ host->ios.bus_width = MMC_BUS_WIDTH_4;
+ }
+
+ kfree(ext_csd);
+
+ mmc_deselect_cards(host);
+ }
+
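The two MMC_SWITCH (CMD6) arguments built above pack four byte-wide fields. A hypothetical helper (not part of the patch, assuming the kernel's <linux/types.h>) makes the layout explicit; the shifts are taken directly from the code above:

/* Hypothetical helper illustrating the MMC_SWITCH argument layout. */
static inline u32 mmc_switch_arg(u8 mode, u8 index, u8 value, u8 cmd_set)
{
	return (mode << 24) |	/* access mode, e.g. MMC_SWITCH_MODE_WRITE_BYTE */
	       (index << 16) |	/* EXT_CSD byte index, e.g. EXT_CSD_HS_TIMING */
	       (value << 8) |	/* value to write into that byte */
	       cmd_set;		/* command set, e.g. EXT_CSD_CMD_SET_NORMAL */
}
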
static void mmc_read_scrs(struct mmc_host *host)
{
int err;
mmc_deselect_cards(host);
}
+ static void mmc_read_switch_caps(struct mmc_host *host)
+ {
+ int err;
+ struct mmc_card *card;
+ struct mmc_request mrq;
+ struct mmc_command cmd;
+ struct mmc_data data;
+ unsigned char *status;
+ struct scatterlist sg;
+
+ status = kmalloc(64, GFP_KERNEL);
+ if (!status) {
+ printk(KERN_WARNING "%s: Unable to allocate buffer for "
+ "reading switch capabilities.\n",
+ mmc_hostname(host));
+ return;
+ }
+
+ list_for_each_entry(card, &host->cards, node) {
+ if (card->state & (MMC_STATE_DEAD|MMC_STATE_PRESENT))
+ continue;
+ if (!mmc_card_sd(card))
+ continue;
+ if (card->scr.sda_vsn < SCR_SPEC_VER_1)
+ continue;
+
+ err = mmc_select_card(host, card);
+ if (err != MMC_ERR_NONE) {
+ mmc_card_set_dead(card);
+ continue;
+ }
+
+ memset(&cmd, 0, sizeof(struct mmc_command));
+
+ cmd.opcode = SD_SWITCH;
+ cmd.arg = 0x00FFFFF1;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ memset(&data, 0, sizeof(struct mmc_data));
+
+ mmc_set_data_timeout(&data, card, 0);
+
+ data.blksz = 64;
+ data.blocks = 1;
+ data.flags = MMC_DATA_READ;
+ data.sg = &sg;
+ data.sg_len = 1;
+
+ memset(&mrq, 0, sizeof(struct mmc_request));
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+
+ sg_init_one(&sg, status, 64);
+
+ mmc_wait_for_req(host, &mrq);
+
+ if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) {
+ mmc_card_set_dead(card);
+ continue;
+ }
+
+ if (status[13] & 0x02)
+ card->sw_caps.hs_max_dtr = 50000000;
+
+ memset(&cmd, 0, sizeof(struct mmc_command));
+
+ cmd.opcode = SD_SWITCH;
+ cmd.arg = 0x80FFFFF1;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ memset(&data, 0, sizeof(struct mmc_data));
+
+ mmc_set_data_timeout(&data, card, 0);
+
+ data.blksz = 64;
+ data.blocks = 1;
+ data.flags = MMC_DATA_READ;
+ data.sg = &sg;
+ data.sg_len = 1;
+
+ memset(&mrq, 0, sizeof(struct mmc_request));
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+
+ sg_init_one(&sg, status, 64);
+
+ mmc_wait_for_req(host, &mrq);
+
+ if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) {
+ mmc_card_set_dead(card);
+ continue;
+ }
+
+ if ((status[16] & 0xF) != 1) {
+ printk(KERN_WARNING "%s: Problem switching card "
+ "into high-speed mode!\n",
+ mmc_hostname(host));
+ continue;
+ }
+
+ mmc_card_set_highspeed(card);
+ }
+
+ kfree(status);
+
+ mmc_deselect_cards(host);
+ }
+
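Both SD_SWITCH (CMD6) commands above use the same low 24 bits, 0xFFFFF1: function group 1 (access mode) is asked for function 1, high-speed, while the 0xF nibbles leave every other group unchanged. Bit 31 selects the mode: clear (0x00FFFFF1) for the first, check-only query, whose 64-byte status block is then tested at byte 13 (group 1 support bits) and byte 16 (the group 1 switch result), and set (0x80FFFFF1) for the second call that actually commits the switch.
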
static unsigned int mmc_calculate_clock(struct mmc_host *host)
{
struct mmc_card *card;
unsigned int max_dtr = host->f_max;
list_for_each_entry(card, &host->cards, node)
- if (!mmc_card_dead(card) && max_dtr > card->csd.max_dtr)
- max_dtr = card->csd.max_dtr;
+ if (!mmc_card_dead(card)) {
+ if (mmc_card_highspeed(card) && mmc_card_sd(card)) {
+ if (max_dtr > card->sw_caps.hs_max_dtr)
+ max_dtr = card->sw_caps.hs_max_dtr;
+ } else if (mmc_card_highspeed(card) && !mmc_card_sd(card)) {
+ if (max_dtr > card->ext_csd.hs_max_dtr)
+ max_dtr = card->ext_csd.hs_max_dtr;
+ } else if (max_dtr > card->csd.max_dtr) {
+ max_dtr = card->csd.max_dtr;
+ }
+ }
pr_debug("%s: selected %d.%03dMHz transfer rate\n",
mmc_hostname(host),
mmc_read_csds(host);
- if (host->mode == MMC_MODE_SD)
+ if (host->mode == MMC_MODE_SD) {
mmc_read_scrs(host);
+ mmc_read_switch_caps(host);
+ } else
+ mmc_process_ext_csds(host);
}
*/
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
- if (delay)
- mmc_schedule_delayed_work(&host->detect, delay);
- else
- mmc_schedule_work(&host->detect);
+ mmc_schedule_delayed_work(&host->detect, delay);
}
EXPORT_SYMBOL(mmc_detect_change);
-static void mmc_rescan(void *data)
+static void mmc_rescan(struct work_struct *work)
{
- struct mmc_host *host = data;
+ struct mmc_host *host =
+ container_of(work, struct mmc_host, detect.work);
struct list_head *l, *n;
unsigned char power_mode;
spin_lock_init(&host->lock);
init_waitqueue_head(&host->wq);
INIT_LIST_HEAD(&host->cards);
- INIT_WORK(&host->detect, mmc_rescan, host);
+ INIT_DELAYED_WORK(&host->detect, mmc_rescan);
/*
* By default, hosts do not support SGIO or large requests.
*/
int mmc_resume_host(struct mmc_host *host)
{
- mmc_rescan(host);
+ mmc_rescan(&host->detect.work);
return 0;
}
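
Note the calling convention in mmc_resume_host(): with the data cookie gone, a synchronous invocation has to pass the embedded work_struct, &host->detect.work, so that the container_of() in mmc_rescan() resolves back to the host.
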
memset(card, 0, sizeof(struct mmc_card));
card->host = host;
device_initialize(&card->dev);
- card->dev.parent = card->host->dev;
+ card->dev.parent = mmc_dev(host);
card->dev.bus = &mmc_bus_type;
card->dev.release = mmc_release_card;
}
}
- static void mmc_host_classdev_release(struct class_device *dev)
+ static void mmc_host_classdev_release(struct device *dev)
{
struct mmc_host *host = cls_dev_to_mmc_host(dev);
kfree(host);
static struct class mmc_host_class = {
.name = "mmc_host",
- .release = mmc_host_classdev_release,
+ .dev_release = mmc_host_classdev_release,
};
static DEFINE_IDR(mmc_host_idr);
if (host) {
memset(host, 0, sizeof(struct mmc_host) + extra);
- host->dev = dev;
- host->class_dev.dev = host->dev;
+ host->parent = dev;
+ host->class_dev.parent = dev;
host->class_dev.class = &mmc_host_class;
- class_device_initialize(&host->class_dev);
+ device_initialize(&host->class_dev);
}
return host;
if (err)
return err;
- snprintf(host->class_dev.class_id, BUS_ID_SIZE,
+ snprintf(host->class_dev.bus_id, BUS_ID_SIZE,
"mmc%d", host->index);
- return class_device_add(&host->class_dev);
+ return device_add(&host->class_dev);
}
/*
*/
void mmc_remove_host_sysfs(struct mmc_host *host)
{
- class_device_del(&host->class_dev);
+ device_del(&host->class_dev);
spin_lock(&mmc_host_lock);
idr_remove(&mmc_host_idr, host->index);
*/
void mmc_free_host_sysfs(struct mmc_host *host)
{
- class_device_put(&host->class_dev);
+ put_device(&host->class_dev);
}
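
With the class_device interface gone, the class-side device is a plain struct device embedded in struct mmc_host, so cls_dev_to_mmc_host() is presumably the usual container_of() accessor, along these lines:

/* sketch -- assumes class_dev is the struct device embedded in mmc_host */
#define cls_dev_to_mmc_host(d)	container_of(d, struct mmc_host, class_dev)
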
static struct workqueue_struct *workqueue;
-/*
- * Internal function. Schedule work in the MMC work queue.
- */
-int mmc_schedule_work(struct work_struct *work)
-{
- return queue_work(workqueue, work);
-}
-
/*
* Internal function. Schedule delayed work in the MMC work queue.
*/
-int mmc_schedule_delayed_work(struct work_struct *work, unsigned long delay)
+int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay)
{
return queue_delayed_work(workqueue, work, delay);
}
#include "bnx2.h"
#include "bnx2_fw.h"
+ #include "bnx2_fw2.h"
#define DRV_MODULE_NAME "bnx2"
#define PFX DRV_MODULE_NAME ": "
- #define DRV_MODULE_VERSION "1.4.45"
- #define DRV_MODULE_RELDATE "September 29, 2006"
+ #define DRV_MODULE_VERSION "1.5.1"
+ #define DRV_MODULE_RELDATE "November 15, 2006"
#define RUN_AT(x) (jiffies + (x))
NC370F,
BCM5708,
BCM5708S,
+ BCM5709,
} board_t;
/* indexed by board_t, above */
{ "HP NC370F Multifunction Gigabit Server Adapter" },
{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
+ { "Broadcom NetXtreme II BCM5709 1000Base-T" },
};
static struct pci_device_id bnx2_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
{ 0, }
};
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
offset += cid_addr;
- REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
- REG_WR(bp, BNX2_CTX_DATA, val);
+ if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ int i;
+
+ REG_WR(bp, BNX2_CTX_CTX_DATA, val);
+ REG_WR(bp, BNX2_CTX_CTX_CTRL,
+ offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
+ for (i = 0; i < 5; i++) {
+ u32 val;
+ val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
+ if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
+ break;
+ udelay(5);
+ }
+ } else {
+ REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
+ REG_WR(bp, BNX2_CTX_DATA, val);
+ }
}
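
On the 5709 the context memory sits behind a request/acknowledge interface instead of a flat address/data window: the value is staged in CTX_CTX_DATA, the write is started by setting WRITE_REQ alongside the offset in CTX_CTX_CTRL, and the short udelay() loop waits for hardware to clear the bit again.
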
static int
{
int i;
+ for (i = 0; i < bp->ctx_pages; i++) {
+ if (bp->ctx_blk[i]) {
+ pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
+ bp->ctx_blk[i],
+ bp->ctx_blk_mapping[i]);
+ bp->ctx_blk[i] = NULL;
+ }
+ }
if (bp->status_blk) {
pci_free_consistent(bp->pdev, bp->status_stats_size,
bp->status_blk, bp->status_blk_mapping);
bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
+ if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
+ if (bp->ctx_pages == 0)
+ bp->ctx_pages = 1;
+ for (i = 0; i < bp->ctx_pages; i++) {
+ bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
+ BCM_PAGE_SIZE,
+ &bp->ctx_blk_mapping[i]);
+ if (bp->ctx_blk[i] == NULL)
+ goto alloc_mem_err;
+ }
+ }
return 0;
alloc_mem_err:
val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
- BNX2_EMAC_MODE_25G);
+ BNX2_EMAC_MODE_25G_MODE);
if (bp->link_up) {
switch (bp->line_speed) {
case SPEED_10:
- if (CHIP_NUM(bp) == CHIP_NUM_5708) {
- val |= BNX2_EMAC_MODE_PORT_MII_10;
+ if (CHIP_NUM(bp) != CHIP_NUM_5706) {
+ val |= BNX2_EMAC_MODE_PORT_MII_10M;
break;
}
/* fall through */
val |= BNX2_EMAC_MODE_PORT_MII;
break;
case SPEED_2500:
- val |= BNX2_EMAC_MODE_25G;
+ val |= BNX2_EMAC_MODE_25G_MODE;
/* fall through */
case SPEED_1000:
val |= BNX2_EMAC_MODE_PORT_GMII;
u32 bmsr;
u8 link_up;
- if (bp->loopback == MAC_LOOPBACK) {
+ if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
bp->link_up = 1;
return 0;
}
u32 bmcr;
bnx2_read_phy(bp, MII_BMCR, &bmcr);
+ bmcr &= ~BCM5708S_BMCR_FORCE_2500;
if (!(bmcr & BMCR_ANENABLE)) {
bnx2_write_phy(bp, MII_BMCR, bmcr |
BMCR_ANENABLE);
u32 new_bmcr;
int force_link_down = 0;
- if (CHIP_NUM(bp) == CHIP_NUM_5708) {
+ bnx2_read_phy(bp, MII_ADVERTISE, &adv);
+ adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
+
+ bnx2_read_phy(bp, MII_BMCR, &bmcr);
+ new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
+ new_bmcr |= BMCR_SPEED1000;
+ if (bp->req_line_speed == SPEED_2500) {
+ new_bmcr |= BCM5708S_BMCR_FORCE_2500;
+ bnx2_read_phy(bp, BCM5708S_UP1, &up1);
+ if (!(up1 & BCM5708S_UP1_2G5)) {
+ up1 |= BCM5708S_UP1_2G5;
+ bnx2_write_phy(bp, BCM5708S_UP1, up1);
+ force_link_down = 1;
+ }
+ } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
bnx2_read_phy(bp, BCM5708S_UP1, &up1);
if (up1 & BCM5708S_UP1_2G5) {
up1 &= ~BCM5708S_UP1_2G5;
}
}
- bnx2_read_phy(bp, MII_ADVERTISE, &adv);
- adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
-
- bnx2_read_phy(bp, MII_BMCR, &bmcr);
- new_bmcr = bmcr & ~BMCR_ANENABLE;
- new_bmcr |= BMCR_SPEED1000;
if (bp->req_duplex == DUPLEX_FULL) {
adv |= ADVERTISE_1000XFULL;
new_bmcr |= BMCR_FULLDPLX;
bp->link_up = 0;
netif_carrier_off(bp->dev);
bnx2_write_phy(bp, MII_BMCR, new_bmcr);
+ bnx2_report_link(bp);
}
bnx2_write_phy(bp, MII_ADVERTISE, adv);
bnx2_write_phy(bp, MII_BMCR, new_bmcr);
if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
/* Force a link down visible on the other side */
if (bp->link_up) {
- int i;
-
bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
- for (i = 0; i < 110; i++) {
- udelay(100);
- }
+ spin_unlock_bh(&bp->phy_lock);
+ msleep(20);
+ spin_lock_bh(&bp->phy_lock);
}
bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
BMCR_ANENABLE);
- if (CHIP_NUM(bp) == CHIP_NUM_5706) {
- /* Speed up link-up time when the link partner
- * does not autonegotiate, which is very common
- * in blade servers. Some blade servers use
- * IPMI for keyboard input and it's important
- * to minimize link disruptions. Autoneg. involves
- * exchanging base pages plus 3 next pages and
- * normally completes in about 120 msec.
- */
- bp->current_interval = SERDES_AN_TIMEOUT;
- bp->serdes_an_pending = 1;
- mod_timer(&bp->timer, jiffies + bp->current_interval);
- }
+ /* Speed up link-up time when the link partner
+ * does not autonegotiate, which is very common
+ * in blade servers. Some blade servers use
+ * IPMI for keyboard input and it's important
+ * to minimize link disruptions. Autoneg. involves
+ * exchanging base pages plus 3 next pages and
+ * normally completes in about 120 msec.
+ */
+ bp->current_interval = SERDES_AN_TIMEOUT;
+ bp->serdes_an_pending = 1;
+ mod_timer(&bp->timer, jiffies + bp->current_interval);
}
return 0;
}
if (new_bmcr != bmcr) {
u32 bmsr;
- int i = 0;
bnx2_read_phy(bp, MII_BMSR, &bmsr);
bnx2_read_phy(bp, MII_BMSR, &bmsr);
if (bmsr & BMSR_LSTATUS) {
/* Force link down */
bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
- do {
- udelay(100);
- bnx2_read_phy(bp, MII_BMSR, &bmsr);
- bnx2_read_phy(bp, MII_BMSR, &bmsr);
- i++;
- } while ((bmsr & BMSR_LSTATUS) && (i < 620));
+ spin_unlock_bh(&bp->phy_lock);
+ msleep(50);
+ spin_lock_bh(&bp->phy_lock);
+
+ bnx2_read_phy(bp, MII_BMSR, &bmsr);
+ bnx2_read_phy(bp, MII_BMSR, &bmsr);
}
bnx2_write_phy(bp, MII_BMCR, new_bmcr);
{
bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
- if (CHIP_NUM(bp) == CHIP_NUM_5706) {
- REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
- }
+ if (CHIP_NUM(bp) == CHIP_NUM_5706)
+ REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
if (bp->dev->mtu > 1500) {
u32 val;
for (i = 0; i < 10; i++) {
if (bnx2_test_link(bp) == 0)
break;
- udelay(10);
+ msleep(100);
}
mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
- BNX2_EMAC_MODE_25G);
+ BNX2_EMAC_MODE_25G_MODE);
mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
return 0;
}
+ static int
+ bnx2_init_5709_context(struct bnx2 *bp)
+ {
+ int i, ret = 0;
+ u32 val;
+
+ val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
+ val |= (BCM_PAGE_BITS - 8) << 16;
+ REG_WR(bp, BNX2_CTX_COMMAND, val);
+ for (i = 0; i < bp->ctx_pages; i++) {
+ int j;
+
+ REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
+ (bp->ctx_blk_mapping[i] & 0xffffffff) |
+ BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
+ REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
+ (u64) bp->ctx_blk_mapping[i] >> 32);
+ REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
+ BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
+ for (j = 0; j < 10; j++) {
+
+ val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
+ if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
+ break;
+ udelay(5);
+ }
+ if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
+ ret = -EBUSY;
+ break;
+ }
+ }
+ return ret;
+ }
+
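bnx2_init_5709_context() hands the ctx_blk pages allocated in bnx2_alloc_mem() to the chip's host page table, one acknowledged HOST_PAGE_TBL_CTRL write request per page, so the 5709 fetches its contexts from host memory rather than from on-chip context RAM.
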
static void
bnx2_init_context(struct bnx2 *bp)
{
return -ENOMEM;
}
- if (unlikely((align = (unsigned long) skb->data & 0x7))) {
- skb_reserve(skb, 8 - align);
- }
+ if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
+ skb_reserve(skb, BNX2_RX_ALIGN - align);
mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
PCI_DMA_FROMDEVICE);
if (dev->flags & IFF_PROMISC) {
/* Promiscuous mode. */
rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
- sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN;
+ sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
+ BNX2_RPM_SORT_USER0_PROM_VLAN;
}
else if (dev->flags & IFF_ALLMULTI) {
for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
}
}
- static void
+ static int
load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
{
u32 offset;
u32 val;
+ int rc;
/* Halt the CPU. */
val = REG_RD_IND(bp, cpu_reg->mode);
/* Load the Text area. */
offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
- if (fw->text) {
+ if (fw->gz_text) {
+ u32 text_len;
+ void *text;
+
+ rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
+ &text_len);
+ if (rc)
+ return rc;
+
+ fw->text = text;
+ }
+ if (fw->gz_text) {
int j;
for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
val &= ~cpu_reg->mode_value_halt;
REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
REG_WR_IND(bp, cpu_reg->mode, val);
+
+ return 0;
}
static int
bnx2_init_cpus(struct bnx2 *bp)
{
struct cpu_reg cpu_reg;
- struct fw_info fw;
+ struct fw_info *fw;
int rc = 0;
void *text;
u32 text_len;
cpu_reg.spad_base = BNX2_RXP_SCRATCH;
cpu_reg.mips_view_base = 0x8000000;
- fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
- fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
- fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
- fw.start_addr = bnx2_RXP_b06FwStartAddr;
-
- fw.text_addr = bnx2_RXP_b06FwTextAddr;
- fw.text_len = bnx2_RXP_b06FwTextLen;
- fw.text_index = 0;
+ if (CHIP_NUM(bp) == CHIP_NUM_5709)
+ fw = &bnx2_rxp_fw_09;
+ else
+ fw = &bnx2_rxp_fw_06;
- rc = bnx2_gunzip(bp, bnx2_RXP_b06FwText, sizeof(bnx2_RXP_b06FwText),
- &text, &text_len);
+ rc = load_cpu_fw(bp, &cpu_reg, fw);
if (rc)
goto init_cpu_err;
- fw.text = text;
-
- fw.data_addr = bnx2_RXP_b06FwDataAddr;
- fw.data_len = bnx2_RXP_b06FwDataLen;
- fw.data_index = 0;
- fw.data = bnx2_RXP_b06FwData;
-
- fw.sbss_addr = bnx2_RXP_b06FwSbssAddr;
- fw.sbss_len = bnx2_RXP_b06FwSbssLen;
- fw.sbss_index = 0;
- fw.sbss = bnx2_RXP_b06FwSbss;
-
- fw.bss_addr = bnx2_RXP_b06FwBssAddr;
- fw.bss_len = bnx2_RXP_b06FwBssLen;
- fw.bss_index = 0;
- fw.bss = bnx2_RXP_b06FwBss;
-
- fw.rodata_addr = bnx2_RXP_b06FwRodataAddr;
- fw.rodata_len = bnx2_RXP_b06FwRodataLen;
- fw.rodata_index = 0;
- fw.rodata = bnx2_RXP_b06FwRodata;
-
- load_cpu_fw(bp, &cpu_reg, &fw);
-
/* Initialize the TX Processor. */
cpu_reg.mode = BNX2_TXP_CPU_MODE;
cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
cpu_reg.spad_base = BNX2_TXP_SCRATCH;
cpu_reg.mips_view_base = 0x8000000;
- fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
- fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
- fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
- fw.start_addr = bnx2_TXP_b06FwStartAddr;
-
- fw.text_addr = bnx2_TXP_b06FwTextAddr;
- fw.text_len = bnx2_TXP_b06FwTextLen;
- fw.text_index = 0;
+ if (CHIP_NUM(bp) == CHIP_NUM_5709)
+ fw = &bnx2_txp_fw_09;
+ else
+ fw = &bnx2_txp_fw_06;
- rc = bnx2_gunzip(bp, bnx2_TXP_b06FwText, sizeof(bnx2_TXP_b06FwText),
- &text, &text_len);
+ rc = load_cpu_fw(bp, &cpu_reg, fw);
if (rc)
goto init_cpu_err;
- fw.text = text;
-
- fw.data_addr = bnx2_TXP_b06FwDataAddr;
- fw.data_len = bnx2_TXP_b06FwDataLen;
- fw.data_index = 0;
- fw.data = bnx2_TXP_b06FwData;
-
- fw.sbss_addr = bnx2_TXP_b06FwSbssAddr;
- fw.sbss_len = bnx2_TXP_b06FwSbssLen;
- fw.sbss_index = 0;
- fw.sbss = bnx2_TXP_b06FwSbss;
-
- fw.bss_addr = bnx2_TXP_b06FwBssAddr;
- fw.bss_len = bnx2_TXP_b06FwBssLen;
- fw.bss_index = 0;
- fw.bss = bnx2_TXP_b06FwBss;
-
- fw.rodata_addr = bnx2_TXP_b06FwRodataAddr;
- fw.rodata_len = bnx2_TXP_b06FwRodataLen;
- fw.rodata_index = 0;
- fw.rodata = bnx2_TXP_b06FwRodata;
-
- load_cpu_fw(bp, &cpu_reg, &fw);
-
/* Initialize the TX Patch-up Processor. */
cpu_reg.mode = BNX2_TPAT_CPU_MODE;
cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
cpu_reg.mips_view_base = 0x8000000;
- fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
- fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
- fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
- fw.start_addr = bnx2_TPAT_b06FwStartAddr;
-
- fw.text_addr = bnx2_TPAT_b06FwTextAddr;
- fw.text_len = bnx2_TPAT_b06FwTextLen;
- fw.text_index = 0;
+ if (CHIP_NUM(bp) == CHIP_NUM_5709)
+ fw = &bnx2_tpat_fw_09;
+ else
+ fw = &bnx2_tpat_fw_06;
- rc = bnx2_gunzip(bp, bnx2_TPAT_b06FwText, sizeof(bnx2_TPAT_b06FwText),
- &text, &text_len);
+ rc = load_cpu_fw(bp, &cpu_reg, fw);
if (rc)
goto init_cpu_err;
- fw.text = text;
-
- fw.data_addr = bnx2_TPAT_b06FwDataAddr;
- fw.data_len = bnx2_TPAT_b06FwDataLen;
- fw.data_index = 0;
- fw.data = bnx2_TPAT_b06FwData;
-
- fw.sbss_addr = bnx2_TPAT_b06FwSbssAddr;
- fw.sbss_len = bnx2_TPAT_b06FwSbssLen;
- fw.sbss_index = 0;
- fw.sbss = bnx2_TPAT_b06FwSbss;
-
- fw.bss_addr = bnx2_TPAT_b06FwBssAddr;
- fw.bss_len = bnx2_TPAT_b06FwBssLen;
- fw.bss_index = 0;
- fw.bss = bnx2_TPAT_b06FwBss;
-
- fw.rodata_addr = bnx2_TPAT_b06FwRodataAddr;
- fw.rodata_len = bnx2_TPAT_b06FwRodataLen;
- fw.rodata_index = 0;
- fw.rodata = bnx2_TPAT_b06FwRodata;
-
- load_cpu_fw(bp, &cpu_reg, &fw);
-
/* Initialize the Completion Processor. */
cpu_reg.mode = BNX2_COM_CPU_MODE;
cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
cpu_reg.spad_base = BNX2_COM_SCRATCH;
cpu_reg.mips_view_base = 0x8000000;
- fw.ver_major = bnx2_COM_b06FwReleaseMajor;
- fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
- fw.ver_fix = bnx2_COM_b06FwReleaseFix;
- fw.start_addr = bnx2_COM_b06FwStartAddr;
-
- fw.text_addr = bnx2_COM_b06FwTextAddr;
- fw.text_len = bnx2_COM_b06FwTextLen;
- fw.text_index = 0;
+ if (CHIP_NUM(bp) == CHIP_NUM_5709)
+ fw = &bnx2_com_fw_09;
+ else
+ fw = &bnx2_com_fw_06;
- rc = bnx2_gunzip(bp, bnx2_COM_b06FwText, sizeof(bnx2_COM_b06FwText),
- &text, &text_len);
+ rc = load_cpu_fw(bp, &cpu_reg, fw);
if (rc)
goto init_cpu_err;
- fw.text = text;
-
- fw.data_addr = bnx2_COM_b06FwDataAddr;
- fw.data_len = bnx2_COM_b06FwDataLen;
- fw.data_index = 0;
- fw.data = bnx2_COM_b06FwData;
-
- fw.sbss_addr = bnx2_COM_b06FwSbssAddr;
- fw.sbss_len = bnx2_COM_b06FwSbssLen;
- fw.sbss_index = 0;
- fw.sbss = bnx2_COM_b06FwSbss;
-
- fw.bss_addr = bnx2_COM_b06FwBssAddr;
- fw.bss_len = bnx2_COM_b06FwBssLen;
- fw.bss_index = 0;
- fw.bss = bnx2_COM_b06FwBss;
-
- fw.rodata_addr = bnx2_COM_b06FwRodataAddr;
- fw.rodata_len = bnx2_COM_b06FwRodataLen;
- fw.rodata_index = 0;
- fw.rodata = bnx2_COM_b06FwRodata;
+ /* Initialize the Command Processor. */
+ cpu_reg.mode = BNX2_CP_CPU_MODE;
+ cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
+ cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
+ cpu_reg.state = BNX2_CP_CPU_STATE;
+ cpu_reg.state_value_clear = 0xffffff;
+ cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
+ cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
+ cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
+ cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
+ cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
+ cpu_reg.spad_base = BNX2_CP_SCRATCH;
+ cpu_reg.mips_view_base = 0x8000000;
- load_cpu_fw(bp, &cpu_reg, &fw);
+ if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ fw = &bnx2_cp_fw_09;
+ load_cpu_fw(bp, &cpu_reg, fw);
+ if (rc)
+ goto init_cpu_err;
+ }
init_cpu_err:
bnx2_gunzip_end(bp);
return rc;
* before we issue a reset. */
val = REG_RD(bp, BNX2_MISC_ID);
- val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
- BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
- BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
+ if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
+ REG_RD(bp, BNX2_MISC_COMMAND);
+ udelay(5);
- /* Chip reset. */
- REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
+ val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
+ BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
- if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
- (CHIP_ID(bp) == CHIP_ID_5706_A1))
- msleep(15);
+ pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
- /* Reset takes approximate 30 usec */
- for (i = 0; i < 10; i++) {
- val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
- if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
- BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
- break;
+ } else {
+ val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
+ BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
+ BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
+
+ /* Chip reset. */
+ REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
+
+ if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
+ (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
+ current->state = TASK_UNINTERRUPTIBLE;
+ schedule_timeout(HZ / 50);
}
- udelay(10);
- }
- if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
- BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
- printk(KERN_ERR PFX "Chip reset did not complete\n");
- return -EBUSY;
+ /* Reset takes approximate 30 usec */
+ for (i = 0; i < 10; i++) {
+ val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
+ if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
+ BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
+ break;
+ udelay(10);
+ }
+
+ if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
+ BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
+ printk(KERN_ERR PFX "Chip reset did not complete\n");
+ return -EBUSY;
+ }
}
/* Make sure byte swapping is properly configured. */
/* Initialize context mapping and zero out the quick contexts. The
* context block must have already been enabled. */
- bnx2_init_context(bp);
+ if (CHIP_NUM(bp) == CHIP_NUM_5709)
+ bnx2_init_5709_context(bp);
+ else
+ bnx2_init_context(bp);
if ((rc = bnx2_init_cpus(bp)) != 0)
return rc;
return rc;
}
+ static void
+ bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
+ {
+ u32 val, offset0, offset1, offset2, offset3;
+
+ if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ offset0 = BNX2_L2CTX_TYPE_XI;
+ offset1 = BNX2_L2CTX_CMD_TYPE_XI;
+ offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
+ offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
+ } else {
+ offset0 = BNX2_L2CTX_TYPE;
+ offset1 = BNX2_L2CTX_CMD_TYPE;
+ offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
+ offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
+ }
+ val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
+ CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
+
+ val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
+ CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
+
+ val = (u64) bp->tx_desc_mapping >> 32;
+ CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
+
+ val = (u64) bp->tx_desc_mapping & 0xffffffff;
+ CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
+ }
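
Factoring out bnx2_init_tx_context() isolates the one per-chip difference, the 5709's "_XI" context offsets, so the TX ring setup in bnx2_init_tx_ring() needs no chip checks of its own.
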
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
struct tx_bd *txbd;
- u32 val;
+ u32 cid;
bp->tx_wake_thresh = bp->tx_ring_size / 2;
bp->hw_tx_cons = 0;
bp->tx_prod_bseq = 0;
- val = BNX2_L2CTX_TYPE_TYPE_L2;
- val |= BNX2_L2CTX_TYPE_SIZE_L2;
- CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);
+ cid = TX_CID;
+ bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
+ bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
- val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
- val |= 8 << 16;
- CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);
-
- val = (u64) bp->tx_desc_mapping >> 32;
- CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);
-
- val = (u64) bp->tx_desc_mapping & 0xffffffff;
- CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
+ bnx2_init_tx_context(bp, cid);
}
static void
/* 8 for CRC and VLAN */
bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
- /* 8 for alignment */
- bp->rx_buf_size = bp->rx_buf_use_size + 8;
+ /* hw alignment */
+ bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
ring_prod = prod = bp->rx_prod = 0;
bp->rx_cons = 0;
if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
return rc;
+ spin_lock_bh(&bp->phy_lock);
bnx2_init_phy(bp);
+ spin_unlock_bh(&bp->phy_lock);
bnx2_set_link(bp);
return 0;
}
bnx2_set_mac_loopback(bp);
}
else if (loopback_mode == BNX2_PHY_LOOPBACK) {
- bp->loopback = 0;
+ bp->loopback = PHY_LOOPBACK;
bnx2_set_phy_loopback(bp);
}
else
bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
bp->tx_prod_bseq += pkt_size;
- REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, bp->tx_prod);
- REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
+ REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
+ REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
udelay(100);
}
static void
- bnx2_timer(unsigned long data)
+ bnx2_5706_serdes_timer(struct bnx2 *bp)
{
- struct bnx2 *bp = (struct bnx2 *) data;
- u32 msg;
+ spin_lock(&bp->phy_lock);
+ if (bp->serdes_an_pending)
+ bp->serdes_an_pending--;
+ else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
+ u32 bmcr;
- if (!netif_running(bp->dev))
- return;
+ bp->current_interval = bp->timer_interval;
- if (atomic_read(&bp->intr_sem) != 0)
- goto bnx2_restart_timer;
+ bnx2_read_phy(bp, MII_BMCR, &bmcr);
- msg = (u32) ++bp->fw_drv_pulse_wr_seq;
- REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
+ if (bmcr & BMCR_ANENABLE) {
+ u32 phy1, phy2;
- bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
+ bnx2_write_phy(bp, 0x1c, 0x7c00);
+ bnx2_read_phy(bp, 0x1c, &phy1);
- if ((bp->phy_flags & PHY_SERDES_FLAG) &&
- (CHIP_NUM(bp) == CHIP_NUM_5706)) {
+ bnx2_write_phy(bp, 0x17, 0x0f01);
+ bnx2_read_phy(bp, 0x15, &phy2);
+ bnx2_write_phy(bp, 0x17, 0x0f01);
+ bnx2_read_phy(bp, 0x15, &phy2);
- spin_lock(&bp->phy_lock);
- if (bp->serdes_an_pending) {
- bp->serdes_an_pending--;
+ if ((phy1 & 0x10) && /* SIGNAL DETECT */
+ !(phy2 & 0x20)) { /* no CONFIG */
+
+ bmcr &= ~BMCR_ANENABLE;
+ bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
+ bnx2_write_phy(bp, MII_BMCR, bmcr);
+ bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
+ }
}
- else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
- u32 bmcr;
+ }
+ else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
+ (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
+ u32 phy2;
- bp->current_interval = bp->timer_interval;
+ bnx2_write_phy(bp, 0x17, 0x0f01);
+ bnx2_read_phy(bp, 0x15, &phy2);
+ if (phy2 & 0x20) {
+ u32 bmcr;
bnx2_read_phy(bp, MII_BMCR, &bmcr);
+ bmcr |= BMCR_ANENABLE;
+ bnx2_write_phy(bp, MII_BMCR, bmcr);
- if (bmcr & BMCR_ANENABLE) {
- u32 phy1, phy2;
+ bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
+ }
+ } else
+ bp->current_interval = bp->timer_interval;
- bnx2_write_phy(bp, 0x1c, 0x7c00);
- bnx2_read_phy(bp, 0x1c, &phy1);
+ spin_unlock(&bp->phy_lock);
+ }
- bnx2_write_phy(bp, 0x17, 0x0f01);
- bnx2_read_phy(bp, 0x15, &phy2);
- bnx2_write_phy(bp, 0x17, 0x0f01);
- bnx2_read_phy(bp, 0x15, &phy2);
+ static void
+ bnx2_5708_serdes_timer(struct bnx2 *bp)
+ {
+ if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
+ bp->serdes_an_pending = 0;
+ return;
+ }
- if ((phy1 & 0x10) && /* SIGNAL DETECT */
- !(phy2 & 0x20)) { /* no CONFIG */
+ spin_lock(&bp->phy_lock);
+ if (bp->serdes_an_pending)
+ bp->serdes_an_pending--;
+ else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
+ u32 bmcr;
- bmcr &= ~BMCR_ANENABLE;
- bmcr |= BMCR_SPEED1000 |
- BMCR_FULLDPLX;
- bnx2_write_phy(bp, MII_BMCR, bmcr);
- bp->phy_flags |=
- PHY_PARALLEL_DETECT_FLAG;
- }
- }
+ bnx2_read_phy(bp, MII_BMCR, &bmcr);
+
+ if (bmcr & BMCR_ANENABLE) {
+ bmcr &= ~BMCR_ANENABLE;
+ bmcr |= BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500;
+ bnx2_write_phy(bp, MII_BMCR, bmcr);
+ bp->current_interval = SERDES_FORCED_TIMEOUT;
+ } else {
+ bmcr &= ~(BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500);
+ bmcr |= BMCR_ANENABLE;
+ bnx2_write_phy(bp, MII_BMCR, bmcr);
+ bp->serdes_an_pending = 2;
+ bp->current_interval = bp->timer_interval;
}
- else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
- (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
- u32 phy2;
- bnx2_write_phy(bp, 0x17, 0x0f01);
- bnx2_read_phy(bp, 0x15, &phy2);
- if (phy2 & 0x20) {
- u32 bmcr;
+ } else
+ bp->current_interval = bp->timer_interval;
- bnx2_read_phy(bp, MII_BMCR, &bmcr);
- bmcr |= BMCR_ANENABLE;
- bnx2_write_phy(bp, MII_BMCR, bmcr);
+ spin_unlock(&bp->phy_lock);
+ }
+
+ static void
+ bnx2_timer(unsigned long data)
+ {
+ struct bnx2 *bp = (struct bnx2 *) data;
+ u32 msg;
- bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
+ if (!netif_running(bp->dev))
+ return;
- }
- }
- else
- bp->current_interval = bp->timer_interval;
+ if (atomic_read(&bp->intr_sem) != 0)
+ goto bnx2_restart_timer;
- spin_unlock(&bp->phy_lock);
+ msg = (u32) ++bp->fw_drv_pulse_wr_seq;
+ REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
+
+ bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
+
+ if (bp->phy_flags & PHY_SERDES_FLAG) {
+ if (CHIP_NUM(bp) == CHIP_NUM_5706)
+ bnx2_5706_serdes_timer(bp);
+ else if (CHIP_NUM(bp) == CHIP_NUM_5708)
+ bnx2_5708_serdes_timer(bp);
}
bnx2_restart_timer:
}
static void
-bnx2_reset_task(void *data)
+bnx2_reset_task(struct work_struct *work)
{
- struct bnx2 *bp = data;
+ struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
if (!netif_running(bp->dev))
return;
prod = NEXT_TX_BD(prod);
bp->tx_prod_bseq += skb->len;
- REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
- REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
+ REG_WR16(bp, bp->tx_bidx_addr, prod);
+ REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
mmiowb();
}
else {
if (bp->phy_flags & PHY_SERDES_FLAG) {
- if ((cmd->speed != SPEED_1000) ||
- (cmd->duplex != DUPLEX_FULL)) {
+ if ((cmd->speed != SPEED_1000 &&
+ cmd->speed != SPEED_2500) ||
+ (cmd->duplex != DUPLEX_FULL))
+ return -EINVAL;
+
+ if (cmd->speed == SPEED_2500 &&
+ !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
return -EINVAL;
- }
}
else if (cmd->speed == SPEED_1000) {
return -EINVAL;
msleep(20);
spin_lock_bh(&bp->phy_lock);
- if (CHIP_NUM(bp) == CHIP_NUM_5706) {
- bp->current_interval = SERDES_AN_TIMEOUT;
- bp->serdes_an_pending = 1;
- mod_timer(&bp->timer, jiffies + bp->current_interval);
- }
+
+ bp->current_interval = SERDES_AN_TIMEOUT;
+ bp->serdes_an_pending = 1;
+ mod_timer(&bp->timer, jiffies + bp->current_interval);
}
bnx2_read_phy(bp, MII_BMCR, &bmcr);
memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
if (etest->flags & ETH_TEST_FL_OFFLINE) {
+ int i;
+
bnx2_netif_stop(bp);
bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
bnx2_free_skbs(bp);
}
/* wait for link up */
- msleep_interruptible(3000);
- if ((!bp->link_up) && !(bp->phy_flags & PHY_SERDES_FLAG))
- msleep_interruptible(4000);
+ for (i = 0; i < 7; i++) {
+ if (bp->link_up)
+ break;
+ msleep_interruptible(1000);
+ }
}
if (bnx2_test_nvram(bp) != 0) {
goto err_out_release;
}
- bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
- if (bp->pcix_cap == 0) {
- dev_err(&pdev->dev, "Cannot find PCIX capability, aborting.\n");
- rc = -EIO;
- goto err_out_release;
- }
-
if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
bp->flags |= USING_DAC_FLAG;
if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
bp->pdev = pdev;
spin_lock_init(&bp->phy_lock);
- INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
+ INIT_WORK(&bp->reset_task, bnx2_reset_task);
dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
- mem_len = MB_GET_CID_ADDR(17);
+ mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
dev->mem_end = dev->mem_start + mem_len;
dev->irq = pdev->irq;
bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
+ if (CHIP_NUM(bp) != CHIP_NUM_5709) {
+ bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
+ if (bp->pcix_cap == 0) {
+ dev_err(&pdev->dev,
+ "Cannot find PCIX capability, aborting.\n");
+ rc = -EIO;
+ goto err_out_unmap;
+ }
+ }
+
/* Get bus information. */
reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
bp->phy_addr = 1;
/* Disable WOL support if we are running on a SERDES chip. */
- if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
+ if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ if (CHIP_BOND_ID(bp) != BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
+ bp->phy_flags |= PHY_SERDES_FLAG;
+ } else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
bp->phy_flags |= PHY_SERDES_FLAG;
+
+ if (bp->phy_flags & PHY_SERDES_FLAG) {
bp->flags |= NO_WOL_FLAG;
- if (CHIP_NUM(bp) == CHIP_NUM_5708) {
+ if (CHIP_NUM(bp) != CHIP_NUM_5706) {
bp->phy_addr = 2;
reg = REG_RD_IND(bp, bp->shmem_base +
BNX2_SHARED_HW_CFG_CONFIG);
u64 csum_start_off, csum_stuff_off;
csum_start_off = (u64) (skb->h.raw - skb->data);
- csum_stuff_off = (u64) ((skb->h.raw + skb->csum) - skb->data);
+ csum_stuff_off = csum_start_off + skb->csum_offset;
ctrl = TX_DESC_CSUM_EN |
CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
return 0;
}
-static void cas_reset_task(void *data)
+static void cas_reset_task(struct work_struct *work)
{
- struct cas *cp = (struct cas *) data;
+ struct cas *cp = container_of(work, struct cas, reset_task);
#if 0
int pending = atomic_read(&cp->reset_task_pending);
#else
atomic_set(&cp->reset_task_pending_spare, 0);
atomic_set(&cp->reset_task_pending_mtu, 0);
#endif
- INIT_WORK(&cp->reset_task, cas_reset_task, cp);
+ INIT_WORK(&cp->reset_task, cas_reset_task);
/* Default link parameters */
if (link_mode >= 0 && link_mode <= 6)
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/ethtool.h>
+ #include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/init.h>
#define DRV_DESCRIPTION "Chelsio 10Gb Ethernet Driver"
#define DRV_NAME "cxgb"
- #define DRV_VERSION "2.1.1"
+ #define DRV_VERSION "2.2"
#define PFX DRV_NAME ": "
#define CH_ERR(fmt, ...) printk(KERN_ERR PFX fmt, ## __VA_ARGS__)
#define CH_WARN(fmt, ...) printk(KERN_WARNING PFX fmt, ## __VA_ARGS__)
#define CH_ALERT(fmt, ...) printk(KERN_ALERT PFX fmt, ## __VA_ARGS__)
+ /*
+ * More powerful macro that selectively prints messages based on msg_enable.
+ * For info and debugging messages.
+ */
+ #define CH_MSG(adapter, level, category, fmt, ...) do { \
+ if ((adapter)->msg_enable & NETIF_MSG_##category) \
+ printk(KERN_##level PFX "%s: " fmt, (adapter)->name, \
+ ## __VA_ARGS__); \
+ } while (0)
+
+ #ifdef DEBUG
+ # define CH_DBG(adapter, category, fmt, ...) \
+ CH_MSG(adapter, DEBUG, category, fmt, ## __VA_ARGS__)
+ #else
+ # define CH_DBG(adapter, category, fmt, ...)
+ #endif
+
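As a usage sketch, the token pasting in CH_MSG means the level and category are bare suffixes (link_stat here is a hypothetical local):

/* prints at KERN_INFO, but only if NETIF_MSG_LINK is set in msg_enable */
CH_MSG(adapter, INFO, LINK, "link is %s\n", link_stat ? "up" : "down");

CH_DBG layers a KERN_DEBUG message on top and compiles away entirely when DEBUG is not defined.
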
#define CH_DEVICE(devid, ssid, idx) \
{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
typedef struct adapter adapter_t;
- void t1_elmer0_ext_intr(adapter_t *adapter);
- void t1_link_changed(adapter_t *adapter, int port_id, int link_status,
- int speed, int duplex, int fc);
-
struct t1_rx_mode {
struct net_device *dev;
u32 idx;
}
#define MAX_NPORTS 4
+ #define PORT_MASK ((1 << MAX_NPORTS) - 1)
+ #define NMTUS 8
+ #define TCB_SIZE 128
#define SPEED_INVALID 0xffff
#define DUPLEX_INVALID 0xff
enum {
CHBT_BOARD_N110,
- CHBT_BOARD_N210
+ CHBT_BOARD_N210,
+ CHBT_BOARD_7500,
+ CHBT_BOARD_8000,
+ CHBT_BOARD_CHT101,
+ CHBT_BOARD_CHT110,
+ CHBT_BOARD_CHT210,
+ CHBT_BOARD_CHT204,
+ CHBT_BOARD_CHT204V,
+ CHBT_BOARD_CHT204E,
+ CHBT_BOARD_CHN204,
+ CHBT_BOARD_COUGAR,
+ CHBT_BOARD_6800,
+ CHBT_BOARD_SIMUL,
};
enum {
+ CHBT_TERM_FPGA,
CHBT_TERM_T1,
- CHBT_TERM_T2
+ CHBT_TERM_T2,
+ CHBT_TERM_T3
};
enum {
+ CHBT_MAC_CHELSIO_A,
+ CHBT_MAC_IXF1010,
CHBT_MAC_PM3393,
+ CHBT_MAC_VSC7321,
+ CHBT_MAC_DUMMY
};
enum {
+ CHBT_PHY_88E1041,
+ CHBT_PHY_88E1111,
CHBT_PHY_88X2010,
+ CHBT_PHY_XPAK,
+ CHBT_PHY_MY3126,
+ CHBT_PHY_8244,
+ CHBT_PHY_DUMMY
};
enum {
unsigned char is_pcix;
};
+ struct tp_params {
+ unsigned int pm_size;
+ unsigned int cm_size;
+ unsigned int pm_rx_base;
+ unsigned int pm_tx_base;
+ unsigned int pm_rx_pg_size;
+ unsigned int pm_tx_pg_size;
+ unsigned int pm_rx_num_pgs;
+ unsigned int pm_tx_num_pgs;
+ unsigned int rx_coalescing_size;
+ unsigned int use_5tuple_mode;
+ };
+
+ struct mc5_params {
+ unsigned int mode; /* selects MC5 width */
+ unsigned int nservers; /* size of server region */
+ unsigned int nroutes; /* size of routing region */
+ };
+
+ /* Default MC5 region sizes */
+ #define DEFAULT_SERVER_REGION_LEN 256
+ #define DEFAULT_RT_REGION_LEN 1024
+
struct adapter_params {
struct sge_params sge;
+ struct mc5_params mc5;
+ struct tp_params tp;
struct chelsio_pci_params pci;
const struct board_info *brd_info;
+ unsigned short mtus[NMTUS];
unsigned int nports; /* # of ethernet ports */
unsigned int stats_update_period;
unsigned short chip_revision;
unsigned char chip_version;
+ unsigned char is_asic;
+ unsigned char has_msi;
};
struct link_config {
/* Terminator modules. */
struct sge *sge;
struct peespi *espi;
+ struct petp *tp;
struct port_info port[MAX_NPORTS];
- struct work_struct stats_update_task;
+ struct delayed_work stats_update_task;
struct timer_list stats_update_timer;
- struct semaphore mib_mutex;
spinlock_t tpi_lock;
spinlock_t work_lock;
+ spinlock_t mac_lock;
+
/* guards async operations */
spinlock_t async_lock ____cacheline_aligned;
u32 slow_intr_mask;
+ int t1powersave;
};
enum { /* adapter flags */
const char *desc;
};
+ static inline int t1_is_asic(const adapter_t *adapter)
+ {
+ return adapter->params.is_asic;
+ }
+
extern struct pci_device_id t1_pci_tbl[];
static inline int adapter_matches_type(const adapter_t *adapter,
return board_info(adap)->clock_core / 1000000;
}
+ extern int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp);
+ extern int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
extern int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
extern int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value);
extern void t1_interrupts_enable(adapter_t *adapter);
extern void t1_interrupts_disable(adapter_t *adapter);
extern void t1_interrupts_clear(adapter_t *adapter);
- extern int elmer0_ext_intr_handler(adapter_t *adapter);
+ extern int t1_elmer0_ext_intr_handler(adapter_t *adapter);
extern int t1_slow_intr_handler(adapter_t *adapter);
extern int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
extern int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi);
extern void t1_free_sw_modules(adapter_t *adapter);
extern void t1_fatal_err(adapter_t *adapter);
-
- extern void t1_tp_set_udp_checksum_offload(adapter_t *adapter, int enable);
- extern void t1_tp_set_tcp_checksum_offload(adapter_t *adapter, int enable);
- extern void t1_tp_set_ip_checksum_offload(adapter_t *adapter, int enable);
-
+ extern void t1_link_changed(adapter_t *adapter, int port_id);
+ extern void t1_link_negotiated(adapter_t *adapter, int port_id, int link_stat,
+ int speed, int duplex, int pause);
#endif /* _CXGB_COMMON_H_ */
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
- #include <linux/proc_fs.h>
#include <linux/dma-mapping.h>
#include <asm/uaccess.h>
#include "gmac.h"
#include "cphy.h"
#include "sge.h"
+ #include "tp.h"
#include "espi.h"
+ #include "elmer0.h"
- #ifdef work_struct
- #include <linux/tqueue.h>
- #define INIT_WORK INIT_TQUEUE
- #define schedule_work schedule_task
- #define flush_scheduled_work flush_scheduled_tasks
-
- static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
- {
- mod_timer(&ap->stats_update_timer, jiffies + secs * HZ);
- }
-
- static inline void cancel_mac_stats_update(struct adapter *ap)
- {
- del_timer_sync(&ap->stats_update_timer);
- flush_scheduled_tasks();
- }
-
- /*
- * Stats update timer for 2.4. It schedules a task to do the actual update as
- * we need to access MAC statistics in process context.
- */
- static void mac_stats_timer(unsigned long data)
- {
- struct adapter *ap = (struct adapter *)data;
-
- schedule_task(&ap->stats_update_task);
- }
- #else
#include <linux/workqueue.h>
static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
{
cancel_delayed_work(&ap->stats_update_task);
}
- #endif
#define MAX_CMDQ_ENTRIES 16384
#define MAX_CMDQ1_ENTRIES 1024
#define MAX_RX_JUMBO_BUFFERS 16384
#define MAX_TX_BUFFERS_HIGH 16384U
#define MAX_TX_BUFFERS_LOW 1536U
+ #define MAX_TX_BUFFERS 1460U
#define MIN_FL_ENTRIES 32
- #define PORT_MASK ((1 << MAX_NPORTS) - 1)
-
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
static int dflt_msg_enable = DFLT_MSG_ENABLE;
module_param(dflt_msg_enable, int, 0);
- MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 message enable bitmap");
+ MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");
+
+ #define HCLOCK 0x0
+ #define LCLOCK 0x1
+
+ /* T1 cards powersave mode */
+ static int t1_clock(struct adapter *adapter, int mode);
+ static int t1powersave = 1; /* HW default is powersave mode. */
+ module_param(t1powersave, int, 0);
+ MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");
+
+ static int disable_msi = 0;
+ module_param(disable_msi, int, 0);
+ MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
static const char pci_speed[][4] = {
"33", "66", "100", "133"
static void link_report(struct port_info *p)
{
if (!netif_carrier_ok(p->dev))
- printk(KERN_INFO "%s: link down\n", p->dev->name);
+ printk(KERN_INFO "%s: link down\n", p->dev->name);
else {
const char *s = "10Mbps";
case SPEED_100: s = "100Mbps"; break;
}
- printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
+ printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
p->dev->name, s,
p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
}
}
- void t1_link_changed(struct adapter *adapter, int port_id, int link_stat,
+ void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
int speed, int duplex, int pause)
{
struct port_info *p = &adapter->port[port_id];
netif_carrier_off(p->dev);
link_report(p);
+ /* multi-ports: inform toe */
+ if ((speed > 0) && (adapter->params.nports > 1)) {
+ unsigned int sched_speed = 10;
+ switch (speed) {
+ case SPEED_1000:
+ sched_speed = 1000;
+ break;
+ case SPEED_100:
+ sched_speed = 100;
+ break;
+ case SPEED_10:
+ sched_speed = 10;
+ break;
+ }
+ t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
+ }
}
}
static void enable_hw_csum(struct adapter *adapter)
{
if (adapter->flags & TSO_CAPABLE)
- t1_tp_set_ip_checksum_offload(adapter, 1); /* for TSO only */
- t1_tp_set_tcp_checksum_offload(adapter, 1);
+ t1_tp_set_ip_checksum_offload(adapter->tp, 1); /* for TSO only */
+ if (adapter->flags & UDP_CSUM_CAPABLE)
+ t1_tp_set_udp_checksum_offload(adapter->tp, 1);
+ t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
}
/*
}
t1_interrupts_clear(adapter);
- if ((err = request_irq(adapter->pdev->irq,
- t1_select_intr_handler(adapter), IRQF_SHARED,
- adapter->name, adapter))) {
+
+ adapter->params.has_msi = !disable_msi && pci_enable_msi(adapter->pdev) == 0;
+ err = request_irq(adapter->pdev->irq,
+ t1_select_intr_handler(adapter),
+ adapter->params.has_msi ? 0 : IRQF_SHARED,
+ adapter->name, adapter);
+ if (err) {
+ if (adapter->params.has_msi)
+ pci_disable_msi(adapter->pdev);
+
goto out_err;
}
+
t1_sge_start(adapter->sge);
t1_interrupts_enable(adapter);
out_err:
t1_sge_stop(adapter->sge);
t1_interrupts_disable(adapter);
free_irq(adapter->pdev->irq, adapter);
+ if (adapter->params.has_msi)
+ pci_disable_msi(adapter->pdev);
}
static int cxgb_open(struct net_device *dev)
/* Do a full update of the MAC stats */
pstats = p->mac->ops->statistics_update(p->mac,
- MAC_STATS_UPDATE_FULL);
+ MAC_STATS_UPDATE_FULL);
ns->tx_packets = pstats->TxUnicastFramesOK +
pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;
}
static char stats_strings[][ETH_GSTRING_LEN] = {
- "TxOctetsOK",
- "TxOctetsBad",
- "TxUnicastFramesOK",
- "TxMulticastFramesOK",
- "TxBroadcastFramesOK",
- "TxPauseFrames",
- "TxFramesWithDeferredXmissions",
- "TxLateCollisions",
- "TxTotalCollisions",
- "TxFramesAbortedDueToXSCollisions",
- "TxUnderrun",
- "TxLengthErrors",
- "TxInternalMACXmitError",
- "TxFramesWithExcessiveDeferral",
- "TxFCSErrors",
-
- "RxOctetsOK",
- "RxOctetsBad",
- "RxUnicastFramesOK",
- "RxMulticastFramesOK",
- "RxBroadcastFramesOK",
- "RxPauseFrames",
- "RxFCSErrors",
- "RxAlignErrors",
- "RxSymbolErrors",
- "RxDataErrors",
- "RxSequenceErrors",
- "RxRuntErrors",
- "RxJabberErrors",
- "RxInternalMACRcvError",
- "RxInRangeLengthErrors",
- "RxOutOfRangeLengthField",
- "RxFrameTooLongErrors",
-
- "TSO",
- "VLANextractions",
- "VLANinsertions",
+ "TxOctetsOK",
+ "TxOctetsBad",
+ "TxUnicastFramesOK",
+ "TxMulticastFramesOK",
+ "TxBroadcastFramesOK",
+ "TxPauseFrames",
+ "TxFramesWithDeferredXmissions",
+ "TxLateCollisions",
+ "TxTotalCollisions",
+ "TxFramesAbortedDueToXSCollisions",
+ "TxUnderrun",
+ "TxLengthErrors",
+ "TxInternalMACXmitError",
+ "TxFramesWithExcessiveDeferral",
+ "TxFCSErrors",
+
+ "RxOctetsOK",
+ "RxOctetsBad",
+ "RxUnicastFramesOK",
+ "RxMulticastFramesOK",
+ "RxBroadcastFramesOK",
+ "RxPauseFrames",
+ "RxFCSErrors",
+ "RxAlignErrors",
+ "RxSymbolErrors",
+ "RxDataErrors",
+ "RxSequenceErrors",
+ "RxRuntErrors",
+ "RxJabberErrors",
+ "RxInternalMACRcvError",
+ "RxInRangeLengthErrors",
+ "RxOutOfRangeLengthField",
+ "RxFrameTooLongErrors",
+
+ /* Port stats */
+ "RxPackets",
"RxCsumGood",
+ "TxPackets",
"TxCsumOffload",
- "RxDrops"
-
+ "TxTso",
+ "RxVlan",
+ "TxVlan",
+
+ /* Interrupt stats */
+ "rx drops",
+ "pure_rsps",
+ "unhandled irqs",
"respQ_empty",
"respQ_overflow",
"freelistQ_empty",
"pkt_mismatch",
"cmdQ_full0",
"cmdQ_full1",
- "tx_ipfrags",
- "tx_reg_pkts",
- "tx_lso_pkts",
- "tx_do_cksum",
-
+
"espi_DIP2ParityErr",
"espi_DIP4Err",
"espi_RxDrops",
"espi_RxOvfl",
"espi_ParityErr"
};
-
+
#define T2_REGMAP_SIZE (3 * 1024)
static int get_regs_len(struct net_device *dev)
struct adapter *adapter = dev->priv;
struct cmac *mac = adapter->port[dev->if_port].mac;
const struct cmac_statistics *s;
- const struct sge_port_stats *ss;
const struct sge_intr_counts *t;
+ struct sge_port_stats ss;
s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
- ss = t1_sge_get_port_stats(adapter->sge, dev->if_port);
- t = t1_sge_get_intr_counts(adapter->sge);
- *data++ = s->TxOctetsOK;
- *data++ = s->TxOctetsBad;
- *data++ = s->TxUnicastFramesOK;
- *data++ = s->TxMulticastFramesOK;
- *data++ = s->TxBroadcastFramesOK;
- *data++ = s->TxPauseFrames;
- *data++ = s->TxFramesWithDeferredXmissions;
- *data++ = s->TxLateCollisions;
- *data++ = s->TxTotalCollisions;
- *data++ = s->TxFramesAbortedDueToXSCollisions;
- *data++ = s->TxUnderrun;
- *data++ = s->TxLengthErrors;
- *data++ = s->TxInternalMACXmitError;
- *data++ = s->TxFramesWithExcessiveDeferral;
- *data++ = s->TxFCSErrors;
-
- *data++ = s->RxOctetsOK;
- *data++ = s->RxOctetsBad;
- *data++ = s->RxUnicastFramesOK;
- *data++ = s->RxMulticastFramesOK;
- *data++ = s->RxBroadcastFramesOK;
- *data++ = s->RxPauseFrames;
- *data++ = s->RxFCSErrors;
- *data++ = s->RxAlignErrors;
- *data++ = s->RxSymbolErrors;
- *data++ = s->RxDataErrors;
- *data++ = s->RxSequenceErrors;
- *data++ = s->RxRuntErrors;
- *data++ = s->RxJabberErrors;
- *data++ = s->RxInternalMACRcvError;
- *data++ = s->RxInRangeLengthErrors;
- *data++ = s->RxOutOfRangeLengthField;
- *data++ = s->RxFrameTooLongErrors;
-
- *data++ = ss->tso;
- *data++ = ss->vlan_xtract;
- *data++ = ss->vlan_insert;
- *data++ = ss->rx_cso_good;
- *data++ = ss->tx_cso;
- *data++ = ss->rx_drops;
-
- *data++ = (u64)t->respQ_empty;
- *data++ = (u64)t->respQ_overflow;
- *data++ = (u64)t->freelistQ_empty;
- *data++ = (u64)t->pkt_too_big;
- *data++ = (u64)t->pkt_mismatch;
- *data++ = (u64)t->cmdQ_full[0];
- *data++ = (u64)t->cmdQ_full[1];
- *data++ = (u64)t->tx_ipfrags;
- *data++ = (u64)t->tx_reg_pkts;
- *data++ = (u64)t->tx_lso_pkts;
- *data++ = (u64)t->tx_do_cksum;
+ *data++ = s->TxOctetsOK;
+ *data++ = s->TxOctetsBad;
+ *data++ = s->TxUnicastFramesOK;
+ *data++ = s->TxMulticastFramesOK;
+ *data++ = s->TxBroadcastFramesOK;
+ *data++ = s->TxPauseFrames;
+ *data++ = s->TxFramesWithDeferredXmissions;
+ *data++ = s->TxLateCollisions;
+ *data++ = s->TxTotalCollisions;
+ *data++ = s->TxFramesAbortedDueToXSCollisions;
+ *data++ = s->TxUnderrun;
+ *data++ = s->TxLengthErrors;
+ *data++ = s->TxInternalMACXmitError;
+ *data++ = s->TxFramesWithExcessiveDeferral;
+ *data++ = s->TxFCSErrors;
+
+ *data++ = s->RxOctetsOK;
+ *data++ = s->RxOctetsBad;
+ *data++ = s->RxUnicastFramesOK;
+ *data++ = s->RxMulticastFramesOK;
+ *data++ = s->RxBroadcastFramesOK;
+ *data++ = s->RxPauseFrames;
+ *data++ = s->RxFCSErrors;
+ *data++ = s->RxAlignErrors;
+ *data++ = s->RxSymbolErrors;
+ *data++ = s->RxDataErrors;
+ *data++ = s->RxSequenceErrors;
+ *data++ = s->RxRuntErrors;
+ *data++ = s->RxJabberErrors;
+ *data++ = s->RxInternalMACRcvError;
+ *data++ = s->RxInRangeLengthErrors;
+ *data++ = s->RxOutOfRangeLengthField;
+ *data++ = s->RxFrameTooLongErrors;
+
+ t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);
+ *data++ = ss.rx_packets;
+ *data++ = ss.rx_cso_good;
+ *data++ = ss.tx_packets;
+ *data++ = ss.tx_cso;
+ *data++ = ss.tx_tso;
+ *data++ = ss.vlan_xtract;
+ *data++ = ss.vlan_insert;
+
+ t = t1_sge_get_intr_counts(adapter->sge);
+ *data++ = t->rx_drops;
+ *data++ = t->pure_rsps;
+ *data++ = t->unhandled_irqs;
+ *data++ = t->respQ_empty;
+ *data++ = t->respQ_overflow;
+ *data++ = t->freelistQ_empty;
+ *data++ = t->pkt_too_big;
+ *data++ = t->pkt_mismatch;
+ *data++ = t->cmdQ_full[0];
+ *data++ = t->cmdQ_full[1];
+
+ if (adapter->espi) {
+ const struct espi_intr_counts *e;
+
+ e = t1_espi_get_intr_counts(adapter->espi);
+ *data++ = e->DIP2_parity_err;
+ *data++ = e->DIP4_err;
+ *data++ = e->rx_drops;
+ *data++ = e->tx_drops;
+ *data++ = e->rx_ovflw;
+ *data++ = e->parity_err;
+ }
}
static inline void reg_block_dump(struct adapter *ap, void *buf,
memset(buf, 0, T2_REGMAP_SIZE);
reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
+ reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
+ reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
+ reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
+ reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
+ reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
+ reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
+ reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
+ reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
+ reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->duplex = -1;
}
- cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
- cmd->phy_address = p->phy->addr;
- cmd->transceiver = XCVR_EXTERNAL;
- cmd->autoneg = p->link_config.autoneg;
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
+ cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
+ cmd->phy_address = p->phy->addr;
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->autoneg = p->link_config.autoneg;
+ cmd->maxtxpkt = 0;
+ cmd->maxrxpkt = 0;
return 0;
}
return -EINVAL;
if (adapter->flags & FULL_INIT_DONE)
- return -EBUSY;
+ return -EBUSY;
adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
static int get_eeprom_len(struct net_device *dev)
{
- return EEPROM_SIZE;
+ struct adapter *adapter = dev->priv;
+
+ return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
}
#define EEPROM_MAGIC(ap) \
.set_tso = set_tso,
};
- static void cxgb_proc_cleanup(struct adapter *adapter,
- struct proc_dir_entry *dir)
- {
- const char *name;
- name = adapter->name;
- remove_proc_entry(name, dir);
- }
- //#define chtoe_setup_toedev(adapter) NULL
- #define update_mtu_tab(adapter)
- #define write_smt_entry(adapter, idx)
-
static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
- struct adapter *adapter = dev->priv;
- struct mii_ioctl_data *data = if_mii(req);
+ struct adapter *adapter = dev->priv;
+ struct mii_ioctl_data *data = if_mii(req);
switch (cmd) {
- case SIOCGMIIPHY:
- data->phy_id = adapter->port[dev->if_port].phy->addr;
- /* FALLTHRU */
- case SIOCGMIIREG: {
+ case SIOCGMIIPHY:
+ data->phy_id = adapter->port[dev->if_port].phy->addr;
+ /* FALLTHRU */
+ case SIOCGMIIREG: {
struct cphy *phy = adapter->port[dev->if_port].phy;
u32 val;
if (!phy->mdio_read)
- return -EOPNOTSUPP;
+ return -EOPNOTSUPP;
phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
&val);
- data->val_out = val;
- break;
+ data->val_out = val;
+ break;
}
- case SIOCSMIIREG: {
+ case SIOCSMIIREG: {
struct cphy *phy = adapter->port[dev->if_port].phy;
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
if (!phy->mdio_write)
- return -EOPNOTSUPP;
+ return -EOPNOTSUPP;
phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
data->val_in);
- break;
+ break;
}
default:
struct cmac *mac = adapter->port[dev->if_port].mac;
if (!mac->ops->set_mtu)
- return -EOPNOTSUPP;
+ return -EOPNOTSUPP;
if (new_mtu < 68)
- return -EINVAL;
+ return -EINVAL;
if ((ret = mac->ops->set_mtu(mac, new_mtu)))
return ret;
dev->mtu = new_mtu;
struct adapter *adapter = dev->priv;
local_irq_save(flags);
- t1_select_intr_handler(adapter)(adapter->pdev->irq, adapter);
+ t1_select_intr_handler(adapter)(adapter->pdev->irq, adapter);
local_irq_restore(flags);
}
#endif
* Periodic accumulation of MAC statistics. This is used only if the MAC
* does not have any other way to prevent stats counter overflow.
*/
-static void mac_stats_task(void *data)
+static void mac_stats_task(struct work_struct *work)
{
int i;
- struct adapter *adapter = data;
+ struct adapter *adapter =
+ container_of(work, struct adapter, stats_update_task.work);
for_each_port(adapter, i) {
struct port_info *p = &adapter->port[i];
/*
* Processes elmer0 external interrupts in process context.
*/
-static void ext_intr_task(void *data)
+static void ext_intr_task(struct work_struct *work)
{
- struct adapter *adapter = data;
+ struct adapter *adapter =
+ container_of(work, struct adapter, ext_intr_handler_task);
- elmer0_ext_intr_handler(adapter);
+ t1_elmer0_ext_intr_handler(adapter);
/* Now reenable external interrupts */
spin_lock_irq(&adapter->async_lock);
adapter->slow_intr_mask |= F_PL_INTR_EXT;
writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
- adapter->regs + A_PL_ENABLE);
+ adapter->regs + A_PL_ENABLE);
spin_unlock_irq(&adapter->async_lock);
}
*/
adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
- adapter->regs + A_PL_ENABLE);
+ adapter->regs + A_PL_ENABLE);
schedule_work(&adapter->ext_intr_handler_task);
}
err = pci_enable_device(pdev);
if (err)
- return err;
+ return err;
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
CH_ERR("%s: cannot find PCI device memory base address\n",
pci_set_master(pdev);
- mmio_start = pci_resource_start(pdev, 0);
+ mmio_start = pci_resource_start(pdev, 0);
mmio_len = pci_resource_len(pdev, 0);
bi = t1_get_board_info(ent->driver_data);
adapter->msg_enable = dflt_msg_enable;
adapter->mmio_len = mmio_len;
- init_MUTEX(&adapter->mib_mutex);
spin_lock_init(&adapter->tpi_lock);
spin_lock_init(&adapter->work_lock);
spin_lock_init(&adapter->async_lock);
+ spin_lock_init(&adapter->mac_lock);
INIT_WORK(&adapter->ext_intr_handler_task,
- ext_intr_task, adapter);
- INIT_WORK(&adapter->stats_update_task, mac_stats_task,
- adapter);
+ ext_intr_task);
+ INIT_DELAYED_WORK(&adapter->stats_update_task,
+ mac_stats_task);
- #ifdef work_struct
- init_timer(&adapter->stats_update_timer);
- adapter->stats_update_timer.function = mac_stats_timer;
- adapter->stats_update_timer.data =
- (unsigned long)adapter;
- #endif
pci_set_drvdata(pdev, netdev);
}
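
The hunks above all apply the same workqueue API conversion: handlers take a struct work_struct * rather than a void * cookie, recover their enclosing object with container_of(), and periodic tasks move from INIT_WORK plus a private timer to INIT_DELAYED_WORK. A minimal sketch of the pattern, assuming illustrative names (struct foo, foo_irq_task, foo_stats_task, and the driver-specific hooks handle_slow_path()/accumulate_stats()):

	#include <linux/workqueue.h>

	struct foo {
		struct work_struct	irq_task;	/* one-shot work */
		struct delayed_work	stats_task;	/* periodic work */
	};

	static void foo_irq_task(struct work_struct *work)
	{
		/* container_of() walks back from the embedded member. */
		struct foo *p = container_of(work, struct foo, irq_task);

		handle_slow_path(p);	/* assumed driver-specific hook */
	}

	static void foo_stats_task(struct work_struct *work)
	{
		/* struct delayed_work embeds the work_struct as .work */
		struct foo *p = container_of(work, struct foo,
					     stats_task.work);

		accumulate_stats(p);	/* assumed driver-specific hook */
		schedule_delayed_work(&p->stats_task, HZ);	/* re-arm */
	}

	static void foo_init(struct foo *p)
	{
		INIT_WORK(&p->irq_task, foo_irq_task);
		INIT_DELAYED_WORK(&p->stats_task, foo_stats_task);
		schedule_delayed_work(&p->stats_task, HZ);
	}
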
netdev->vlan_rx_register = vlan_rx_register;
netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
#endif
- adapter->flags |= TSO_CAPABLE;
- netdev->features |= NETIF_F_TSO;
+
+ /* T204: disable TSO */
+ if (!(is_T2(adapter)) || bi->port_number != 4) {
+ adapter->flags |= TSO_CAPABLE;
+ netdev->features |= NETIF_F_TSO;
+ }
}
netdev->open = cxgb_open;
netdev->stop = cxgb_close;
netdev->hard_start_xmit = t1_start_xmit;
netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
- sizeof(struct cpl_tx_pkt_lso) :
- sizeof(struct cpl_tx_pkt);
+ sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);
netdev->get_stats = t1_get_stats;
netdev->set_multicast_list = t1_set_rxmode;
netdev->do_ioctl = t1_ioctl;
#endif
netdev->weight = 64;
- SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
+ SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
}
if (t1_init_sw_modules(adapter, bi) < 0) {
if (!adapter->registered_device_map)
adapter->name = adapter->port[i].dev->name;
- __set_bit(i, &adapter->registered_device_map);
+ __set_bit(i, &adapter->registered_device_map);
}
}
if (!adapter->registered_device_map) {
bi->desc, adapter->params.chip_revision,
adapter->params.pci.is_pcix ? "PCIX" : "PCI",
adapter->params.pci.speed, adapter->params.pci.width);
+
+ /*
+ * Set the T1B ASIC and memory clocks.
+ */
+ if (t1powersave)
+ adapter->t1powersave = LCLOCK; /* HW default is powersave mode. */
+ else
+ adapter->t1powersave = HCLOCK;
+ if (t1_is_T1B(adapter))
+ t1_clock(adapter, t1powersave);
+
return 0;
out_release_adapter_res:
t1_free_sw_modules(adapter);
out_free_dev:
if (adapter) {
- if (adapter->regs) iounmap(adapter->regs);
+ if (adapter->regs)
+ iounmap(adapter->regs);
for (i = bi->port_number - 1; i >= 0; --i)
- if (adapter->port[i].dev) {
- cxgb_proc_cleanup(adapter, proc_root_driver);
- kfree(adapter->port[i].dev);
- }
+ if (adapter->port[i].dev)
+ free_netdev(adapter->port[i].dev);
}
pci_release_regions(pdev);
out_disable_pdev:
return err;
}
+ static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
+ {
+ int data;
+ int i;
+ u32 val;
+
+ enum {
+ S_CLOCK = 1 << 3,
+ S_DATA = 1 << 4
+ };
+
+ for (i = (nbits - 1); i > -1; i--) {
+
+ udelay(50);
+
+ data = ((bitdata >> i) & 0x1);
+ __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+
+ if (data)
+ val |= S_DATA;
+ else
+ val &= ~S_DATA;
+
+ udelay(50);
+
+ /* Set SCLOCK low */
+ val &= ~S_CLOCK;
+ __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+
+ udelay(50);
+
+ /* Write SCLOCK high */
+ val |= S_CLOCK;
+ __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+
+ }
+ }
+
+ static int t1_clock(struct adapter *adapter, int mode)
+ {
+ u32 val;
+ int M_CORE_VAL;
+ int M_MEM_VAL;
+
+ enum {
+ M_CORE_BITS = 9,
+ T_CORE_VAL = 0,
+ T_CORE_BITS = 2,
+ N_CORE_VAL = 0,
+ N_CORE_BITS = 2,
+ M_MEM_BITS = 9,
+ T_MEM_VAL = 0,
+ T_MEM_BITS = 2,
+ N_MEM_VAL = 0,
+ N_MEM_BITS = 2,
+ NP_LOAD = 1 << 17,
+ S_LOAD_MEM = 1 << 5,
+ S_LOAD_CORE = 1 << 6,
+ S_CLOCK = 1 << 3
+ };
+
+ if (!t1_is_T1B(adapter))
+ return -ENODEV; /* Can't re-clock this chip. */
+
+ if (mode & 2) {
+ return 0; /* show current mode. */
+ }
+
+ if ((adapter->t1powersave & 1) == (mode & 1))
+ return -EALREADY; /* ASIC already running in mode. */
+
+ if ((mode & 1) == HCLOCK) {
+ M_CORE_VAL = 0x14;
+ M_MEM_VAL = 0x18;
+ adapter->t1powersave = HCLOCK; /* overclock */
+ } else {
+ M_CORE_VAL = 0xe;
+ M_MEM_VAL = 0x10;
+ adapter->t1powersave = LCLOCK; /* underclock */
+ }
+
+ /* Don't interrupt this serial stream! */
+ spin_lock(&adapter->tpi_lock);
+
+ /* Initialize for ASIC core */
+ __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ val |= NP_LOAD;
+ udelay(50);
+ __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+ udelay(50);
+ __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ val &= ~S_LOAD_CORE;
+ val &= ~S_CLOCK;
+ __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+ udelay(50);
+
+ /* Serial program the ASIC clock synthesizer */
+ bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
+ bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
+ bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
+ udelay(50);
+
+ /* Finish ASIC core */
+ __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ val |= S_LOAD_CORE;
+ udelay(50);
+ __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+ udelay(50);
+ __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ val &= ~S_LOAD_CORE;
+ udelay(50);
+ __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+ udelay(50);
+
+ /* Initialize for memory */
+ __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ val |= NP_LOAD;
+ udelay(50);
+ __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+ udelay(50);
+ __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ val &= ~S_LOAD_MEM;
+ val &= ~S_CLOCK;
+ udelay(50);
+ __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+ udelay(50);
+
+ /* Serial program the memory clock synthesizer */
+ bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
+ bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
+ bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
+ udelay(50);
+
+ /* Finish memory */
+ __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ val |= S_LOAD_MEM;
+ udelay(50);
+ __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+ udelay(50);
+ __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ val &= ~S_LOAD_MEM;
+ udelay(50);
+ __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+
+ spin_unlock(&adapter->tpi_lock);
+
+ return 0;
+ }
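
bit_bang() shifts the value out MSB first: each bit is presented on the ELMER0 S_DATA line, then latched by driving S_CLOCK low and back high. A hedged usage sketch for t1_clock(), assuming the driver's t1_is_T1B() and CH_ERR() helpers and tolerating the already-in-mode case:

	/* Illustrative: drop a T1B into its powersave clocks. */
	if (t1_is_T1B(adapter)) {
		int ret = t1_clock(adapter, LCLOCK);

		if (ret && ret != -EALREADY)
			CH_ERR("%s: reclock failed (%d)\n",
			       adapter->name, ret);
	}
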
+
static inline void t1_sw_reset(struct pci_dev *pdev)
{
pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
t1_free_sw_modules(adapter);
iounmap(adapter->regs);
while (--i >= 0)
- if (adapter->port[i].dev) {
- cxgb_proc_cleanup(adapter, proc_root_driver);
- kfree(adapter->port[i].dev);
- }
+ if (adapter->port[i].dev)
+ free_netdev(adapter->port[i].dev);
+
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
* the literal in the instruction before the code is loaded, the
* driver can change the algorithm.
*
- * INTDELAY - This loads the dead-man timer with its inital value.
+ * INTDELAY - This loads the dead-man timer with its initial value.
* When this timer expires the interrupt is asserted, and the
* timer is reset each time a new packet is received. (see
* BUNDLEMAX below to set the limit on number of chained packets)
schedule_work(&nic->tx_timeout_task);
}
-static void e100_tx_timeout_task(struct net_device *netdev)
+static void e100_tx_timeout_task(struct work_struct *work)
{
- struct nic *nic = netdev_priv(netdev);
+ struct nic *nic = container_of(work, struct nic, tx_timeout_task);
+ struct net_device *netdev = nic->netdev;
DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
readb(&nic->csr->scb.status));
nic->blink_timer.function = e100_blink_led;
nic->blink_timer.data = (unsigned long)nic;
- INIT_WORK(&nic->tx_timeout_task,
- (void (*)(void *))e100_tx_timeout_task, netdev);
+ INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
if((err = e100_alloc(nic))) {
DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
*******************************************************************************/
#include "e1000.h"
+ #include <net/ip6_checksum.h>
char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#else
#define DRIVERNAPI "-NAPI"
#endif
- #define DRV_VERSION "7.2.9-k4"DRIVERNAPI
+ #define DRV_VERSION "7.3.15-k2"DRIVERNAPI
char e1000_driver_version[] = DRV_VERSION;
static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
INTEL_E1000_ETHERNET_DEVICE(0x10B9),
INTEL_E1000_ETHERNET_DEVICE(0x10BA),
INTEL_E1000_ETHERNET_DEVICE(0x10BB),
+ INTEL_E1000_ETHERNET_DEVICE(0x10BC),
+ INTEL_E1000_ETHERNET_DEVICE(0x10C4),
+ INTEL_E1000_ETHERNET_DEVICE(0x10C5),
/* required last entry */
{0,}
};
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
+ #ifdef CONFIG_PCI_MSI
+ static irqreturn_t e1000_intr_msi(int irq, void *data);
+ #endif
static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter,
struct e1000_tx_ring *tx_ring);
#ifdef CONFIG_E1000_NAPI
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
-static void e1000_reset_task(struct net_device *dev);
+static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
struct sk_buff *skb);
flags = IRQF_SHARED;
#ifdef CONFIG_PCI_MSI
- if (adapter->hw.mac_type > e1000_82547_rev_2) {
+ if (adapter->hw.mac_type >= e1000_82571) {
adapter->have_msi = TRUE;
if ((err = pci_enable_msi(adapter->pdev))) {
DPRINTK(PROBE, ERR,
adapter->have_msi = FALSE;
}
}
- if (adapter->have_msi)
+ if (adapter->have_msi) {
flags &= ~IRQF_SHARED;
+ err = request_irq(adapter->pdev->irq, &e1000_intr_msi, flags,
+ netdev->name, netdev);
+ if (err)
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate interrupt Error: %d\n", err);
+ } else
#endif
if ((err = request_irq(adapter->pdev->irq, &e1000_intr, flags,
netdev->name, netdev)))
* e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
* For ASF and Pass Through versions of f/w this means that the
* driver is no longer loaded. For AMT version (only with 82573) i
- * of the f/w this means that the netowrk i/f is closed.
+ * of the f/w this means that the network i/f is closed.
*
**/
* e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
* For ASF and Pass Through versions of f/w this means that
* the driver is loaded. For AMT version (only with 82573)
- * of the f/w this means that the netowrk i/f is open.
+ * of the f/w this means that the network i/f is open.
*
**/
uint32_t ctrl_ext;
uint32_t swsm;
uint32_t extcnf;
+
/* Let firmware know the driver has taken over */
switch (adapter->hw.mac_type) {
case e1000_82571:
e1000_reset(struct e1000_adapter *adapter)
{
uint32_t pba, manc;
- #ifdef DISABLE_MULR
- uint32_t tctl;
- #endif
uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
/* Repartition Pba for greater than 9k mtu
e1000_reset_hw(&adapter->hw);
if (adapter->hw.mac_type >= e1000_82544)
E1000_WRITE_REG(&adapter->hw, WUC, 0);
- #ifdef DISABLE_MULR
- /* disable Multiple Reads in Transmit Control Register for debugging */
- tctl = E1000_READ_REG(hw, TCTL);
- E1000_WRITE_REG(hw, TCTL, tctl & ~E1000_TCTL_MULR);
- #endif
if (e1000_init_hw(&adapter->hw))
DPRINTK(PROBE, ERR, "Hardware Error\n");
e1000_update_mng_vlan(adapter);
(adapter->hw.mac_type != e1000_82547))
netdev->features |= NETIF_F_TSO;
- #ifdef NETIF_F_TSO_IPV6
+ #ifdef NETIF_F_TSO6
if (adapter->hw.mac_type > e1000_82547_rev_2)
- netdev->features |= NETIF_F_TSO_IPV6;
+ netdev->features |= NETIF_F_TSO6;
#endif
#endif
if (pci_using_dac)
adapter->phy_info_timer.function = &e1000_update_phy_info;
adapter->phy_info_timer.data = (unsigned long) adapter;
- INIT_WORK(&adapter->reset_task,
- (void (*)(void *))e1000_reset_task, netdev);
+ INIT_WORK(&adapter->reset_task, e1000_reset_task);
e1000_check_options(adapter);
break;
case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
case E1000_DEV_ID_82571EB_QUAD_COPPER:
+ case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
/* if quad port adapter, disable WoL on all but port A */
if (global_quad_port_a != 0)
adapter->eeprom_wol = 0;
return -EBUSY;
/* allocate transmit descriptors */
-
if ((err = e1000_setup_all_tx_resources(adapter)))
goto err_setup_tx;
/* allocate receive descriptors */
-
if ((err = e1000_setup_all_rx_resources(adapter)))
goto err_setup_rx;
if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
tarc = E1000_READ_REG(hw, TARC0);
+ /* set the speed mode bit, we'll clear it if we're not at
+ * gigabit link later */
tarc |= (1 << 21);
E1000_WRITE_REG(hw, TARC0, tarc);
} else if (hw->mac_type == e1000_80003es2lan) {
e1000_config_collision_dist(hw);
/* Setup Transmit Descriptor Settings for eop descriptor */
- adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
- E1000_TXD_CMD_IFCS;
+ adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
+
+ /* only set IDE if we are delaying interrupts using the timers */
+ if (adapter->tx_int_delay)
+ adapter->txd_cmd |= E1000_TXD_CMD_IDE;
if (hw->mac_type < e1000_82543)
adapter->txd_cmd |= E1000_TXD_CMD_RPS;
/* Configure extra packet-split registers */
rfctl = E1000_READ_REG(&adapter->hw, RFCTL);
rfctl |= E1000_RFCTL_EXTEN;
- /* disable IPv6 packet split support */
- rfctl |= E1000_RFCTL_IPV6_DIS;
+ /* disable packet split support for IPv6 extension headers,
+ * because some malformed IPv6 headers can hang the RX */
+ rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
+ E1000_RFCTL_NEW_IPV6_EXT_DIS);
+
E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);
rctl |= E1000_RCTL_DTYP_PS;
if (hw->mac_type >= e1000_82540) {
E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
- if (adapter->itr > 1)
+ if (adapter->itr_setting != 0)
E1000_WRITE_REG(hw, ITR,
1000000000 / (adapter->itr * 256));
}
/* Reset delay timers after every interrupt */
ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
#ifdef CONFIG_E1000_NAPI
- /* Auto-Mask interrupts upon ICR read. */
+ /* Auto-Mask interrupts upon ICR access */
ctrl_ext |= E1000_CTRL_EXT_IAME;
+ E1000_WRITE_REG(hw, IAM, 0xffffffff);
#endif
E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
- E1000_WRITE_REG(hw, IAM, ~0);
E1000_WRITE_FLUSH(hw);
}
E1000_WRITE_REG(hw, RXCSUM, rxcsum);
}
+ /* enable early receives on 82573, only takes effect if using > 2048
+ * byte total frame size. for example only for jumbo frames */
+ #define E1000_ERT_2048 0x100
+ if (hw->mac_type == e1000_82573)
+ E1000_WRITE_REG(hw, ERT, E1000_ERT_2048);
+
/* Enable Receives */
E1000_WRITE_REG(hw, RCTL, rctl);
}
buffer_info->dma,
buffer_info->length,
PCI_DMA_TODEVICE);
+ buffer_info->dma = 0;
}
- if (buffer_info->skb)
+ if (buffer_info->skb) {
dev_kfree_skb_any(buffer_info->skb);
- memset(buffer_info, 0, sizeof(struct e1000_buffer));
+ buffer_info->skb = NULL;
+ }
+ /* buffer_info must be completely set up in the transmit path */
}
/**
DPRINTK(LINK, INFO,
"Gigabit has been disabled, downgrading speed\n");
}
+
if (adapter->hw.mac_type == e1000_82573) {
e1000_enable_tx_pkt_filtering(&adapter->hw);
if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
if ((adapter->hw.mac_type == e1000_82571 ||
adapter->hw.mac_type == e1000_82572) &&
txb2b == 0) {
- #define SPEED_MODE_BIT (1 << 21)
uint32_t tarc0;
tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
- tarc0 &= ~SPEED_MODE_BIT;
+ tarc0 &= ~(1 << 21);
E1000_WRITE_REG(&adapter->hw, TARC0, tarc0);
}
-
+
#ifdef NETIF_F_TSO
/* disable TSO for pcie and 10/100 speeds, to avoid
* some hardware issues */
DPRINTK(PROBE,INFO,
"10/100 speed: disabling TSO\n");
netdev->features &= ~NETIF_F_TSO;
+ #ifdef NETIF_F_TSO6
+ netdev->features &= ~NETIF_F_TSO6;
+ #endif
break;
case SPEED_1000:
netdev->features |= NETIF_F_TSO;
+ #ifdef NETIF_F_TSO6
+ netdev->features |= NETIF_F_TSO6;
+ #endif
break;
default:
/* oops */
}
}
- /* Dynamic mode for Interrupt Throttle Rate (ITR) */
- if (adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) {
- /* Symmetric Tx/Rx gets a reduced ITR=2000; Total
- * asymmetrical Tx or Rx gets ITR=8000; everyone
- * else is between 2000-8000. */
- uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000;
- uint32_t dif = (adapter->gotcl > adapter->gorcl ?
- adapter->gotcl - adapter->gorcl :
- adapter->gorcl - adapter->gotcl) / 10000;
- uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
- E1000_WRITE_REG(&adapter->hw, ITR, 1000000000 / (itr * 256));
- }
-
/* Cause software interrupt to ensure rx ring is cleaned */
E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);
mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}
+ enum latency_range {
+ lowest_latency = 0,
+ low_latency = 1,
+ bulk_latency = 2,
+ latency_invalid = 255
+ };
+
+ /**
+ * e1000_update_itr - update the dynamic ITR value based on statistics
+ * Stores a new ITR value based on packets and byte
+ * counts during the last interrupt. The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern. Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ * this functionality is controlled by the InterruptThrottleRate module
+ * parameter (see e1000_param.c)
+ * @adapter: pointer to adapter
+ * @itr_setting: current adapter->itr
+ * @packets: the number of packets during this measurement interval
+ * @bytes: the number of bytes during this measurement interval
+ **/
+ static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
+ uint16_t itr_setting,
+ int packets,
+ int bytes)
+ {
+ unsigned int retval = itr_setting;
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (unlikely(hw->mac_type < e1000_82540))
+ goto update_itr_done;
+
+ if (packets == 0)
+ goto update_itr_done;
+
+ switch (itr_setting) {
+ case lowest_latency:
+ if ((packets < 5) && (bytes > 512))
+ retval = low_latency;
+ break;
+ case low_latency: /* 50 usec aka 20000 ints/s */
+ if (bytes > 10000) {
+ if ((packets < 10) ||
+ ((bytes/packets) > 1200))
+ retval = bulk_latency;
+ else if ((packets > 35))
+ retval = lowest_latency;
+ } else if (packets <= 2 && bytes < 512)
+ retval = lowest_latency;
+ break;
+ case bulk_latency: /* 250 usec aka 4000 ints/s */
+ if (bytes > 25000) {
+ if (packets > 35)
+ retval = low_latency;
+ } else {
+ if (bytes < 6000)
+ retval = low_latency;
+ }
+ break;
+ }
+
+ update_itr_done:
+ return retval;
+ }
+
+ static void e1000_set_itr(struct e1000_adapter *adapter)
+ {
+ struct e1000_hw *hw = &adapter->hw;
+ uint16_t current_itr;
+ uint32_t new_itr = adapter->itr;
+
+ if (unlikely(hw->mac_type < e1000_82540))
+ return;
+
+ /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
+ if (unlikely(adapter->link_speed != SPEED_1000)) {
+ current_itr = 0;
+ new_itr = 4000;
+ goto set_itr_now;
+ }
+
+ adapter->tx_itr = e1000_update_itr(adapter,
+ adapter->tx_itr,
+ adapter->total_tx_packets,
+ adapter->total_tx_bytes);
+ adapter->rx_itr = e1000_update_itr(adapter,
+ adapter->rx_itr,
+ adapter->total_rx_packets,
+ adapter->total_rx_bytes);
+
+ current_itr = max(adapter->rx_itr, adapter->tx_itr);
+
+ /* conservative mode eliminates the lowest_latency setting */
+ if (current_itr == lowest_latency && (adapter->itr_setting == 3))
+ current_itr = low_latency;
+
+ switch (current_itr) {
+ /* counts and packets in update_itr are dependent on these numbers */
+ case lowest_latency:
+ new_itr = 70000;
+ break;
+ case low_latency:
+ new_itr = 20000; /* aka hwitr = ~200 */
+ break;
+ case bulk_latency:
+ new_itr = 4000;
+ break;
+ default:
+ break;
+ }
+
+ set_itr_now:
+ if (new_itr != adapter->itr) {
+ /* this attempts to bias the interrupt rate towards Bulk
+ * by adding intermediate steps when interrupt rate is
+ * increasing */
+ new_itr = new_itr > adapter->itr ?
+ min(adapter->itr + (new_itr >> 2), new_itr) :
+ new_itr;
+ adapter->itr = new_itr;
+ E1000_WRITE_REG(hw, ITR, 1000000000 / (new_itr * 256));
+ }
+
+ return;
+ }
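
The ITR register programmed above (and in e1000_configure_rx()) holds the minimum inter-interrupt gap in 256 ns units, so a target interrupt rate converts as ITR = 10^9 / (rate * 256). A worked sketch of the conversion (helper name is illustrative):

	/*
	 * e.g. the low_latency target of 20000 ints/s:
	 *   1000000000 / (20000 * 256) ~= 195 register units,
	 * i.e. roughly a 50 us minimum gap between interrupts.
	 */
	static inline uint32_t itr_to_reg(uint32_t ints_per_sec)
	{
		return 1000000000 / (ints_per_sec * 256);
	}
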
+
#define E1000_TX_FLAGS_CSUM 0x00000001
#define E1000_TX_FLAGS_VLAN 0x00000002
#define E1000_TX_FLAGS_TSO 0x00000004
0);
cmd_length = E1000_TXD_CMD_IP;
ipcse = skb->h.raw - skb->data - 1;
- #ifdef NETIF_F_TSO_IPV6
+ #ifdef NETIF_F_TSO6
} else if (skb->protocol == htons(ETH_P_IPV6)) {
skb->nh.ipv6h->payload_len = 0;
skb->h.th->check =
context_desc->cmd_and_length = cpu_to_le32(cmd_length);
buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
if (++i == tx_ring->count) i = 0;
tx_ring->next_to_use = i;
context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
context_desc->upper_setup.tcp_fields.tucss = css;
- context_desc->upper_setup.tcp_fields.tucso = css + skb->csum;
+ context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset;
context_desc->upper_setup.tcp_fields.tucse = 0;
context_desc->tcp_seg_setup.data = 0;
context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
if (unlikely(++i == tx_ring->count)) i = 0;
tx_ring->next_to_use = i;
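
Several hunks in this series (e1000 here, and ixgb, myri10ge, skge and sungem below) make the same substitution: with CHECKSUM_PARTIAL the stack now exports the checksum store location as skb->csum_offset, relative to skb->h.raw, instead of reusing skb->csum. A sketch restating the arithmetic (helper name is illustrative):

	/* Offset from skb->data at which the checksum must be written. */
	static inline unsigned int tx_csum_stuff_off(const struct sk_buff *skb)
	{
		unsigned int css = skb->h.raw - skb->data;  /* csum start */

		return css + skb->csum_offset;              /* csum store */
	}
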
size,
PCI_DMA_TODEVICE);
buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
len -= size;
offset += size;
size,
PCI_DMA_TODEVICE);
buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
len -= size;
offset += size;
tx_ring->next_to_use = i;
writel(i, adapter->hw.hw_addr + tx_ring->tdt);
+ /* we need this if more than one processor can write to our tail
+ * at a time, it synchronizes IO on IA64/Altix systems */
+ mmiowb();
}
/**
/* A reprieve! */
netif_start_queue(netdev);
+ ++adapter->restart_queue;
return 0;
}
max_per_txd = min(mss << 2, max_per_txd);
max_txd_pwr = fls(max_per_txd) - 1;
- /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
- * points to just header, pull a few bytes of payload from
- * frags into skb->data */
+ /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
+ * points to just header, pull a few bytes of payload from
+ * frags into skb->data */
hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) {
switch (adapter->hw.mac_type) {
}
static void
-e1000_reset_task(struct net_device *netdev)
+e1000_reset_task(struct work_struct *work)
{
- struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_adapter *adapter =
+ container_of(work, struct e1000_adapter, reset_task);
e1000_reinit_locked(adapter);
}
adapter->stats.roc += E1000_READ_REG(hw, ROC);
if (adapter->hw.mac_type != e1000_ich8lan) {
- adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
- adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
- adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
- adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
- adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
- adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
+ adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
+ adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
+ adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
+ adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
+ adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
+ adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
}
adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
adapter->stats.tpr += E1000_READ_REG(hw, TPR);
if (adapter->hw.mac_type != e1000_ich8lan) {
- adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
- adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
- adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
- adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
- adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
- adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
+ adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
+ adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
+ adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
+ adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
+ adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
+ adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
}
adapter->stats.mptc += E1000_READ_REG(hw, MPTC);
adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);
if (adapter->hw.mac_type != e1000_ich8lan) {
- adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
- adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC);
- adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC);
- adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC);
- adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC);
- adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC);
- adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC);
+ adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
+ adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC);
+ adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC);
+ adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC);
+ adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC);
+ adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC);
+ adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC);
}
}
/* Fill out the OS statistics structure */
-
adapter->net_stats.rx_packets = adapter->stats.gprc;
adapter->net_stats.tx_packets = adapter->stats.gptc;
adapter->net_stats.rx_bytes = adapter->stats.gorcl;
/* Tx Dropped needs to be maintained elsewhere */
/* Phy Stats */
-
if (hw->media_type == e1000_media_type_copper) {
if ((adapter->link_speed == SPEED_1000) &&
(!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
spin_unlock_irqrestore(&adapter->stats_lock, flags);
}
+ #ifdef CONFIG_PCI_MSI
+
+ /**
+ * e1000_intr_msi - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+
+ static irqreturn_t e1000_intr_msi(int irq, void *data)
+ {
+ struct net_device *netdev = data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ #ifndef CONFIG_E1000_NAPI
+ int i;
+ #endif
+
+ /* this code avoids the read of ICR but has to get 1000 interrupts
+ * at every link change event before it will notice the change */
+ if (++adapter->detect_link >= 1000) {
+ uint32_t icr = E1000_READ_REG(hw, ICR);
+ #ifdef CONFIG_E1000_NAPI
+ /* read ICR disables interrupts using IAM, so keep up with our
+ * enable/disable accounting */
+ atomic_inc(&adapter->irq_sem);
+ #endif
+ adapter->detect_link = 0;
+ if ((icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) &&
+ (icr & E1000_ICR_INT_ASSERTED)) {
+ hw->get_link_status = 1;
+ /* 80003ES2LAN workaround--
+ * For packet buffer work-around on link down event;
+ * disable receives here in the ISR and
+ * reset adapter in watchdog
+ */
+ if (netif_carrier_ok(netdev) &&
+ (adapter->hw.mac_type == e1000_80003es2lan)) {
+ /* disable receives */
+ uint32_t rctl = E1000_READ_REG(hw, RCTL);
+ E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
+ }
+ /* guard against interrupt when we're going down */
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ mod_timer(&adapter->watchdog_timer,
+ jiffies + 1);
+ }
+ } else {
+ E1000_WRITE_REG(hw, ICR, (0xffffffff & ~(E1000_ICR_RXSEQ |
+ E1000_ICR_LSC)));
+ /* bummer we have to flush here, but things break otherwise as
+ * some event appears to be lost or delayed and throughput
+ * drops. In almost all tests this flush is un-necessary */
+ E1000_WRITE_FLUSH(hw);
+ #ifdef CONFIG_E1000_NAPI
+ /* Interrupt Auto-Mask (IAM)...upon writing ICR, interrupts are
+ * masked. No need for the IMC write, but it does mean we
+ * should account for it ASAP. */
+ atomic_inc(&adapter->irq_sem);
+ #endif
+ }
+
+ #ifdef CONFIG_E1000_NAPI
+ if (likely(netif_rx_schedule_prep(netdev))) {
+ adapter->total_tx_bytes = 0;
+ adapter->total_tx_packets = 0;
+ adapter->total_rx_bytes = 0;
+ adapter->total_rx_packets = 0;
+ __netif_rx_schedule(netdev);
+ } else
+ e1000_irq_enable(adapter);
+ #else
+ adapter->total_tx_bytes = 0;
+ adapter->total_rx_bytes = 0;
+ adapter->total_tx_packets = 0;
+ adapter->total_rx_packets = 0;
+
+ for (i = 0; i < E1000_MAX_INTR; i++)
+ if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
+ !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
+ break;
+
+ if (likely(adapter->itr_setting & 3))
+ e1000_set_itr(adapter);
+ #endif
+
+ return IRQ_HANDLED;
+ }
+ #endif
/**
* e1000_intr - Interrupt Handler
uint32_t rctl, icr = E1000_READ_REG(hw, ICR);
#ifndef CONFIG_E1000_NAPI
int i;
- #else
+ #endif
+ if (unlikely(!icr))
+ return IRQ_NONE; /* Not our interrupt */
+
+ #ifdef CONFIG_E1000_NAPI
+ /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
+ * not set, then the adapter didn't send an interrupt */
+ if (unlikely(hw->mac_type >= e1000_82571 &&
+ !(icr & E1000_ICR_INT_ASSERTED)))
+ return IRQ_NONE;
+
/* Interrupt Auto-Mask...upon reading ICR,
* interrupts are masked. No need for the
* IMC write, but it does mean we should
atomic_inc(&adapter->irq_sem);
#endif
- if (unlikely(!icr)) {
- #ifdef CONFIG_E1000_NAPI
- if (hw->mac_type >= e1000_82571)
- e1000_irq_enable(adapter);
- #endif
- return IRQ_NONE; /* Not our interrupt */
- }
-
if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
hw->get_link_status = 1;
/* 80003ES2LAN workaround--
#ifdef CONFIG_E1000_NAPI
if (unlikely(hw->mac_type < e1000_82571)) {
+ /* disable interrupts, without the synchronize_irq bit */
atomic_inc(&adapter->irq_sem);
E1000_WRITE_REG(hw, IMC, ~0);
E1000_WRITE_FLUSH(hw);
}
- if (likely(netif_rx_schedule_prep(netdev)))
+ if (likely(netif_rx_schedule_prep(netdev))) {
+ adapter->total_tx_bytes = 0;
+ adapter->total_tx_packets = 0;
+ adapter->total_rx_bytes = 0;
+ adapter->total_rx_packets = 0;
__netif_rx_schedule(netdev);
- else
+ } else
+ /* this really should not happen! if it does it is basically a
+ * bug, but not a hard error, so enable ints and continue */
e1000_irq_enable(adapter);
#else
/* Writing IMC and IMS is needed for 82547.
E1000_WRITE_REG(hw, IMC, ~0);
}
+ adapter->total_tx_bytes = 0;
+ adapter->total_rx_bytes = 0;
+ adapter->total_tx_packets = 0;
+ adapter->total_rx_packets = 0;
+
for (i = 0; i < E1000_MAX_INTR; i++)
if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
!e1000_clean_tx_irq(adapter, adapter->tx_ring)))
break;
+ if (likely(adapter->itr_setting & 3))
+ e1000_set_itr(adapter);
+
if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
e1000_irq_enable(adapter);
#endif
-
return IRQ_HANDLED;
}
if ((!tx_cleaned && (work_done == 0)) ||
!netif_running(poll_dev)) {
quit_polling:
+ if (likely(adapter->itr_setting & 3))
+ e1000_set_itr(adapter);
netif_rx_complete(poll_dev);
e1000_irq_enable(adapter);
return 0;
unsigned int count = 0;
#endif
boolean_t cleaned = FALSE;
+ unsigned int total_tx_bytes=0, total_tx_packets=0;
i = tx_ring->next_to_clean;
eop = tx_ring->buffer_info[i].next_to_watch;
buffer_info = &tx_ring->buffer_info[i];
cleaned = (i == eop);
+ if (cleaned) {
+ /* this packet count is wrong for TSO but has a
+ * tendency to make dynamic ITR change more
+ * towards bulk */
+ total_tx_packets++;
+ total_tx_bytes += buffer_info->skb->len;
+ }
e1000_unmap_and_free_tx_resource(adapter, buffer_info);
- memset(tx_desc, 0, sizeof(struct e1000_tx_desc));
+ tx_desc->upper.data = 0;
if (unlikely(++i == tx_ring->count)) i = 0;
}
-
eop = tx_ring->buffer_info[i].next_to_watch;
eop_desc = E1000_TX_DESC(*tx_ring, eop);
#ifdef CONFIG_E1000_NAPI
* sees the new next_to_clean.
*/
smp_mb();
- if (netif_queue_stopped(netdev))
+ if (netif_queue_stopped(netdev)) {
netif_wake_queue(netdev);
+ ++adapter->restart_queue;
+ }
}
if (adapter->detect_tx_hung) {
netif_stop_queue(netdev);
}
}
+ adapter->total_tx_bytes += total_tx_bytes;
+ adapter->total_tx_packets += total_tx_packets;
return cleaned;
}
unsigned int i;
int cleaned_count = 0;
boolean_t cleaned = FALSE;
+ unsigned int total_rx_bytes=0, total_rx_packets=0;
i = rx_ring->next_to_clean;
rx_desc = E1000_RX_DESC(*rx_ring, i);
while (rx_desc->status & E1000_RXD_STAT_DD) {
struct sk_buff *skb;
u8 status;
+
#ifdef CONFIG_E1000_NAPI
if (*work_done >= work_to_do)
break;
* done after the TBI_ACCEPT workaround above */
length -= 4;
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += length;
+ total_rx_packets++;
+
/* code added for copybreak, this should improve
* performance for small packets with large amounts
* of reassembly being done in the stack */
/* save the skb in buffer_info as good */
buffer_info->skb = skb;
skb = new_skb;
- skb_put(skb, length);
}
- } else
- skb_put(skb, length);
-
+ /* else just continue with the old one */
+ }
/* end copybreak code */
+ skb_put(skb, length);
/* Receive Checksum Offload */
e1000_rx_checksum(adapter,
if (cleaned_count)
adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+ adapter->total_rx_packets += total_rx_packets;
+ adapter->total_rx_bytes += total_rx_bytes;
return cleaned;
}
uint32_t length, staterr;
int cleaned_count = 0;
boolean_t cleaned = FALSE;
+ unsigned int total_rx_bytes=0, total_rx_packets=0;
i = rx_ring->next_to_clean;
rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
goto copydone;
} /* if */
}
-
+
for (j = 0; j < adapter->rx_ps_pages; j++) {
if (!(length= le16_to_cpu(rx_desc->wb.upper.length[j])))
break;
pskb_trim(skb, skb->len - 4);
copydone:
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
e1000_rx_checksum(adapter, staterr,
le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
skb->protocol = eth_type_trans(skb, netdev);
if (cleaned_count)
adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+ adapter->total_rx_packets += total_rx_packets;
+ adapter->total_rx_bytes += total_rx_bytes;
return cleaned;
}
}
skb = netdev_alloc_skb(netdev,
- adapter->rx_ps_bsize0 + NET_IP_ALIGN);
+ adapter->rx_ps_bsize0 + NET_IP_ALIGN);
if (unlikely(!skb)) {
adapter->alloc_rx_buff_failed++;
return E1000_SUCCESS;
}
-
void
e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
{
E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
if (adapter->hw.mac_type != e1000_ich8lan) {
- /* enable VLAN receive filtering */
- rctl = E1000_READ_REG(&adapter->hw, RCTL);
- rctl |= E1000_RCTL_VFE;
- rctl &= ~E1000_RCTL_CFIEN;
- E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
- e1000_update_mng_vlan(adapter);
+ /* enable VLAN receive filtering */
+ rctl = E1000_READ_REG(&adapter->hw, RCTL);
+ rctl |= E1000_RCTL_VFE;
+ rctl &= ~E1000_RCTL_CFIEN;
+ E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+ e1000_update_mng_vlan(adapter);
}
} else {
/* disable VLAN tag insert/strip */
E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
if (adapter->hw.mac_type != e1000_ich8lan) {
- /* disable VLAN filtering */
- rctl = E1000_READ_REG(&adapter->hw, RCTL);
- rctl &= ~E1000_RCTL_VFE;
- E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
- if (adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) {
- e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
- adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
- }
+ /* disable VLAN filtering */
+ rctl = E1000_READ_REG(&adapter->hw, RCTL);
+ rctl &= ~E1000_RCTL_VFE;
+ E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+ if (adapter->mng_vlan_id !=
+ (uint16_t)E1000_MNG_VLAN_NONE) {
+ e1000_vlan_rx_kill_vid(netdev,
+ adapter->mng_vlan_id);
+ adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+ }
}
}
static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter);
void ixgb_set_ethtool_ops(struct net_device *netdev);
static void ixgb_tx_timeout(struct net_device *dev);
-static void ixgb_tx_timeout_task(struct net_device *dev);
+static void ixgb_tx_timeout_task(struct work_struct *work);
static void ixgb_vlan_rx_register(struct net_device *netdev,
struct vlan_group *grp);
static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
adapter->watchdog_timer.function = &ixgb_watchdog;
adapter->watchdog_timer.data = (unsigned long)adapter;
- INIT_WORK(&adapter->tx_timeout_task,
- (void (*)(void *))ixgb_tx_timeout_task, netdev);
+ INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);
strcpy(netdev->name, "eth%d");
if((err = register_netdev(netdev)))
if(likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
struct ixgb_buffer *buffer_info;
css = skb->h.raw - skb->data;
- cso = (skb->h.raw + skb->csum) - skb->data;
+ cso = css + skb->csum_offset;
i = adapter->tx_ring.next_to_use;
context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
}
static void
-ixgb_tx_timeout_task(struct net_device *netdev)
+ixgb_tx_timeout_task(struct work_struct *work)
{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
+ struct ixgb_adapter *adapter =
+ container_of(work, struct ixgb_adapter, tx_timeout_task);
adapter->tx_timeout_count++;
ixgb_down(adapter, TRUE);
flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST);
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
cksum_offset = (skb->h.raw - skb->data);
- pseudo_hdr_offset = (skb->h.raw + skb->csum) - skb->data;
+ pseudo_hdr_offset = cksum_offset + skb->csum_offset;
/* If the headers are excessively large, then we must
* fall back to a software checksum */
if (unlikely(cksum_offset > 255 || pseudo_hdr_offset > 127)) {
* This watchdog is used to check whether the board has suffered
* from a parity error and needs to be recovered.
*/
-static void myri10ge_watchdog(void *arg)
+static void myri10ge_watchdog(struct work_struct *work)
{
- struct myri10ge_priv *mgp = arg;
+ struct myri10ge_priv *mgp =
+ container_of(work, struct myri10ge_priv, watchdog_work);
u32 reboot;
int status;
u16 cmd, vendor;
(unsigned long)mgp);
SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops);
- INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog, mgp);
+ INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
status = register_netdev(netdev);
if (status != 0) {
dev_err(&pdev->dev, "register_netdev failed: %d\n", status);
* Author: Andy Fleming
*
* Copyright (c) 2004 Freescale Semiconductor, Inc.
+ * Copyright (c) 2006 Maciej W. Rozycki
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
+ #include <linux/timer.h>
+ #include <linux/workqueue.h>
#include <asm/io.h>
#include <asm/irq.h>
EXPORT_SYMBOL(phy_start_aneg);
-static void phy_change(void *data);
+static void phy_change(struct work_struct *work);
static void phy_timer(unsigned long data);
/* phy_start_machine:
{
struct phy_device *phydev = phy_dat;
+ if (PHY_HALTED == phydev->state)
+ return IRQ_NONE; /* It can't be ours. */
+
/* The MDIO bus is not allowed to be written in interrupt
* context, so we need to disable the irq here. A work
* queue will write the PHY to disable and clear the
{
int err = 0;
- INIT_WORK(&phydev->phy_queue, phy_change, phydev);
+ INIT_WORK(&phydev->phy_queue, phy_change);
if (request_irq(phydev->irq, phy_interrupt,
IRQF_SHARED,
if (err)
phy_error(phydev);
+ /*
+ * Finish any pending work; we might have been scheduled
+ * to be called from keventd ourselves, though.
+ */
+ if (!current_is_keventd())
+ flush_scheduled_work();
+
free_irq(phydev->irq, phydev);
return err;
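
The current_is_keventd() test above avoids a self-deadlock: this path can itself be scheduled from keventd, and a keventd worker calling flush_scheduled_work() would wait forever for its own queue to drain. A hypothetical wrapper capturing the rule (not part of the patch):

	static void phy_flush_pending_work(void)
	{
		/* Never flush the shared queue from inside keventd. */
		if (!current_is_keventd())
			flush_scheduled_work();
	}
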
/* Scheduled by the phy_interrupt/timer to handle PHY changes */
-static void phy_change(void *data)
+static void phy_change(struct work_struct *work)
{
int err;
- struct phy_device *phydev = data;
+ struct phy_device *phydev =
+ container_of(work, struct phy_device, phy_queue);
err = phy_disable_interrupts(phydev);
enable_irq(phydev->irq);
/* Reenable interrupts */
- err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
+ if (PHY_HALTED != phydev->state)
+ err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
if (err)
goto irq_enable_err;
if (PHY_HALTED == phydev->state)
goto out_unlock;
- if (phydev->irq != PHY_POLL) {
- /* Clear any pending interrupts */
- phy_clear_interrupt(phydev);
+ phydev->state = PHY_HALTED;
+ if (phydev->irq != PHY_POLL) {
/* Disable PHY Interrupts */
phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
- }
- phydev->state = PHY_HALTED;
+ /* Clear any pending interrupts */
+ phy_clear_interrupt(phydev);
+ }
out_unlock:
spin_unlock(&phydev->lock);
+
+ /*
+ * Cannot call flush_scheduled_work() here as desired because
+ * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
+ * will not reenable interrupts.
+ */
}
break;
case PHY_AN:
+ err = phy_read_status(phydev);
+
+ if (err < 0)
+ break;
+
+ /* If the link is down, give up on
+ * negotiation for now */
+ if (!phydev->link) {
+ phydev->state = PHY_NOLINK;
+ netif_carrier_off(phydev->attached_dev);
+ phydev->adjust_link(phydev->attached_dev);
+ break;
+ }
+
/* Check if negotiation is done. Break
* if there's an error */
err = phy_aneg_done(phydev);
if (err < 0)
break;
- /* If auto-negotiation is done, we change to
- * either RUNNING, or NOLINK */
+ /* If AN is done, we're running */
if (err > 0) {
- err = phy_read_status(phydev);
+ phydev->state = PHY_RUNNING;
+ netif_carrier_on(phydev->attached_dev);
+ phydev->adjust_link(phydev->attached_dev);
- if (err)
+ } else if (0 == phydev->link_timeout--) {
+ int idx;
+
+ needs_aneg = 1;
+ /* If we have the magic_aneg bit,
+ * we try again */
+ if (phydev->drv->flags & PHY_HAS_MAGICANEG)
break;
- if (phydev->link) {
- phydev->state = PHY_RUNNING;
- netif_carrier_on(phydev->attached_dev);
- } else {
- phydev->state = PHY_NOLINK;
- netif_carrier_off(phydev->attached_dev);
- }
+ /* The timer expired, and we still
+ * don't have a setting, so we try
+ * forcing it until we find one that
+ * works, starting from the fastest speed,
+ * and working our way down */
+ idx = phy_find_valid(0, phydev->supported);
- phydev->adjust_link(phydev->attached_dev);
+ phydev->speed = settings[idx].speed;
+ phydev->duplex = settings[idx].duplex;
- } else if (0 == phydev->link_timeout--) {
- /* The counter expired, so either we
- * switch to forced mode, or the
- * magic_aneg bit exists, and we try aneg
- * again */
- if (!(phydev->drv->flags & PHY_HAS_MAGICANEG)) {
- int idx;
-
- /* We'll start from the
- * fastest speed, and work
- * our way down */
- idx = phy_find_valid(0,
- phydev->supported);
-
- phydev->speed = settings[idx].speed;
- phydev->duplex = settings[idx].duplex;
-
- phydev->autoneg = AUTONEG_DISABLE;
- phydev->state = PHY_FORCING;
- phydev->link_timeout =
- PHY_FORCE_TIMEOUT;
-
- pr_info("Trying %d/%s\n",
- phydev->speed,
- DUPLEX_FULL ==
- phydev->duplex ?
- "FULL" : "HALF");
- }
+ phydev->autoneg = AUTONEG_DISABLE;
- needs_aneg = 1;
+ pr_info("Trying %d/%s\n", phydev->speed,
+ DUPLEX_FULL ==
+ phydev->duplex ?
+ "FULL" : "HALF");
}
break;
case PHY_NOLINK:
}
break;
case PHY_FORCING:
- err = phy_read_status(phydev);
+ err = genphy_update_link(phydev);
if (err)
break;
struct rtl8169_private {
void __iomem *mmio_addr; /* memory map physical address */
struct pci_dev *pci_dev; /* Index of PCI device */
+ struct net_device *dev;
struct net_device_stats stats; /* statistics of net device */
spinlock_t lock; /* spin lock flag */
u32 msg_enable;
void (*phy_reset_enable)(void __iomem *);
unsigned int (*phy_reset_pending)(void __iomem *);
unsigned int (*link_ok)(void __iomem *);
- struct work_struct task;
+ struct delayed_work task;
unsigned wol_enabled : 1;
};
{
unsigned int val;
- val = (mdio_read(ioaddr, MII_BMCR) | BMCR_RESET) & 0xffff;
- mdio_write(ioaddr, MII_BMCR, val);
+ mdio_write(ioaddr, MII_BMCR, BMCR_RESET);
+ val = mdio_read(ioaddr, MII_BMCR);
}
static void rtl8169_check_link_status(struct net_device *dev,
free_netdev(dev);
}
+ static void rtl8169_phy_reset(struct net_device *dev,
+ struct rtl8169_private *tp)
+ {
+ void __iomem *ioaddr = tp->mmio_addr;
+ int i;
+
+ tp->phy_reset_enable(ioaddr);
+ for (i = 0; i < 100; i++) {
+ if (!tp->phy_reset_pending(ioaddr))
+ return;
+ msleep(1);
+ }
+ if (netif_msg_link(tp))
+ printk(KERN_ERR "%s: PHY reset failed.\n", dev->name);
+ }
+
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
rtl8169_link_option(board_idx, &autoneg, &speed, &duplex);
+ rtl8169_phy_reset(dev, tp);
+
rtl8169_set_speed(dev, autoneg, speed, duplex);
if ((RTL_R8(PHYstatus) & TBI_Enable) && netif_msg_link(tp))
struct rtl8169_private *tp;
struct net_device *dev;
void __iomem *ioaddr;
- unsigned int i, pm_cap;
- int rc;
+ unsigned int pm_cap;
+ int i, rc;
if (netif_msg_drv(&debug)) {
printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
tp = netdev_priv(dev);
+ tp->dev = dev;
tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
/* enable device (incl. PCI PM wakeup and hotplug setup) */
if (retval < 0)
goto err_free_rx;
- INIT_WORK(&tp->task, NULL, dev);
+ INIT_DELAYED_WORK(&tp->task, NULL);
rtl8169_hw_start(dev);
tp->cur_tx = tp->dirty_tx = 0;
}
-static void rtl8169_schedule_work(struct net_device *dev, void (*task)(void *))
+static void rtl8169_schedule_work(struct net_device *dev, work_func_t task)
{
struct rtl8169_private *tp = netdev_priv(dev);
- PREPARE_WORK(&tp->task, task, dev);
+ PREPARE_DELAYED_WORK(&tp->task, task);
schedule_delayed_work(&tp->task, 4);
}
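
With work_func_t, the task handed to rtl8169_schedule_work() is now type-checked against the delayed-work handler signature. A sketch of a caller, mirroring the driver's timeout path (the body is illustrative; rtl8169_reset_task is defined below):

	static void rtl8169_tx_timeout(struct net_device *dev)
	{
		/* Queue the reset task roughly 4 jiffies out. */
		rtl8169_schedule_work(dev, rtl8169_reset_task);
	}
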
netif_poll_enable(dev);
}
-static void rtl8169_reinit_task(void *_data)
+static void rtl8169_reinit_task(struct work_struct *work)
{
- struct net_device *dev = _data;
+ struct rtl8169_private *tp =
+ container_of(work, struct rtl8169_private, task.work);
+ struct net_device *dev = tp->dev;
int ret;
if (netif_running(dev)) {
}
}
-static void rtl8169_reset_task(void *_data)
+static void rtl8169_reset_task(struct work_struct *work)
{
- struct net_device *dev = _data;
- struct rtl8169_private *tp = netdev_priv(dev);
+ struct rtl8169_private *tp =
+ container_of(work, struct rtl8169_private, task.work);
+ struct net_device *dev = tp->dev;
if (!netif_running(dev))
return;
* Since internal PHY is wired to a level triggered pin, can't
* get an interrupt when carrier is detected.
*/
-static void xm_link_timer(void *arg)
+static void xm_link_timer(struct work_struct *work)
{
- struct net_device *dev = arg;
- struct skge_port *skge = netdev_priv(arg);
+ struct skge_port *skge =
+ container_of(work, struct skge_port, link_thread.work);
+ struct net_device *dev = skge->netdev;
struct skge_hw *hw = skge->hw;
int port = skge->port;
int port = skge->port;
u16 ctrl;
- gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
-
ctrl = gma_read16(hw, port, GM_GP_CTRL);
ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
gma_write16(hw, port, GM_GP_CTRL, ctrl);
gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, ctrl);
}
- yukon_reset(hw, port);
skge_link_down(skge);
yukon_init(hw, port);
{
struct skge_hw *hw = skge->hw;
int port = skge->port;
+ struct net_device *dev = hw->dev[port];
netif_stop_queue(skge->netdev);
netif_carrier_off(skge->netdev);
yukon_init(hw, port);
}
mutex_unlock(&hw->phy_mutex);
+
+ dev->set_multicast_list(dev);
}
/* Basic MII support */
td->csum_offs = 0;
td->csum_start = offset;
- td->csum_write = offset + skb->csum;
+ td->csum_write = offset + skb->csum_offset;
} else
control = BMU_CHECK;
* because accessing phy registers requires spin wait which might
* cause excess interrupt latency.
*/
-static void skge_extirq(void *arg)
+static void skge_extirq(struct work_struct *work)
{
- struct skge_hw *hw = arg;
+ struct skge_hw *hw = container_of(work, struct skge_hw, phy_work);
int port;
mutex_lock(&hw->phy_mutex);
skge->port = port;
/* Only used for Genesis XMAC */
- INIT_WORK(&skge->link_thread, xm_link_timer, dev);
+ INIT_DELAYED_WORK(&skge->link_thread, xm_link_timer);
if (hw->chip_id != CHIP_ID_GENESIS) {
dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
hw->pdev = pdev;
mutex_init(&hw->phy_mutex);
- INIT_WORK(&hw->phy_work, skge_extirq, hw);
+ INIT_WORK(&hw->phy_work, skge_extirq);
spin_lock_init(&hw->hw_lock);
hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
struct spider_net_descr *descr;
dma_addr_t buf;
unsigned long flags;
- int length;
- length = skb->len;
- if (length < ETH_ZLEN) {
- if (skb_pad(skb, ETH_ZLEN-length))
- return 0;
- length = ETH_ZLEN;
- }
-
- buf = pci_map_single(card->pdev, skb->data, length, PCI_DMA_TODEVICE);
+ buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(buf)) {
if (netif_msg_tx_err(card) && net_ratelimit())
pr_err("could not iommu-map packet (%p, %i). "
- "Dropping packet\n", skb->data, length);
+ "Dropping packet\n", skb->data, skb->len);
card->spider_stats.tx_iommu_map_error++;
return -ENOMEM;
}
card->tx_chain.head = descr->next;
descr->buf_addr = buf;
- descr->buf_size = length;
+ descr->buf_size = skb->len;
descr->next_descr_addr = 0;
descr->skb = skb;
descr->data_status = 0;
/* unmap the skb */
if (skb) {
- int len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
- pci_unmap_single(card->pdev, buf_addr, len, PCI_DMA_TODEVICE);
+ pci_unmap_single(card->pdev, buf_addr, skb->len,
+ PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
}
}
SPIDER_NET_INT2_MASK_VALUE);
spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
- SPIDER_NET_GDTBSTA | SPIDER_NET_GDTDCEIDIS);
+ SPIDER_NET_GDTBSTA);
}
/**
* called as task when tx hangs, resets interface (if interface is up)
*/
static void
-spider_net_tx_timeout_task(void *data)
+spider_net_tx_timeout_task(struct work_struct *work)
{
- struct net_device *netdev = data;
- struct spider_net_card *card = netdev_priv(netdev);
+ struct spider_net_card *card =
+ container_of(work, struct spider_net_card, tx_timeout_task);
+ struct net_device *netdev = card->netdev;
if (!(netdev->flags & IFF_UP))
goto out;
card = netdev_priv(netdev);
card->netdev = netdev;
card->msg_enable = SPIDER_NET_DEFAULT_MSG;
- INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task, netdev);
+ INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task);
init_waitqueue_head(&card->waitq);
atomic_set(&card->tx_timeout_task_counter, 0);
u64 csum_start_off, csum_stuff_off;
csum_start_off = (u64) (skb->h.raw - skb->data);
- csum_stuff_off = (u64) ((skb->h.raw + skb->csum) - skb->data);
+ csum_stuff_off = csum_start_off + skb->csum_offset;
ctrl = (TXDCTRL_CENAB |
(csum_start_off << 15) |
}
}
-static void gem_reset_task(void *data)
+static void gem_reset_task(struct work_struct *work)
{
- struct gem *gp = (struct gem *) data;
+ struct gem *gp = container_of(work, struct gem, reset_task);
mutex_lock(&gp->pm_mutex);
gp->link_timer.function = gem_link_timer;
gp->link_timer.data = (unsigned long) gp;
- INIT_WORK(&gp->reset_task, gem_reset_task, gp);
+ INIT_WORK(&gp->reset_task, gem_reset_task);
gp->lstate = link_down;
gp->timer_ticks = 0;
/* Chipcommon registers. */
#define BCM43xx_CHIPCOMMON_CAPABILITIES 0x04
+ #define BCM43xx_CHIPCOMMON_CTL 0x28
#define BCM43xx_CHIPCOMMON_PLLONDELAY 0xB0
#define BCM43xx_CHIPCOMMON_FREFSELDELAY 0xB4
#define BCM43xx_CHIPCOMMON_SLOWCLKCTL 0xB8
/* SBTOPCI2 values. */
#define BCM43xx_SBTOPCI2_PREFETCH 0x4
#define BCM43xx_SBTOPCI2_BURST 0x8
+ #define BCM43xx_SBTOPCI2_MEMREAD_MULTI 0x20
+
+ /* PCI-E core registers. */
+ #define BCM43xx_PCIECORE_REG_ADDR 0x0130
+ #define BCM43xx_PCIECORE_REG_DATA 0x0134
+ #define BCM43xx_PCIECORE_MDIO_CTL 0x0128
+ #define BCM43xx_PCIECORE_MDIO_DATA 0x012C
+
+ /* PCI-E registers. */
+ #define BCM43xx_PCIE_TLP_WORKAROUND 0x0004
+ #define BCM43xx_PCIE_DLLP_LINKCTL 0x0100
+
+ /* PCI-E MDIO bits. */
+ #define BCM43xx_PCIE_MDIO_ST 0x40000000
+ #define BCM43xx_PCIE_MDIO_WT 0x10000000
+ #define BCM43xx_PCIE_MDIO_DEV 22
+ #define BCM43xx_PCIE_MDIO_REG 18
+ #define BCM43xx_PCIE_MDIO_TA 0x00020000
+ #define BCM43xx_PCIE_MDIO_TC 0x0100
+
+ /* MDIO devices. */
+ #define BCM43xx_MDIO_SERDES_RX 0x1F
+
+ /* SERDES RX registers. */
+ #define BCM43xx_SERDES_RXTIMER 0x2
+ #define BCM43xx_SERDES_CDR 0x6
+ #define BCM43xx_SERDES_CDR_BW 0x7
/* Chipcommon capabilities. */
#define BCM43xx_CAPABILITIES_PCTL 0x00040000
#define BCM43xx_COREID_USB20_HOST 0x819
#define BCM43xx_COREID_USB20_DEV 0x81a
#define BCM43xx_COREID_SDIO_HOST 0x81b
+ #define BCM43xx_COREID_PCIE 0x820
/* Core Information Registers */
#define BCM43xx_CIR_BASE 0xf00
#define BCM43xx_DEFAULT_SHORT_RETRY_LIMIT 7
#define BCM43xx_DEFAULT_LONG_RETRY_LIMIT 4
+ /* FIXME: the next line is a guess as to what the maximum RSSI value might be */
+ #define RX_RSSI_MAX 60
+
/* Max size of a security key */
#define BCM43xx_SEC_KEYSIZE 16
/* Security algorithms. */
struct tasklet_struct isr_tasklet;
/* Periodic tasks */
- struct work_struct periodic_work;
+ struct delayed_work periodic_work;
unsigned int periodic_state;
struct work_struct restart_work;
{ PCI_VENDOR_ID_BROADCOM, 0x4301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
/* Broadcom 4307 802.11b */
{ PCI_VENDOR_ID_BROADCOM, 0x4307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ /* Broadcom 4311 802.11(a)/b/g */
+ { PCI_VENDOR_ID_BROADCOM, 0x4311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ /* Broadcom 4312 802.11a/b/g */
+ { PCI_VENDOR_ID_BROADCOM, 0x4312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
/* Broadcom 4318 802.11b/g */
{ PCI_VENDOR_ID_BROADCOM, 0x4318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
/* Broadcom 4319 802.11a/b/g */
/* fetch sb_id_hi from core information registers */
sb_id_hi = bcm43xx_read32(bcm, BCM43xx_CIR_SB_ID_HI);
- core_id = (sb_id_hi & 0xFFF0) >> 4;
- core_rev = (sb_id_hi & 0xF);
+ core_id = (sb_id_hi & 0x8FF0) >> 4;
+ core_rev = (sb_id_hi & 0x7000) >> 8;
+ core_rev |= (sb_id_hi & 0xF);
core_vendor = (sb_id_hi & 0xFFFF0000) >> 16;
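For concreteness, here is how the new masks decode a register value (the value below is invented for illustration; core ID 0x812 is an 802.11 core):

/*
 * sb_id_hi    = 0x42438125
 * core_vendor = 0x42438125 >> 16         = 0x4243
 * core_id     = (0x8125 & 0x8FF0) >> 4   = 0x812
 * core_rev    = ((0x8125 & 0x7000) >> 8) = 0x00
 *             | (0x8125 & 0xF)           = 0x05   -> rev 5
 *
 * The old decode kept only the low nibble for the revision; the new
 * one also folds in bits 14:12, which newer cores (e.g. PCI-E) use.
 */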
/* if present, chipcommon is always core 0; read the chipid from it */
bcm->chip_id, bcm->chip_rev);
dprintk(KERN_INFO PFX "Number of cores: %d\n", core_count);
if (bcm->core_chipcommon.available) {
- dprintk(KERN_INFO PFX "Core 0: ID 0x%x, rev 0x%x, vendor 0x%x, %s\n",
- core_id, core_rev, core_vendor,
- bcm43xx_core_enabled(bcm) ? "enabled" : "disabled");
- }
-
- if (bcm->core_chipcommon.available)
+ dprintk(KERN_INFO PFX "Core 0: ID 0x%x, rev 0x%x, vendor 0x%x\n",
+ core_id, core_rev, core_vendor);
current_core = 1;
- else
+ } else
current_core = 0;
for ( ; current_core < core_count; current_core++) {
struct bcm43xx_coreinfo *core;
core_rev = (sb_id_hi & 0xF);
core_vendor = (sb_id_hi & 0xFFFF0000) >> 16;
- dprintk(KERN_INFO PFX "Core %d: ID 0x%x, rev 0x%x, vendor 0x%x, %s\n",
- current_core, core_id, core_rev, core_vendor,
- bcm43xx_core_enabled(bcm) ? "enabled" : "disabled" );
+ dprintk(KERN_INFO PFX "Core %d: ID 0x%x, rev 0x%x, vendor 0x%x\n",
+ current_core, core_id, core_rev, core_vendor);
core = NULL;
switch (core_id) {
case BCM43xx_COREID_PCI:
+ case BCM43xx_COREID_PCIE:
core = &bcm->core_pci;
if (core->available) {
printk(KERN_WARNING PFX "Multiple PCI cores found.\n");
case 6:
case 7:
case 9:
+ case 10:
break;
default:
- printk(KERN_ERR PFX "Error: Unsupported 80211 core revision %u\n",
+ printk(KERN_WARNING PFX
+ "Unsupported 80211 core revision %u\n",
core_rev);
- err = -ENODEV;
- goto out;
}
bcm->nr_80211_available++;
core->priv = ext_80211;
u32 sbimconfiglow;
u8 limit;
- if (bcm->chip_rev < 5) {
+ if (bcm->core_pci.rev <= 5 && bcm->core_pci.id != BCM43xx_COREID_PCIE) {
sbimconfiglow = bcm43xx_read32(bcm, BCM43xx_CIR_SBIMCONFIGLOW);
sbimconfiglow &= ~ BCM43xx_SBIMCONFIGLOW_REQUEST_TOUT_MASK;
sbimconfiglow &= ~ BCM43xx_SBIMCONFIGLOW_SERVICE_TOUT_MASK;
- if (bcm->bustype == BCM43xx_BUSTYPE_PCI)
- sbimconfiglow |= 0x32;
- else if (bcm->bustype == BCM43xx_BUSTYPE_SB)
- sbimconfiglow |= 0x53;
- else
- assert(0);
+ sbimconfiglow |= 0x32;
bcm43xx_write32(bcm, BCM43xx_CIR_SBIMCONFIGLOW, sbimconfiglow);
}
static int bcm43xx_pcicore_commit_settings(struct bcm43xx_private *bcm)
{
- int err;
- struct bcm43xx_coreinfo *old_core;
+ int err = 0;
- old_core = bcm->current_core;
- err = bcm43xx_switch_core(bcm, &bcm->core_pci);
- if (err)
- goto out;
+ bcm->irq_savedstate = bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
- bcm43xx_pcicore_broadcast_value(bcm, 0xfd8, 0x00000000);
+ if (bcm->core_chipcommon.available) {
+ err = bcm43xx_switch_core(bcm, &bcm->core_chipcommon);
+ if (err)
+ goto out;
+
+ bcm43xx_pcicore_broadcast_value(bcm, 0xfd8, 0x00000000);
+
+ /* this function is always called when a PCI core is mapped */
+ err = bcm43xx_switch_core(bcm, &bcm->core_pci);
+ if (err)
+ goto out;
+ } else
+ bcm43xx_pcicore_broadcast_value(bcm, 0xfd8, 0x00000000);
+
+ bcm43xx_interrupt_enable(bcm, bcm->irq_savedstate);
- bcm43xx_switch_core(bcm, old_core);
- assert(err == 0);
out:
return err;
}
+ static u32 bcm43xx_pcie_reg_read(struct bcm43xx_private *bcm, u32 address)
+ {
+ bcm43xx_write32(bcm, BCM43xx_PCIECORE_REG_ADDR, address);
+ return bcm43xx_read32(bcm, BCM43xx_PCIECORE_REG_DATA);
+ }
+
+ static void bcm43xx_pcie_reg_write(struct bcm43xx_private *bcm, u32 address,
+ u32 data)
+ {
+ bcm43xx_write32(bcm, BCM43xx_PCIECORE_REG_ADDR, address);
+ bcm43xx_write32(bcm, BCM43xx_PCIECORE_REG_DATA, data);
+ }
+
+ static void bcm43xx_pcie_mdio_write(struct bcm43xx_private *bcm, u8 dev, u8 reg,
+ u16 data)
+ {
+ int i;
+
+ bcm43xx_write32(bcm, BCM43xx_PCIECORE_MDIO_CTL, 0x0082);
+ bcm43xx_write32(bcm, BCM43xx_PCIECORE_MDIO_DATA, BCM43xx_PCIE_MDIO_ST |
+ BCM43xx_PCIE_MDIO_WT | (dev << BCM43xx_PCIE_MDIO_DEV) |
+ (reg << BCM43xx_PCIE_MDIO_REG) | BCM43xx_PCIE_MDIO_TA |
+ data);
+ udelay(10);
+
+ for (i = 0; i < 10; i++) {
+ if (bcm43xx_read32(bcm, BCM43xx_PCIECORE_MDIO_CTL) &
+ BCM43xx_PCIE_MDIO_TC)
+ break;
+ msleep(1);
+ }
+ bcm43xx_write32(bcm, BCM43xx_PCIECORE_MDIO_CTL, 0);
+ }
+
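Only the write side of the MDIO handshake is added by this patch. For orientation, a read counterpart would presumably mirror the same sequence; the sketch below is hypothetical (no bcm43xx_pcie_mdio_read exists here) and assumes a read is signalled simply by omitting the write bit:

/* Hypothetical sketch only -- not part of this patch. */
static u16 bcm43xx_pcie_mdio_read(struct bcm43xx_private *bcm, u8 dev, u8 reg)
{
	u16 data = 0;
	int i;

	bcm43xx_write32(bcm, BCM43xx_PCIECORE_MDIO_CTL, 0x0082);
	bcm43xx_write32(bcm, BCM43xx_PCIECORE_MDIO_DATA, BCM43xx_PCIE_MDIO_ST |
			(dev << BCM43xx_PCIE_MDIO_DEV) |
			(reg << BCM43xx_PCIE_MDIO_REG) | BCM43xx_PCIE_MDIO_TA);
	udelay(10);

	for (i = 0; i < 10; i++) {
		/* Poll the transaction-complete bit, as the write path does. */
		if (bcm43xx_read32(bcm, BCM43xx_PCIECORE_MDIO_CTL) &
		    BCM43xx_PCIE_MDIO_TC) {
			data = bcm43xx_read32(bcm, BCM43xx_PCIECORE_MDIO_DATA);
			break;
		}
		msleep(1);
	}
	bcm43xx_write32(bcm, BCM43xx_PCIECORE_MDIO_CTL, 0);
	return data;
}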
/* Make an I/O Core usable. "core_mask" is the bitmask of the cores to enable.
* To enable core 0, pass a core_mask of 1<<0
*/
if (err)
goto out;
- if (bcm->core_pci.rev < 6) {
+ if (bcm->current_core->rev < 6 ||
+ bcm->current_core->id == BCM43xx_COREID_PCI) {
value = bcm43xx_read32(bcm, BCM43xx_CIR_SBINTVEC);
value |= (1 << backplane_flag_nr);
bcm43xx_write32(bcm, BCM43xx_CIR_SBINTVEC, value);
}
}
- value = bcm43xx_read32(bcm, BCM43xx_PCICORE_SBTOPCI2);
- value |= BCM43xx_SBTOPCI2_PREFETCH | BCM43xx_SBTOPCI2_BURST;
- bcm43xx_write32(bcm, BCM43xx_PCICORE_SBTOPCI2, value);
-
- if (bcm->core_pci.rev < 5) {
- value = bcm43xx_read32(bcm, BCM43xx_CIR_SBIMCONFIGLOW);
- value |= (2 << BCM43xx_SBIMCONFIGLOW_SERVICE_TOUT_SHIFT)
- & BCM43xx_SBIMCONFIGLOW_SERVICE_TOUT_MASK;
- value |= (3 << BCM43xx_SBIMCONFIGLOW_REQUEST_TOUT_SHIFT)
- & BCM43xx_SBIMCONFIGLOW_REQUEST_TOUT_MASK;
- bcm43xx_write32(bcm, BCM43xx_CIR_SBIMCONFIGLOW, value);
- err = bcm43xx_pcicore_commit_settings(bcm);
- assert(err == 0);
+ if (bcm->current_core->id == BCM43xx_COREID_PCI) {
+ value = bcm43xx_read32(bcm, BCM43xx_PCICORE_SBTOPCI2);
+ value |= BCM43xx_SBTOPCI2_PREFETCH | BCM43xx_SBTOPCI2_BURST;
+ bcm43xx_write32(bcm, BCM43xx_PCICORE_SBTOPCI2, value);
+
+ if (bcm->current_core->rev < 5) {
+ value = bcm43xx_read32(bcm, BCM43xx_CIR_SBIMCONFIGLOW);
+ value |= (2 << BCM43xx_SBIMCONFIGLOW_SERVICE_TOUT_SHIFT)
+ & BCM43xx_SBIMCONFIGLOW_SERVICE_TOUT_MASK;
+ value |= (3 << BCM43xx_SBIMCONFIGLOW_REQUEST_TOUT_SHIFT)
+ & BCM43xx_SBIMCONFIGLOW_REQUEST_TOUT_MASK;
+ bcm43xx_write32(bcm, BCM43xx_CIR_SBIMCONFIGLOW, value);
+ err = bcm43xx_pcicore_commit_settings(bcm);
+ assert(err == 0);
+ } else if (bcm->current_core->rev >= 11) {
+ value = bcm43xx_read32(bcm, BCM43xx_PCICORE_SBTOPCI2);
+ value |= BCM43xx_SBTOPCI2_MEMREAD_MULTI;
+ bcm43xx_write32(bcm, BCM43xx_PCICORE_SBTOPCI2, value);
+ }
+ } else {
+ if (bcm->current_core->rev == 0 || bcm->current_core->rev == 1) {
+ value = bcm43xx_pcie_reg_read(bcm, BCM43xx_PCIE_TLP_WORKAROUND);
+ value |= 0x8;
+ bcm43xx_pcie_reg_write(bcm, BCM43xx_PCIE_TLP_WORKAROUND,
+ value);
+ }
+ if (bcm->current_core->rev == 0) {
+ bcm43xx_pcie_mdio_write(bcm, BCM43xx_MDIO_SERDES_RX,
+ BCM43xx_SERDES_RXTIMER, 0x8128);
+ bcm43xx_pcie_mdio_write(bcm, BCM43xx_MDIO_SERDES_RX,
+ BCM43xx_SERDES_CDR, 0x0100);
+ bcm43xx_pcie_mdio_write(bcm, BCM43xx_MDIO_SERDES_RX,
+ BCM43xx_SERDES_CDR_BW, 0x1466);
+ } else if (bcm->current_core->rev == 1) {
+ value = bcm43xx_pcie_reg_read(bcm, BCM43xx_PCIE_DLLP_LINKCTL);
+ value |= 0x40;
+ bcm43xx_pcie_reg_write(bcm, BCM43xx_PCIE_DLLP_LINKCTL,
+ value);
+ }
}
-
out_switch_back:
err = bcm43xx_switch_core(bcm, old_core);
out:
static void do_periodic_work(struct bcm43xx_private *bcm)
{
- unsigned int state;
-
- state = bcm->periodic_state;
- if (state % 8 == 0)
+ if (bcm->periodic_state % 8 == 0)
bcm43xx_periodic_every120sec(bcm);
- if (state % 4 == 0)
+ if (bcm->periodic_state % 4 == 0)
bcm43xx_periodic_every60sec(bcm);
- if (state % 2 == 0)
+ if (bcm->periodic_state % 2 == 0)
bcm43xx_periodic_every30sec(bcm);
- if (state % 1 == 0)
- bcm43xx_periodic_every15sec(bcm);
- bcm->periodic_state = state + 1;
+ bcm43xx_periodic_every15sec(bcm);
schedule_delayed_work(&bcm->periodic_work, HZ * 15);
}
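The modulo cascade replaces the old "badness" estimate: the work item fires every 15 seconds, and slower handlers stack on top of faster ones. For the first eight ticks (periodic_state 0 through 7) the calls are:

/*
 * state 0: every120sec + every60sec + every30sec + every15sec
 * state 1:                                         every15sec
 * state 2:                            every30sec + every15sec
 * state 3:                                         every15sec
 * state 4:              every60sec + every30sec + every15sec
 * state 5:                                         every15sec
 * state 6:                            every30sec + every15sec
 * state 7:                                         every15sec
 * (state 8 wraps around to the state-0 pattern)
 */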
- /* Estimate a "Badness" value based on the periodic work
- * state-machine state. "Badness" is worse (bigger), if the
- * periodic work will take longer.
- */
- static int estimate_periodic_work_badness(unsigned int state)
- {
- int badness = 0;
-
- if (state % 8 == 0) /* every 120 sec */
- badness += 10;
- if (state % 4 == 0) /* every 60 sec */
- badness += 5;
- if (state % 2 == 0) /* every 30 sec */
- badness += 1;
- if (state % 1 == 0) /* every 15 sec */
- badness += 1;
-
- #define BADNESS_LIMIT 4
- return badness;
- }
-
-static void bcm43xx_periodic_work_handler(void *d)
+static void bcm43xx_periodic_work_handler(struct work_struct *work)
{
- struct bcm43xx_private *bcm = d;
+ struct bcm43xx_private *bcm =
+ container_of(work, struct bcm43xx_private, periodic_work.work);
struct net_device *net_dev = bcm->net_dev;
unsigned long flags;
u32 savedirqs = 0;
- int badness;
unsigned long orig_trans_start = 0;
mutex_lock(&bcm->mutex);
- badness = estimate_periodic_work_badness(bcm->periodic_state);
- if (badness > BADNESS_LIMIT) {
+ if (unlikely(bcm->periodic_state % 4 == 0)) {
/* Periodic work will take a long time, so we want it to
* be preemptible.
*/
do_periodic_work(bcm);
- if (badness > BADNESS_LIMIT) {
+ if (unlikely(bcm->periodic_state % 4 == 0)) {
spin_lock_irqsave(&bcm->irq_lock, flags);
tasklet_enable(&bcm->isr_tasklet);
bcm43xx_interrupt_enable(bcm, savedirqs);
net_dev->trans_start = orig_trans_start;
}
mmiowb();
+ bcm->periodic_state++;
spin_unlock_irqrestore(&bcm->irq_lock, flags);
mutex_unlock(&bcm->mutex);
}
void bcm43xx_periodic_tasks_setup(struct bcm43xx_private *bcm)
{
- struct work_struct *work = &(bcm->periodic_work);
+ struct delayed_work *work = &bcm->periodic_work;
assert(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED);
- INIT_WORK(work, bcm43xx_periodic_work_handler, bcm);
- schedule_work(work);
+ INIT_DELAYED_WORK(work, bcm43xx_periodic_work_handler);
+ schedule_delayed_work(work, 0);
}
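A delayed_work wraps a work_struct together with a timer, and the handler still receives a pointer to the inner work_struct, so container_of() must name the .work member, as bcm43xx_periodic_work_handler() does above. A minimal self-rescheduling sketch with hypothetical names (my_ctx, my_periodic):

#include <linux/workqueue.h>

struct my_ctx {
	struct delayed_work periodic;	/* work_struct plus a timer */
};

static void my_periodic(struct work_struct *work)
{
	/* The pointer is to the inner member, hence "periodic.work". */
	struct my_ctx *ctx = container_of(work, struct my_ctx, periodic.work);

	/* ... do the periodic task on ctx ... */
	schedule_delayed_work(&ctx->periodic, 15 * HZ);	/* re-arm */
}

static void my_ctx_start(struct my_ctx *ctx)
{
	INIT_DELAYED_WORK(&ctx->periodic, my_periodic);
	schedule_delayed_work(&ctx->periodic, 0);	/* first run now */
}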
static void bcm43xx_security_init(struct bcm43xx_private *bcm)
bcm43xx_periodic_tasks_setup(bcm);
/*FIXME: This should be handled by softmac instead. */
- schedule_work(&bcm->softmac->associnfo.work);
+ schedule_delayed_work(&bcm->softmac->associnfo.work, 0);
out:
mutex_unlock(&(bcm)->mutex);
bcm->ieee->freq_band = IEEE80211_24GHZ_BAND;
break;
case BCM43xx_PHYTYPE_G:
- if (phy_rev > 7)
+ if (phy_rev > 8)
phy_rev_ok = 0;
bcm->ieee->modulation = IEEE80211_OFDM_MODULATION |
IEEE80211_CCK_MODULATION;
phy_type);
return -ENODEV;
};
+ bcm->ieee->perfect_rssi = RX_RSSI_MAX;
+ bcm->ieee->worst_rssi = 0;
if (!phy_rev_ok) {
printk(KERN_WARNING PFX "Invalid PHY Revision %x\n",
phy_rev);
return NETDEV_TX_OK;
}
- static struct net_device_stats * bcm43xx_net_get_stats(struct net_device *net_dev)
- {
- return &(bcm43xx_priv(net_dev)->ieee->stats);
- }
-
static void bcm43xx_net_tx_timeout(struct net_device *net_dev)
{
struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
net_dev->open = bcm43xx_net_open;
net_dev->stop = bcm43xx_net_stop;
- net_dev->get_stats = bcm43xx_net_get_stats;
net_dev->tx_timeout = bcm43xx_net_tx_timeout;
#ifdef CONFIG_NET_POLL_CONTROLLER
net_dev->poll_controller = bcm43xx_net_poll_controller;
/* Hard-reset the chip. Do not call this directly.
* Use bcm43xx_controller_restart()
*/
-static void bcm43xx_chip_reset(void *_bcm)
+static void bcm43xx_chip_reset(struct work_struct *work)
{
- struct bcm43xx_private *bcm = _bcm;
+ struct bcm43xx_private *bcm =
+ container_of(work, struct bcm43xx_private, restart_work);
struct bcm43xx_phyinfo *phy;
int err = -ENODEV;
if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED)
return;
printk(KERN_ERR PFX "Controller RESET (%s) ...\n", reason);
- INIT_WORK(&bcm->restart_work, bcm43xx_chip_reset, bcm);
+ INIT_WORK(&bcm->restart_work, bcm43xx_chip_reset);
schedule_work(&bcm->restart_work);
}
struct ipw2100_fw *fw);
static int ipw2100_ucode_download(struct ipw2100_priv *priv,
struct ipw2100_fw *fw);
-static void ipw2100_wx_event_work(struct ipw2100_priv *priv);
+static void ipw2100_wx_event_work(struct work_struct *work);
static struct iw_statistics *ipw2100_wx_wireless_stats(struct net_device *dev);
static struct iw_handler_def ipw2100_wx_handler_def;
queue_delayed_work(priv->workqueue, &priv->reset_work,
priv->reset_backoff * HZ);
else
- queue_work(priv->workqueue, &priv->reset_work);
+ queue_delayed_work(priv->workqueue, &priv->reset_work,
+ 0);
if (priv->reset_backoff < MAX_RESET_BACKOFF)
priv->reset_backoff++;
netif_stop_queue(priv->net_dev);
}
-static void ipw2100_reset_adapter(struct ipw2100_priv *priv)
+static void ipw2100_reset_adapter(struct work_struct *work)
{
+ struct ipw2100_priv *priv =
+ container_of(work, struct ipw2100_priv, reset_work.work);
unsigned long flags;
union iwreq_data wrqu = {
.ap_addr = {
return;
if (priv->status & STATUS_SECURITY_UPDATED)
- queue_work(priv->workqueue, &priv->security_work);
+ queue_delayed_work(priv->workqueue, &priv->security_work, 0);
- queue_work(priv->workqueue, &priv->wx_event_work);
+ queue_delayed_work(priv->workqueue, &priv->wx_event_work, 0);
}
static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
return err;
}
-static void ipw2100_security_work(struct ipw2100_priv *priv)
+static void ipw2100_security_work(struct work_struct *work)
{
+ struct ipw2100_priv *priv =
+ container_of(work, struct ipw2100_priv, security_work.work);
+
/* If we happen to have reconnected before we get a chance to
* process this, then update the security settings--which causes
* a disassociation to occur */
priv->reset_backoff = 0;
mutex_unlock(&priv->action_mutex);
- ipw2100_reset_adapter(priv);
+ ipw2100_reset_adapter(&priv->reset_work.work);
return 0;
done:
schedule_reset(priv);
}
- /*
- * TODO: reimplement it so that it reads statistics
- * from the adapter using ordinal tables
- * instead of/in addition to collecting them
- * in the driver
- */
- static struct net_device_stats *ipw2100_stats(struct net_device *dev)
- {
- struct ipw2100_priv *priv = ieee80211_priv(dev);
-
- return &priv->ieee->stats;
- }
-
static int ipw2100_wpa_enable(struct ipw2100_priv *priv, int value)
{
/* This is called when wpa_supplicant loads and closes the driver
.get_drvinfo = ipw_ethtool_get_drvinfo,
};
-static void ipw2100_hang_check(void *adapter)
+static void ipw2100_hang_check(struct work_struct *work)
{
- struct ipw2100_priv *priv = adapter;
+ struct ipw2100_priv *priv =
+ container_of(work, struct ipw2100_priv, hang_check.work);
unsigned long flags;
u32 rtc = 0xa5a5a5a5;
u32 len = sizeof(rtc);
spin_unlock_irqrestore(&priv->low_lock, flags);
}
-static void ipw2100_rf_kill(void *adapter)
+static void ipw2100_rf_kill(struct work_struct *work)
{
- struct ipw2100_priv *priv = adapter;
+ struct ipw2100_priv *priv =
+ container_of(work, struct ipw2100_priv, rf_kill.work);
unsigned long flags;
spin_lock_irqsave(&priv->low_lock, flags);
dev->open = ipw2100_open;
dev->stop = ipw2100_close;
dev->init = ipw2100_net_init;
- dev->get_stats = ipw2100_stats;
dev->ethtool_ops = &ipw2100_ethtool_ops;
dev->tx_timeout = ipw2100_tx_timeout;
dev->wireless_handlers = &ipw2100_wx_handler_def;
priv->workqueue = create_workqueue(DRV_NAME);
- INIT_WORK(&priv->reset_work,
- (void (*)(void *))ipw2100_reset_adapter, priv);
- INIT_WORK(&priv->security_work,
- (void (*)(void *))ipw2100_security_work, priv);
- INIT_WORK(&priv->wx_event_work,
- (void (*)(void *))ipw2100_wx_event_work, priv);
- INIT_WORK(&priv->hang_check, ipw2100_hang_check, priv);
- INIT_WORK(&priv->rf_kill, ipw2100_rf_kill, priv);
+ INIT_DELAYED_WORK(&priv->reset_work, ipw2100_reset_adapter);
+ INIT_DELAYED_WORK(&priv->security_work, ipw2100_security_work);
+ INIT_DELAYED_WORK(&priv->wx_event_work, ipw2100_wx_event_work);
+ INIT_DELAYED_WORK(&priv->hang_check, ipw2100_hang_check);
+ INIT_DELAYED_WORK(&priv->rf_kill, ipw2100_rf_kill);
tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
ipw2100_irq_tasklet, (unsigned long)priv);
{
struct ipw2100_priv *priv = pci_get_drvdata(pci_dev);
struct net_device *dev = priv->net_dev;
+ int err;
u32 val;
if (IPW2100_PM_DISABLED)
IPW_DEBUG_INFO("%s: Coming out of suspend...\n", dev->name);
pci_set_power_state(pci_dev, PCI_D0);
- pci_enable_device(pci_dev);
+ err = pci_enable_device(pci_dev);
+ if (err) {
+ printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
+ dev->name);
+ return err;
+ }
pci_restore_state(pci_dev);
/*
return -EINVAL;
if (wrqu->data.length) {
- buf = kmalloc(wrqu->data.length, GFP_KERNEL);
+ buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
- memcpy(buf, extra, wrqu->data.length);
kfree(ieee->wpa_ie);
ieee->wpa_ie = buf;
ieee->wpa_ie_len = wrqu->data.length;
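kmemdup() collapses the allocate-and-copy pair into one call with the same failure mode, which is all this hunk changes. Side by side, as a sketch:

/* Before: allocate, check, copy. */
buf = kmalloc(wrqu->data.length, GFP_KERNEL);
if (buf == NULL)
	return -ENOMEM;
memcpy(buf, extra, wrqu->data.length);

/* After: one call, same NULL-on-failure semantics. */
buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL);
if (buf == NULL)
	return -ENOMEM;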
.get_wireless_stats = ipw2100_wx_wireless_stats,
};
-static void ipw2100_wx_event_work(struct ipw2100_priv *priv)
+static void ipw2100_wx_event_work(struct work_struct *work)
{
+ struct ipw2100_priv *priv =
+ container_of(work, struct ipw2100_priv, wx_event_work.work);
union iwreq_data wrqu;
int len = ETH_ALEN;
static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
static void ipw_rx_queue_replenish(void *);
static int ipw_up(struct ipw_priv *);
-static void ipw_bg_up(void *);
+static void ipw_bg_up(struct work_struct *work);
static void ipw_down(struct ipw_priv *);
-static void ipw_bg_down(void *);
+static void ipw_bg_down(struct work_struct *work);
static int ipw_config(struct ipw_priv *);
static int init_supported_rates(struct ipw_priv *priv,
struct ipw_supported_rates *prates);
spin_unlock_irqrestore(&priv->lock, flags);
}
-static void ipw_bg_led_link_on(void *data)
+static void ipw_bg_led_link_on(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, led_link_on.work);
mutex_lock(&priv->mutex);
- ipw_led_link_on(data);
+ ipw_led_link_on(priv);
mutex_unlock(&priv->mutex);
}
spin_unlock_irqrestore(&priv->lock, flags);
}
-static void ipw_bg_led_link_off(void *data)
+static void ipw_bg_led_link_off(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, led_link_off.work);
mutex_lock(&priv->mutex);
- ipw_led_link_off(data);
+ ipw_led_link_off(priv);
mutex_unlock(&priv->mutex);
}
spin_unlock_irqrestore(&priv->lock, flags);
}
-static void ipw_bg_led_activity_off(void *data)
+static void ipw_bg_led_activity_off(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, led_act_off.work);
mutex_lock(&priv->mutex);
- ipw_led_activity_off(data);
+ ipw_led_activity_off(priv);
mutex_unlock(&priv->mutex);
}
}
}
-static void ipw_bg_adapter_restart(void *data)
+static void ipw_bg_adapter_restart(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, adapter_restart);
mutex_lock(&priv->mutex);
- ipw_adapter_restart(data);
+ ipw_adapter_restart(priv);
mutex_unlock(&priv->mutex);
}
}
}
-static void ipw_bg_scan_check(void *data)
+static void ipw_bg_scan_check(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, scan_check.work);
mutex_lock(&priv->mutex);
- ipw_scan_check(data);
+ ipw_scan_check(priv);
mutex_unlock(&priv->mutex);
}
return 1;
}
-static void ipw_bg_disassociate(void *data)
+static void ipw_bg_disassociate(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, disassociate);
mutex_lock(&priv->mutex);
- ipw_disassociate(data);
+ ipw_disassociate(priv);
mutex_unlock(&priv->mutex);
}
-static void ipw_system_config(void *data)
+static void ipw_system_config(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, system_config);
#ifdef CONFIG_IPW2200_PROMISCUOUS
if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
IPW_STATS_INTERVAL);
}
-static void ipw_bg_gather_stats(void *data)
+static void ipw_bg_gather_stats(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, gather_stats.work);
mutex_lock(&priv->mutex);
- ipw_gather_stats(data);
+ ipw_gather_stats(priv);
mutex_unlock(&priv->mutex);
}
if (!(priv->status & STATUS_ROAMING)) {
priv->status |= STATUS_ROAMING;
if (!(priv->status & STATUS_SCANNING))
- queue_work(priv->workqueue,
- &priv->request_scan);
+ queue_delayed_work(priv->workqueue,
+ &priv->request_scan, 0);
}
return;
}
#ifdef CONFIG_IPW2200_MONITOR
if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
priv->status |= STATUS_SCAN_FORCED;
- queue_work(priv->workqueue,
- &priv->request_scan);
+ queue_delayed_work(priv->workqueue,
+ &priv->request_scan, 0);
break;
}
priv->status &= ~STATUS_SCAN_FORCED;
/* Don't schedule if we aborted the scan */
priv->status &= ~STATUS_ROAMING;
} else if (priv->status & STATUS_SCAN_PENDING)
- queue_work(priv->workqueue,
- &priv->request_scan);
+ queue_delayed_work(priv->workqueue,
+ &priv->request_scan, 0);
else if (priv->config & CFG_BACKGROUND_SCAN
&& priv->status & STATUS_ASSOCIATED)
queue_delayed_work(priv->workqueue,
ipw_rx_queue_restock(priv);
}
-static void ipw_bg_rx_queue_replenish(void *data)
+static void ipw_bg_rx_queue_replenish(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, rx_replenish);
mutex_lock(&priv->mutex);
- ipw_rx_queue_replenish(data);
+ ipw_rx_queue_replenish(priv);
mutex_unlock(&priv->mutex);
}
return 1;
}
-static void ipw_merge_adhoc_network(void *data)
+static void ipw_merge_adhoc_network(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, merge_networks);
struct ieee80211_network *network = NULL;
struct ipw_network_match match = {
.network = priv->assoc_network
priv->assoc_request.beacon_interval);
}
-static void ipw_bg_adhoc_check(void *data)
+static void ipw_bg_adhoc_check(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, adhoc_check.work);
mutex_lock(&priv->mutex);
- ipw_adhoc_check(data);
+ ipw_adhoc_check(priv);
mutex_unlock(&priv->mutex);
}
return err;
}
-static int ipw_request_passive_scan(struct ipw_priv *priv) {
- return ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE);
+static void ipw_request_passive_scan(struct work_struct *work)
+{
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, request_passive_scan);
+ ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE);
}
-static int ipw_request_scan(struct ipw_priv *priv) {
- return ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE);
+static void ipw_request_scan(struct work_struct *work)
+{
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, request_scan.work);
+ ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE);
}
-static void ipw_bg_abort_scan(void *data)
+static void ipw_bg_abort_scan(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, abort_scan);
mutex_lock(&priv->mutex);
- ipw_abort_scan(data);
+ ipw_abort_scan(priv);
mutex_unlock(&priv->mutex);
}
}
/*
- * handling the beaconing responces. if we get different QoS setting
- * of the network from the the associated setting adjust the QoS
+ * handling the beaconing responses. if we get a QoS setting from
+ * the network that differs from the associated setting, adjust the QoS
* setting
*/
static int ipw_qos_association_resp(struct ipw_priv *priv,
/*
* background support to run QoS activate functionality
*/
-static void ipw_bg_qos_activate(void *data)
+static void ipw_bg_qos_activate(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, qos_activate);
if (priv == NULL)
return;
priv->status &= ~STATUS_ROAMING;
}
-static void ipw_bg_roam(void *data)
+static void ipw_bg_roam(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, roam);
mutex_lock(&priv->mutex);
- ipw_roam(data);
+ ipw_roam(priv);
mutex_unlock(&priv->mutex);
}
&priv->request_scan,
SCAN_INTERVAL);
else
- queue_work(priv->workqueue,
- &priv->request_scan);
+ queue_delayed_work(priv->workqueue,
+ &priv->request_scan, 0);
}
return 0;
return 1;
}
-static void ipw_bg_associate(void *data)
+static void ipw_bg_associate(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, associate);
mutex_lock(&priv->mutex);
- ipw_associate(data);
+ ipw_associate(priv);
mutex_unlock(&priv->mutex);
}
IPW_DEBUG_WX("Start scan\n");
- queue_work(priv->workqueue, &priv->request_scan);
+ queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
return 0;
}
spin_unlock_irqrestore(&priv->lock, flags);
}
-static void ipw_bg_rf_kill(void *data)
+static void ipw_bg_rf_kill(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, rf_kill.work);
mutex_lock(&priv->mutex);
- ipw_rf_kill(data);
+ ipw_rf_kill(priv);
mutex_unlock(&priv->mutex);
}
queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
}
-static void ipw_bg_link_up(void *data)
+static void ipw_bg_link_up(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, link_up);
mutex_lock(&priv->mutex);
- ipw_link_up(data);
+ ipw_link_up(priv);
mutex_unlock(&priv->mutex);
}
if (!(priv->status & STATUS_EXIT_PENDING)) {
/* Queue up another scan... */
- queue_work(priv->workqueue, &priv->request_scan);
+ queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
}
}
-static void ipw_bg_link_down(void *data)
+static void ipw_bg_link_down(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, link_down);
mutex_lock(&priv->mutex);
- ipw_link_down(data);
+ ipw_link_down(priv);
mutex_unlock(&priv->mutex);
}
init_waitqueue_head(&priv->wait_command_queue);
init_waitqueue_head(&priv->wait_state);
- INIT_WORK(&priv->adhoc_check, ipw_bg_adhoc_check, priv);
- INIT_WORK(&priv->associate, ipw_bg_associate, priv);
- INIT_WORK(&priv->disassociate, ipw_bg_disassociate, priv);
- INIT_WORK(&priv->system_config, ipw_system_config, priv);
- INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish, priv);
- INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart, priv);
- INIT_WORK(&priv->rf_kill, ipw_bg_rf_kill, priv);
- INIT_WORK(&priv->up, (void (*)(void *))ipw_bg_up, priv);
- INIT_WORK(&priv->down, (void (*)(void *))ipw_bg_down, priv);
- INIT_WORK(&priv->request_scan,
- (void (*)(void *))ipw_request_scan, priv);
- INIT_WORK(&priv->request_passive_scan,
- (void (*)(void *))ipw_request_passive_scan, priv);
- INIT_WORK(&priv->gather_stats,
- (void (*)(void *))ipw_bg_gather_stats, priv);
- INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_bg_abort_scan, priv);
- INIT_WORK(&priv->roam, ipw_bg_roam, priv);
- INIT_WORK(&priv->scan_check, ipw_bg_scan_check, priv);
- INIT_WORK(&priv->link_up, (void (*)(void *))ipw_bg_link_up, priv);
- INIT_WORK(&priv->link_down, (void (*)(void *))ipw_bg_link_down, priv);
- INIT_WORK(&priv->led_link_on, (void (*)(void *))ipw_bg_led_link_on,
- priv);
- INIT_WORK(&priv->led_link_off, (void (*)(void *))ipw_bg_led_link_off,
- priv);
- INIT_WORK(&priv->led_act_off, (void (*)(void *))ipw_bg_led_activity_off,
- priv);
- INIT_WORK(&priv->merge_networks,
- (void (*)(void *))ipw_merge_adhoc_network, priv);
+ INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
+ INIT_WORK(&priv->associate, ipw_bg_associate);
+ INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
+ INIT_WORK(&priv->system_config, ipw_system_config);
+ INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
+ INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
+ INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
+ INIT_WORK(&priv->up, ipw_bg_up);
+ INIT_WORK(&priv->down, ipw_bg_down);
+ INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
+ INIT_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
+ INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
+ INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
+ INIT_WORK(&priv->roam, ipw_bg_roam);
+ INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
+ INIT_WORK(&priv->link_up, ipw_bg_link_up);
+ INIT_WORK(&priv->link_down, ipw_bg_link_down);
+ INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
+ INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
+ INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
+ INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
#ifdef CONFIG_IPW2200_QOS
- INIT_WORK(&priv->qos_activate, (void (*)(void *))ipw_bg_qos_activate,
- priv);
+ INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
#endif /* CONFIG_IPW2200_QOS */
tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
/* If configure to try and auto-associate, kick
* off a scan. */
- queue_work(priv->workqueue, &priv->request_scan);
+ queue_delayed_work(priv->workqueue,
+ &priv->request_scan, 0);
return 0;
}
return -EIO;
}
-static void ipw_bg_up(void *data)
+static void ipw_bg_up(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, up);
mutex_lock(&priv->mutex);
- ipw_up(data);
+ ipw_up(priv);
mutex_unlock(&priv->mutex);
}
ipw_led_radio_off(priv);
}
-static void ipw_bg_down(void *data)
+static void ipw_bg_down(struct work_struct *work)
{
- struct ipw_priv *priv = data;
+ struct ipw_priv *priv =
+ container_of(work, struct ipw_priv, down);
mutex_lock(&priv->mutex);
- ipw_down(data);
+ ipw_down(priv);
mutex_unlock(&priv->mutex);
}
{
struct ipw_priv *priv = pci_get_drvdata(pdev);
struct net_device *dev = priv->net_dev;
+ int err;
u32 val;
printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
pci_set_power_state(pdev, PCI_D0);
- pci_enable_device(pdev);
+ err = pci_enable_device(pdev);
+ if (err) {
+ printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
+ dev->name);
+ return err;
+ }
pci_restore_state(pdev);
/*
/*
- *
* Copyright (C) 2002 Intersil Americas Inc.
* prism54_mib_mode_helper - MIB change mode helper function
* @mib: the &struct islpci_mib object to modify
* @iw_mode: new mode (%IW_MODE_*)
- *
+ *
* This is a helper function, hence it does not lock. Make sure
- * caller deals with locking *if* necessary. This function sets the
- * mode-dependent mib values and does the mapping of the Linux
- * Wireless API modes to Device firmware modes. It also checks for
- * correct valid Linux wireless modes.
+ * caller deals with locking *if* necessary. This function sets the
+ * mode-dependent mib values and does the mapping of the Linux
+ * Wireless API modes to Device firmware modes. It also checks
+ * that the requested Linux wireless mode is valid.
*/
static int
prism54_mib_mode_helper(islpci_private *priv, u32 iw_mode)
*
* this function initializes the struct given as @mib with defaults,
* of which many are retrieved from the global module parameter
- * variables.
+ * variables.
*/
void
authen = CARD_DEFAULT_AUTHEN;
wep = CARD_DEFAULT_WEP;
filter = CARD_DEFAULT_FILTER; /* (0) Do not filter un-encrypted data */
- dot1x = CARD_DEFAULT_DOT1X;
+ dot1x = CARD_DEFAULT_DOT1X;
mlme = CARD_DEFAULT_MLME_MODE;
conformance = CARD_DEFAULT_CONFORMANCE;
power = 127;
* schedule_work(), thus we can as well use sleeping semaphore
* locking */
void
-prism54_update_stats(islpci_private *priv)
+prism54_update_stats(struct work_struct *work)
{
+ islpci_private *priv = container_of(work, islpci_private, stats_work);
char *data;
int j;
struct obj_bss bss, *bss2;
} else
priv->iwstatistics.qual.updated = 0;
- /* Update our wireless stats, but do not schedule to often
+ /* Update our wireless stats, but do not schedule too often
* (max 1 HZ) */
if ((priv->stats_timestamp == 0) ||
time_after(jiffies, priv->stats_timestamp + 1 * HZ)) {
* Starting with WE-17, the buffer can be as big as needed.
* But the device won't report anything if you change the value
* of IWMAX_BSS=24. */
-
+
rvalue |= mgt_get_request(priv, DOT11_OID_BSSLIST, 0, NULL, &r);
bsslist = r.ptr;
return rvalue;
}
- /* Provides no functionality, just completes the ioctl. In essence this is a
+ /* Provides no functionality, just completes the ioctl. In essence this is
* just a cosmetic ioctl.
*/
static int
&key);
}
/*
- * If a valid key is set, encryption should be enabled
+ * If a valid key is set, encryption should be enabled
* (user may turn it off later).
* This is also how "iwconfig ethX key on" works
*/
}
/* now read the flags */
if (dwrq->flags & IW_ENCODE_DISABLED) {
- /* Encoding disabled,
+ /* Encoding disabled,
* authen = DOT11_AUTH_OS;
* invoke = 0;
* exunencrypt = 0; */
vwrq->value = (s32) r.u / 4;
vwrq->fixed = 1;
/* radio is not turned off
- * btw: how is possible to turn off only the radio
+ * btw: how is it possible to turn off only the radio
*/
vwrq->disabled = 0;
handle_request(priv, mlme, oid);
send_formatted_event(priv, "Authenticate request (ex)", mlme, 1);
- if (priv->iw_mode != IW_MODE_MASTER
+ if (priv->iw_mode != IW_MODE_MASTER
&& mlmeex->state != DOT11_STATE_AUTHING)
break;
confirm = kmalloc(sizeof(struct obj_mlmeex) + 6, GFP_ATOMIC);
- if (!confirm)
+ if (!confirm)
break;
memcpy(&confirm->address, mlmeex->address, ETH_ALEN);
- printk(KERN_DEBUG "Authenticate from: address:\t%02x:%02x:%02x:%02x:%02x:%02x\n",
+ printk(KERN_DEBUG "Authenticate from: address:\t%02x:%02x:%02x:%02x:%02x:%02x\n",
mlmeex->address[0],
mlmeex->address[1],
mlmeex->address[2],
handle_request(priv, mlme, oid);
send_formatted_event(priv, "Associate request (ex)", mlme, 1);
- if (priv->iw_mode != IW_MODE_MASTER
+ if (priv->iw_mode != IW_MODE_MASTER
&& mlmeex->state != DOT11_STATE_ASSOCING)
break;
-
+
confirm = kmalloc(sizeof(struct obj_mlmeex), GFP_ATOMIC);
if (!confirm)
if (!wpa_ie_len) {
printk(KERN_DEBUG "No WPA IE found from "
- "address:\t%02x:%02x:%02x:%02x:%02x:%02x\n",
+ "address:\t%02x:%02x:%02x:%02x:%02x:%02x\n",
mlmeex->address[0],
mlmeex->address[1],
mlmeex->address[2],
mgt_set_varlen(priv, oid, confirm, wpa_ie_len);
kfree(confirm);
-
+
break;
case DOT11_OID_REASSOCIATEEX:
handle_request(priv, mlme, oid);
send_formatted_event(priv, "Reassociate request (ex)", mlme, 1);
- if (priv->iw_mode != IW_MODE_MASTER
+ if (priv->iw_mode != IW_MODE_MASTER
&& mlmeex->state != DOT11_STATE_ASSOCING)
break;
if (!wpa_ie_len) {
printk(KERN_DEBUG "No WPA IE found from "
- "address:\t%02x:%02x:%02x:%02x:%02x:%02x\n",
+ "address:\t%02x:%02x:%02x:%02x:%02x:%02x\n",
mlmeex->address[0],
mlmeex->address[1],
mlmeex->address[2],
break;
}
- confirm->size = wpa_ie_len;
+ confirm->size = wpa_ie_len;
memcpy(&confirm->data, wpa_ie, wpa_ie_len);
mgt_set_varlen(priv, oid, confirm, wpa_ie_len);
kfree(confirm);
-
+
break;
default:
* interrupt context, no locks held.
*/
void
-prism54_process_trap(void *data)
+prism54_process_trap(struct work_struct *work)
{
- struct islpci_mgmtframe *frame = data;
+ struct islpci_mgmtframe *frame =
+ container_of(work, struct islpci_mgmtframe, ws);
struct net_device *ndev = frame->ndev;
enum oid_num_t n = mgt_oidtonum(frame->header->oid);
#define PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN \
((int) (&((struct prism2_hostapd_param *) 0)->u.generic_elem.data))
- /* Maximum length for algorithm names (-1 for nul termination)
+ /* Maximum length for algorithm names (-1 for nul termination)
* used in ioctl() */
#define HOSTAP_CRYPT_ALG_NAME_LEN 16
-
+
struct prism2_hostapd_param {
u32 cmd;
u8 sta_addr[ETH_ALEN];
&key);
}
/*
- * If a valid key is set, encryption should be enabled
+ * If a valid key is set, encryption should be enabled
* (user may turn it off later).
* This is also how "iwconfig ethX key on" works
*/
}
/* now read the flags */
if (param->u.crypt.flags & IW_ENCODE_DISABLED) {
- /* Encoding disabled,
+ /* Encoding disabled,
* authen = DOT11_AUTH_OS;
* invoke = 0;
* exunencrypt = 0; */
ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach, len);
- if (ret == 0)
+ if (ret == 0)
printk(KERN_DEBUG "%s: WPA IE Attachment was set\n",
ndev->name);
}
mlme = DOT11_MLME_AUTO;
printk("%s: Disabling WPA\n", ndev->name);
break;
- case 2:
+ case 2:
case 1: /* WPA */
printk("%s: Enabling WPA\n", ndev->name);
break;
/*
- *
* Copyright (C) 2002 Intersil Americas Inc.
void prism54_mib_init(islpci_private *);
struct iw_statistics *prism54_get_wireless_stats(struct net_device *);
-void prism54_update_stats(islpci_private *);
+void prism54_update_stats(struct work_struct *);
void prism54_acl_init(struct islpci_acl *);
void prism54_acl_clean(struct islpci_acl *);
-void prism54_process_trap(void *);
+void prism54_process_trap(struct work_struct *);
void prism54_wpa_bss_ie_init(islpci_private *priv);
void prism54_wpa_bss_ie_clean(islpci_private *priv);
/*
- *
* Copyright (C) 2002 Intersil Americas Inc.
islpci_set_state(priv, PRV_STATE_PREBOOT);
/* disable all device interrupts in case they weren't */
- isl38xx_disable_interrupts(priv->device_base);
+ isl38xx_disable_interrupts(priv->device_base);
/* For safety reasons, we may want to ensure that no DMA transfer is
* currently in progress by emptying the TX and RX queues. */
DEFINE_WAIT(wait);
prepare_to_wait(&priv->reset_done, &wait, TASK_UNINTERRUPTIBLE);
-
+
/* now the last step is to reset the interface */
isl38xx_interface_reset(priv->device_base, priv->device_host_address);
islpci_set_state(priv, PRV_STATE_PREINIT);
for(count = 0; count < 2 && result; count++) {
/* The software reset acknowledge needs about 220 msec here.
* Be conservative and wait for up to one second. */
-
+
remaining = schedule_timeout_uninterruptible(HZ);
if(remaining > 0) {
break;
}
- /* If we're here it's because our IRQ hasn't yet gone through.
+ /* If we're here it's because our IRQ hasn't yet gone through.
* Retry a bit more...
*/
printk(KERN_ERR "%s: no 'reset complete' IRQ seen - retrying\n",
/* Now that the device is 100% up, let's allow
* for the other interrupts --
- * NOTE: this is not *yet* true since we've only allowed the
+ * NOTE: this is not *yet* true since we've only allowed the
* INIT interrupt on the IRQ line. We can perhaps poll
* the IRQ line until we know for sure the reset went through */
isl38xx_enable_common_interrupts(priv->device_base);
prism54_acl_init(&priv->acl);
prism54_wpa_bss_ie_init(priv);
- if (mgt_init(priv))
+ if (mgt_init(priv))
goto out_free;
return 0;
priv->state_off = 1;
/* initialize workqueue's */
- INIT_WORK(&priv->stats_work,
- (void (*)(void *)) prism54_update_stats, priv);
+ INIT_WORK(&priv->stats_work, prism54_update_stats);
priv->stats_timestamp = 0;
- INIT_WORK(&priv->reset_task, islpci_do_reset_and_wake, priv);
+ INIT_WORK(&priv->reset_task, islpci_do_reset_and_wake);
priv->reset_task_pending = 0;
/* allocate various memory areas */
/*
- *
* Copyright (C) 2002 Intersil Americas Inc.
* This program is free software; you can redistribute it and/or modify
/* read the index of the first fragment to be freed */
index = priv->free_data_tx % ISL38XX_CB_TX_QSIZE;
- /* check for holes in the arrays caused by multi fragment frames
+ /* check for holes in the arrays caused by multi fragment frames
* searching for the last fragment of a frame */
if (priv->pci_map_tx_address[index] != (dma_addr_t) NULL) {
/* entry is the last fragment of a frame
* header and without the FCS. But there a is a bit that
* indicates if the packet is corrupted :-) */
struct rfmon_header *hdr = (struct rfmon_header *) (*skb)->data;
+
if (hdr->flags & 0x01)
/* This one is bad. Drop it ! */
return -1;
(struct avs_80211_1_header *) skb_push(*skb,
sizeof (struct
avs_80211_1_header));
-
+
avs->version = cpu_to_be32(P80211CAPTURE_VERSION);
avs->length = cpu_to_be32(sizeof (struct avs_80211_1_header));
avs->mactime = cpu_to_be64(le64_to_cpu(clock));
struct rx_annex_header *annex =
(struct rx_annex_header *) skb->data;
wstats.level = annex->rfmon.rssi;
- /* The noise value can be a bit outdated if nobody's
+ /* The noise value can be a bit outdated if nobody's
* reading wireless stats... */
wstats.noise = priv->local_iwstatistics.qual.noise;
wstats.qual = wstats.level - wstats.noise;
break;
}
/* update the fragment address */
- control_block->rx_data_low[index].address = cpu_to_le32((u32)
- priv->
- pci_map_rx_address
- [index]);
+ control_block->rx_data_low[index].address =
+ cpu_to_le32((u32)priv->pci_map_rx_address[index]);
wmb();
/* increment the driver read pointer */
}
void
-islpci_do_reset_and_wake(void *data)
+islpci_do_reset_and_wake(struct work_struct *work)
{
- islpci_private *priv = data;
+ islpci_private *priv = container_of(work, islpci_private, reset_task);
+
islpci_reset(priv, 1);
- netif_wake_queue(priv->ndev);
priv->reset_task_pending = 0;
+ smp_wmb();
+ netif_wake_queue(priv->ndev);
}
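The reordering in this hunk is deliberate: the pending flag must be visibly clear before the queue wakes, so a write barrier separates the two stores. The assumption is that the timeout path, which tests reset_task_pending, observes the flag only after the queue is awake. Condensed:

/* Writer (reset work): clear the flag, then wake the queue. */
priv->reset_task_pending = 0;
smp_wmb();			/* flag store ordered before the wake */
netif_wake_queue(priv->ndev);

/*
 * Reader (tx timeout path): by the time it can run again the flag
 * is already 0, so a new reset can be scheduled without being lost.
 */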
void
/* increment the transmit error counter */
statistics->tx_errors++;
- printk(KERN_WARNING "%s: tx_timeout", ndev->name);
if (!priv->reset_task_pending) {
- priv->reset_task_pending = 1;
- printk(", scheduling a reset");
+ printk(KERN_WARNING
+ "%s: tx_timeout, scheduling reset", ndev->name);
netif_stop_queue(ndev);
+ priv->reset_task_pending = 1;
schedule_work(&priv->reset_task);
+ } else {
+ printk(KERN_WARNING
+ "%s: tx_timeout, waiting for reset", ndev->name);
}
- printk("\n");
}
/*
- *
* Copyright (C) 2002 Intersil Americas Inc.
*
* This program is free software; you can redistribute it and/or modify
int islpci_eth_transmit(struct sk_buff *, struct net_device *);
int islpci_eth_receive(islpci_private *);
void islpci_eth_tx_timeout(struct net_device *);
-void islpci_do_reset_and_wake(void *data);
+void islpci_do_reset_and_wake(struct work_struct *);
#endif /* _ISL_GEN_H */
/*
- *
* Copyright (C) 2002 Intersil Americas Inc.
*
/* Create work to handle trap out of interrupt
* context. */
- INIT_WORK(&frame->ws, prism54_process_trap, frame);
+ INIT_WORK(&frame->ws, prism54_process_trap);
schedule_work(&frame->ws);
} else {
printk(KERN_WARNING "%s: timeout waiting for mgmt response\n",
ndev->name);
- /* TODO: we should reset the device here */
+ /* TODO: we should reset the device here */
out:
finish_wait(&priv->mgmt_wqueue, &wait);
up(&priv->mgmt_sem);
static void ieee_init(struct ieee80211_device *ieee);
static void softmac_init(struct ieee80211softmac_device *sm);
+ static void set_rts_cts_work(struct work_struct *work);
+ static void set_basic_rates_work(struct work_struct *work);
static void housekeeping_init(struct zd_mac *mac);
static void housekeeping_enable(struct zd_mac *mac);
memset(mac, 0, sizeof(*mac));
spin_lock_init(&mac->lock);
mac->netdev = netdev;
+ INIT_DELAYED_WORK(&mac->set_rts_cts_work, set_rts_cts_work);
+ INIT_DELAYED_WORK(&mac->set_basic_rates_work, set_basic_rates_work);
ieee_init(ieee);
softmac_init(ieee80211_priv(netdev));
housekeeping_disable(mac);
ieee80211softmac_stop(netdev);
+ /* Ensure no work items are running or queued from this point */
+ cancel_delayed_work(&mac->set_rts_cts_work);
+ cancel_delayed_work(&mac->set_basic_rates_work);
+ flush_workqueue(zd_workqueue);
+ mac->updating_rts_rate = 0;
+ mac->updating_basic_rates = 0;
+
zd_chip_disable_hwint(chip);
zd_chip_switch_radio_off(chip);
zd_chip_disable_int(chip);
return regdomain;
}
+ /* Fallback to lowest rate, if rate is unknown. */
+ static u8 rate_to_zd_rate(u8 rate)
+ {
+ switch (rate) {
+ case IEEE80211_CCK_RATE_2MB:
+ return ZD_CCK_RATE_2M;
+ case IEEE80211_CCK_RATE_5MB:
+ return ZD_CCK_RATE_5_5M;
+ case IEEE80211_CCK_RATE_11MB:
+ return ZD_CCK_RATE_11M;
+ case IEEE80211_OFDM_RATE_6MB:
+ return ZD_OFDM_RATE_6M;
+ case IEEE80211_OFDM_RATE_9MB:
+ return ZD_OFDM_RATE_9M;
+ case IEEE80211_OFDM_RATE_12MB:
+ return ZD_OFDM_RATE_12M;
+ case IEEE80211_OFDM_RATE_18MB:
+ return ZD_OFDM_RATE_18M;
+ case IEEE80211_OFDM_RATE_24MB:
+ return ZD_OFDM_RATE_24M;
+ case IEEE80211_OFDM_RATE_36MB:
+ return ZD_OFDM_RATE_36M;
+ case IEEE80211_OFDM_RATE_48MB:
+ return ZD_OFDM_RATE_48M;
+ case IEEE80211_OFDM_RATE_54MB:
+ return ZD_OFDM_RATE_54M;
+ }
+ return ZD_CCK_RATE_1M;
+ }
+
+ static u16 rate_to_cr_rate(u8 rate)
+ {
+ switch (rate) {
+ case IEEE80211_CCK_RATE_2MB:
+ return CR_RATE_1M;
+ case IEEE80211_CCK_RATE_5MB:
+ return CR_RATE_5_5M;
+ case IEEE80211_CCK_RATE_11MB:
+ return CR_RATE_11M;
+ case IEEE80211_OFDM_RATE_6MB:
+ return CR_RATE_6M;
+ case IEEE80211_OFDM_RATE_9MB:
+ return CR_RATE_9M;
+ case IEEE80211_OFDM_RATE_12MB:
+ return CR_RATE_12M;
+ case IEEE80211_OFDM_RATE_18MB:
+ return CR_RATE_18M;
+ case IEEE80211_OFDM_RATE_24MB:
+ return CR_RATE_24M;
+ case IEEE80211_OFDM_RATE_36MB:
+ return CR_RATE_36M;
+ case IEEE80211_OFDM_RATE_48MB:
+ return CR_RATE_48M;
+ case IEEE80211_OFDM_RATE_54MB:
+ return CR_RATE_54M;
+ }
+ return CR_RATE_1M;
+ }
+
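Both switch helpers above fall back to the lowest rate instead of failing; note that IEEE80211_CCK_RATE_1MB itself is absent from the cases and is handled by the same fallback. A couple of illustrative calls (results follow directly from the tables above):

u8 a = rate_to_zd_rate(IEEE80211_OFDM_RATE_54MB);	/* ZD_OFDM_RATE_54M */
u8 b = rate_to_zd_rate(IEEE80211_CCK_RATE_1MB);		/* ZD_CCK_RATE_1M */
u8 c = rate_to_zd_rate(0xff);				/* ZD_CCK_RATE_1M */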
+ static void try_enable_tx(struct zd_mac *mac)
+ {
+ unsigned long flags;
+
+ spin_lock_irqsave(&mac->lock, flags);
+ if (mac->updating_rts_rate == 0 && mac->updating_basic_rates == 0)
+ netif_wake_queue(mac->netdev);
+ spin_unlock_irqrestore(&mac->lock, flags);
+ }
+
+ static void set_rts_cts_work(struct work_struct *work)
+ {
+ struct zd_mac *mac =
+ container_of(work, struct zd_mac, set_rts_cts_work.work);
+ unsigned long flags;
+ u8 rts_rate;
+ unsigned int short_preamble;
+
+ mutex_lock(&mac->chip.mutex);
+
+ spin_lock_irqsave(&mac->lock, flags);
+ mac->updating_rts_rate = 0;
+ rts_rate = mac->rts_rate;
+ short_preamble = mac->short_preamble;
+ spin_unlock_irqrestore(&mac->lock, flags);
+
+ zd_chip_set_rts_cts_rate_locked(&mac->chip, rts_rate, short_preamble);
+ mutex_unlock(&mac->chip.mutex);
+
+ try_enable_tx(mac);
+ }
+
+ static void set_basic_rates_work(struct work_struct *work)
+ {
+ struct zd_mac *mac =
+ container_of(work, struct zd_mac, set_basic_rates_work.work);
+ unsigned long flags;
+ u16 basic_rates;
+
+ mutex_lock(&mac->chip.mutex);
+
+ spin_lock_irqsave(&mac->lock, flags);
+ mac->updating_basic_rates = 0;
+ basic_rates = mac->basic_rates;
+ spin_unlock_irqrestore(&mac->lock, flags);
+
+ zd_chip_set_basic_rates_locked(&mac->chip, basic_rates);
+ mutex_unlock(&mac->chip.mutex);
+
+ try_enable_tx(mac);
+ }
+
+ static void bssinfo_change(struct net_device *netdev, u32 changes)
+ {
+ struct zd_mac *mac = zd_netdev_mac(netdev);
+ struct ieee80211softmac_device *softmac = ieee80211_priv(netdev);
+ struct ieee80211softmac_bss_info *bssinfo = &softmac->bssinfo;
+ int need_set_rts_cts = 0;
+ int need_set_rates = 0;
+ u16 basic_rates;
+ unsigned long flags;
+
+ dev_dbg_f(zd_mac_dev(mac), "changes: %x\n", changes);
+
+ if (changes & IEEE80211SOFTMAC_BSSINFOCHG_SHORT_PREAMBLE) {
+ spin_lock_irqsave(&mac->lock, flags);
+ mac->short_preamble = bssinfo->short_preamble;
+ spin_unlock_irqrestore(&mac->lock, flags);
+ need_set_rts_cts = 1;
+ }
+
+ if (changes & IEEE80211SOFTMAC_BSSINFOCHG_RATES) {
+ /* Set RTS rate to highest available basic rate */
+ u8 rate = ieee80211softmac_highest_supported_rate(softmac,
+ &bssinfo->supported_rates, 1);
+ rate = rate_to_zd_rate(rate);
+
+ spin_lock_irqsave(&mac->lock, flags);
+ if (rate != mac->rts_rate) {
+ mac->rts_rate = rate;
+ need_set_rts_cts = 1;
+ }
+ spin_unlock_irqrestore(&mac->lock, flags);
+
+ /* Set basic rates */
+ need_set_rates = 1;
+ if (bssinfo->supported_rates.count == 0) {
+ /* Allow the device to be flexible */
+ basic_rates = CR_RATES_80211B | CR_RATES_80211G;
+ } else {
+ int i = 0;
+ basic_rates = 0;
+
+ for (i = 0; i < bssinfo->supported_rates.count; i++) {
+ u16 rate = bssinfo->supported_rates.rates[i];
+ if ((rate & IEEE80211_BASIC_RATE_MASK) == 0)
+ continue;
+
+ rate &= ~IEEE80211_BASIC_RATE_MASK;
+ basic_rates |= rate_to_cr_rate(rate);
+ }
+ }
+ spin_lock_irqsave(&mac->lock, flags);
+ mac->basic_rates = basic_rates;
+ spin_unlock_irqrestore(&mac->lock, flags);
+ }
+
+ /* Schedule any changes we made above */
+
+ spin_lock_irqsave(&mac->lock, flags);
+ if (need_set_rts_cts && !mac->updating_rts_rate) {
+ mac->updating_rts_rate = 1;
+ netif_stop_queue(mac->netdev);
+ queue_delayed_work(zd_workqueue, &mac->set_rts_cts_work, 0);
+ }
+ if (need_set_rates && !mac->updating_basic_rates) {
+ mac->updating_basic_rates = 1;
+ netif_stop_queue(mac->netdev);
+ queue_delayed_work(zd_workqueue, &mac->set_basic_rates_work, 0);
+ }
+ spin_unlock_irqrestore(&mac->lock, flags);
+ }
+
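Taken together, the updating_* flags, the queue stop, and try_enable_tx() form a small handshake that keeps TX quiet while the chip is reprogrammed. The life cycle, condensed:

/*
 * bssinfo_change():  set updating_x = 1, netif_stop_queue(),
 *                    queue the work item
 * set_x_work():      under the lock: clear updating_x and snapshot
 *                    the new settings; program the chip with the
 *                    chip mutex held; then try_enable_tx()
 * try_enable_tx():   netif_wake_queue() only once *both*
 *                    updating_* flags are clear
 */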
static void set_channel(struct net_device *netdev, u8 channel)
{
struct zd_mac *mac = zd_netdev_mac(netdev);
zd_chip_set_channel(&mac->chip, channel);
}
- /* TODO: Should not work in Managed mode. */
int zd_mac_request_channel(struct zd_mac *mac, u8 channel)
{
unsigned long lock_flags;
return 0;
}
- int zd_mac_get_channel(struct zd_mac *mac, u8 *channel, u8 *flags)
+ u8 zd_mac_get_channel(struct zd_mac *mac)
{
- struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac);
+ u8 channel = zd_chip_get_channel(&mac->chip);
- *channel = zd_chip_get_channel(&mac->chip);
- if (ieee->iw_mode != IW_MODE_INFRA) {
- spin_lock_irq(&mac->lock);
- *flags = *channel == mac->requested_channel ?
- MAC_FIXED_CHANNEL : 0;
- spin_unlock(&mac->lock);
- } else {
- *flags = 0;
- }
- dev_dbg_f(zd_mac_dev(mac), "channel %u flags %u\n", *channel, *flags);
- return 0;
+ dev_dbg_f(zd_mac_dev(mac), "channel %u\n", channel);
+ return channel;
}
/* If wrong rate is given, we are falling back to the slowest rate: 1MBit/s */
- static u8 cs_typed_rate(u8 cs_rate)
+ static u8 zd_rate_typed(u8 zd_rate)
{
static const u8 typed_rates[16] = {
- [ZD_CS_CCK_RATE_1M] = ZD_CS_CCK|ZD_CS_CCK_RATE_1M,
- [ZD_CS_CCK_RATE_2M] = ZD_CS_CCK|ZD_CS_CCK_RATE_2M,
- [ZD_CS_CCK_RATE_5_5M] = ZD_CS_CCK|ZD_CS_CCK_RATE_5_5M,
- [ZD_CS_CCK_RATE_11M] = ZD_CS_CCK|ZD_CS_CCK_RATE_11M,
+ [ZD_CCK_RATE_1M] = ZD_CS_CCK|ZD_CCK_RATE_1M,
+ [ZD_CCK_RATE_2M] = ZD_CS_CCK|ZD_CCK_RATE_2M,
+ [ZD_CCK_RATE_5_5M] = ZD_CS_CCK|ZD_CCK_RATE_5_5M,
+ [ZD_CCK_RATE_11M] = ZD_CS_CCK|ZD_CCK_RATE_11M,
[ZD_OFDM_RATE_6M] = ZD_CS_OFDM|ZD_OFDM_RATE_6M,
[ZD_OFDM_RATE_9M] = ZD_CS_OFDM|ZD_OFDM_RATE_9M,
[ZD_OFDM_RATE_12M] = ZD_CS_OFDM|ZD_OFDM_RATE_12M,
};
ZD_ASSERT(ZD_CS_RATE_MASK == 0x0f);
- return typed_rates[cs_rate & ZD_CS_RATE_MASK];
- }
-
- /* Fallback to lowest rate, if rate is unknown. */
- static u8 rate_to_cs_rate(u8 rate)
- {
- switch (rate) {
- case IEEE80211_CCK_RATE_2MB:
- return ZD_CS_CCK_RATE_2M;
- case IEEE80211_CCK_RATE_5MB:
- return ZD_CS_CCK_RATE_5_5M;
- case IEEE80211_CCK_RATE_11MB:
- return ZD_CS_CCK_RATE_11M;
- case IEEE80211_OFDM_RATE_6MB:
- return ZD_OFDM_RATE_6M;
- case IEEE80211_OFDM_RATE_9MB:
- return ZD_OFDM_RATE_9M;
- case IEEE80211_OFDM_RATE_12MB:
- return ZD_OFDM_RATE_12M;
- case IEEE80211_OFDM_RATE_18MB:
- return ZD_OFDM_RATE_18M;
- case IEEE80211_OFDM_RATE_24MB:
- return ZD_OFDM_RATE_24M;
- case IEEE80211_OFDM_RATE_36MB:
- return ZD_OFDM_RATE_36M;
- case IEEE80211_OFDM_RATE_48MB:
- return ZD_OFDM_RATE_48M;
- case IEEE80211_OFDM_RATE_54MB:
- return ZD_OFDM_RATE_54M;
- }
- return ZD_CS_CCK_RATE_1M;
+ return typed_rates[zd_rate & ZD_CS_RATE_MASK];
}
int zd_mac_set_mode(struct zd_mac *mac, u32 mode)
return 0;
}
- static int zd_calc_tx_length_us(u8 *service, u8 cs_rate, u16 tx_length)
+ static int zd_calc_tx_length_us(u8 *service, u8 zd_rate, u16 tx_length)
{
static const u8 rate_divisor[] = {
- [ZD_CS_CCK_RATE_1M] = 1,
- [ZD_CS_CCK_RATE_2M] = 2,
- [ZD_CS_CCK_RATE_5_5M] = 11, /* bits must be doubled */
- [ZD_CS_CCK_RATE_11M] = 11,
+ [ZD_CCK_RATE_1M] = 1,
+ [ZD_CCK_RATE_2M] = 2,
+ [ZD_CCK_RATE_5_5M] = 11, /* bits must be doubled */
+ [ZD_CCK_RATE_11M] = 11,
[ZD_OFDM_RATE_6M] = 6,
[ZD_OFDM_RATE_9M] = 9,
[ZD_OFDM_RATE_12M] = 12,
u32 bits = (u32)tx_length * 8;
u32 divisor;
- divisor = rate_divisor[cs_rate];
+ divisor = rate_divisor[zd_rate];
if (divisor == 0)
return -EINVAL;
- switch (cs_rate) {
- case ZD_CS_CCK_RATE_5_5M:
+ switch (zd_rate) {
+ case ZD_CCK_RATE_5_5M:
bits = (2*bits) + 10; /* round up to the next integer */
break;
- case ZD_CS_CCK_RATE_11M:
+ case ZD_CCK_RATE_11M:
if (service) {
u32 t = bits % 11;
*service &= ~ZD_PLCP_SERVICE_LENGTH_EXTENSION;
R2M_11A = 0x02,
};
- static u8 cs_rate_to_modulation(u8 cs_rate, int flags)
+ static u8 zd_rate_to_modulation(u8 zd_rate, int flags)
{
u8 modulation;
- modulation = cs_typed_rate(cs_rate);
+ modulation = zd_rate_typed(zd_rate);
if (flags & R2M_SHORT_PREAMBLE) {
switch (ZD_CS_RATE(modulation)) {
- case ZD_CS_CCK_RATE_2M:
- case ZD_CS_CCK_RATE_5_5M:
- case ZD_CS_CCK_RATE_11M:
+ case ZD_CCK_RATE_2M:
+ case ZD_CCK_RATE_5_5M:
+ case ZD_CCK_RATE_11M:
modulation |= ZD_CS_CCK_PREA_SHORT;
return modulation;
}
{
struct ieee80211softmac_device *softmac = ieee80211_priv(mac->netdev);
u16 ftype = WLAN_FC_GET_TYPE(le16_to_cpu(hdr->frame_ctl));
- u8 rate, cs_rate;
+ u8 rate, zd_rate;
int is_mgt = (ftype == IEEE80211_FTYPE_MGMT) != 0;
+ int is_multicast = is_multicast_ether_addr(hdr->addr1);
+ int short_preamble = ieee80211softmac_short_preamble_ok(softmac,
+ is_multicast, is_mgt);
+ int flags = 0;
+
+ /* FIXME: 802.11a? */
+ rate = ieee80211softmac_suggest_txrate(softmac, is_multicast, is_mgt);
- /* FIXME: 802.11a? short preamble? */
- rate = ieee80211softmac_suggest_txrate(softmac,
- is_multicast_ether_addr(hdr->addr1), is_mgt);
+ if (short_preamble)
+ flags |= R2M_SHORT_PREAMBLE;
- cs_rate = rate_to_cs_rate(rate);
- cs->modulation = cs_rate_to_modulation(cs_rate, 0);
+ zd_rate = rate_to_zd_rate(rate);
+ cs->modulation = zd_rate_to_modulation(zd_rate, flags);
}
static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs,
struct ieee80211_hdr_4addr *header)
{
+ struct ieee80211softmac_device *softmac = ieee80211_priv(mac->netdev);
unsigned int tx_length = le16_to_cpu(cs->tx_length);
u16 fctl = le16_to_cpu(header->frame_ctl);
u16 ftype = WLAN_FC_GET_TYPE(fctl);
u16 stype = WLAN_FC_GET_STYPE(fctl);
/*
- * CONTROL:
- * - start at 0x00
- * - if fragment 0, enable bit 0
+ * CONTROL TODO:
* - if backoff needed, enable bit 0
* - if burst (backoff not needed) disable bit 0
- * - if multicast, enable bit 1
- * - if PS-POLL frame, enable bit 2
- * - if in INDEPENDENT_BSS mode and zd1205_DestPowerSave, then enable
- * bit 4 (FIXME: wtf)
- * - if frag_len > RTS threshold, set bit 5 as long if it isnt
- * multicast or mgt
- * - if bit 5 is set, and we are in OFDM mode, unset bit 5 and set bit
- * 7
*/
cs->control = 0;
if (stype == IEEE80211_STYPE_PSPOLL)
cs->control |= ZD_CS_PS_POLL_FRAME;
+ /* Unicast data frames over the threshold should have RTS */
if (!is_multicast_ether_addr(header->addr1) &&
- ftype != IEEE80211_FTYPE_MGMT &&
- tx_length > zd_netdev_ieee80211(mac->netdev)->rts)
- {
- /* FIXME: check the logic */
- if (ZD_CS_TYPE(cs->modulation) == ZD_CS_OFDM) {
- /* 802.11g */
- cs->control |= ZD_CS_SELF_CTS;
- } else { /* 802.11b */
- cs->control |= ZD_CS_RTS;
- }
+ ftype != IEEE80211_FTYPE_MGMT &&
+ tx_length > zd_netdev_ieee80211(mac->netdev)->rts)
+ cs->control |= ZD_CS_RTS;
+
+ /* Use CTS-to-self protection if required */
+ if (ZD_CS_TYPE(cs->modulation) == ZD_CS_OFDM &&
+ ieee80211softmac_protection_needed(softmac)) {
+ /* FIXME: avoid sending RTS *and* self-CTS, is that correct? */
+ cs->control &= ~ZD_CS_RTS;
+ cs->control |= ZD_CS_SELF_CTS;
}
/* FIXME: Management frame? */
u8 rt_rate;
u16 rt_channel;
u16 rt_chbitmask;
- };
+ } __attribute__((packed));
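Packing the radiotap header matters because it is assembled byte-for-byte into the skb: without the attribute the compiler may insert padding between differently sized members. A quick hypothetical illustration (kernel u8/u16 types assumed):

struct example_hdr {
	u8  rt_rate;
	u16 rt_channel;
};
/*
 * Unpacked, sizeof(struct example_hdr) is 4 on most ABIs (one pad
 * byte after rt_rate). With __attribute__((packed)) it is 3 and
 * matches the on-air layout that radiotap parsers expect.
 */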
static void fill_rt_header(void *buffer, struct zd_mac *mac,
const struct ieee80211_rx_stats *stats,
(netdev->flags & IFF_PROMISC);
}
- /* Filters receiving packets. If it returns 1 send it to ieee80211_rx, if 0
- * return. If an error is detected -EINVAL is returned. ieee80211_rx_mgt() is
- * called here.
+ /* Filters received packets. The function returns 1 if the packet should be
+ * forwarded to ieee80211_rx(). If the packet should be ignored the function
+ * returns 0. If an invalid packet is found the function returns -EINVAL.
+ *
+ * The function calls ieee80211_rx_mgt() directly.
*
* It has been based on ieee80211_rx_any.
*/
ieee80211_rx_mgt(ieee, hdr, stats);
return 0;
case IEEE80211_FTYPE_CTL:
- /* Ignore invalid short buffers */
return 0;
case IEEE80211_FTYPE_DATA:
+ /* Ignore invalid short buffers */
if (length < sizeof(struct ieee80211_hdr_3addr))
return -EINVAL;
return is_data_packet_for_us(ieee, hdr);
static void softmac_init(struct ieee80211softmac_device *sm)
{
sm->set_channel = set_channel;
+ sm->bssinfo_change = bssinfo_change;
}
struct iw_statistics *zd_mac_get_wireless_stats(struct net_device *ndev)
return iw_stats;
}
- #ifdef DEBUG
- static const char* decryption_types[] = {
- [ZD_RX_NO_WEP] = "none",
- [ZD_RX_WEP64] = "WEP64",
- [ZD_RX_TKIP] = "TKIP",
- [ZD_RX_AES] = "AES",
- [ZD_RX_WEP128] = "WEP128",
- [ZD_RX_WEP256] = "WEP256",
- };
-
- static const char *decryption_type_string(u8 type)
- {
- const char *s;
-
- if (type < ARRAY_SIZE(decryption_types)) {
- s = decryption_types[type];
- } else {
- s = NULL;
- }
- return s ? s : "unknown";
- }
-
- static int is_ofdm(u8 frame_status)
- {
- return (frame_status & ZD_RX_OFDM);
- }
-
- void zd_dump_rx_status(const struct rx_status *status)
- {
- const char* modulation;
- u8 quality;
-
- if (is_ofdm(status->frame_status)) {
- modulation = "ofdm";
- quality = status->signal_quality_ofdm;
- } else {
- modulation = "cck";
- quality = status->signal_quality_cck;
- }
- pr_debug("rx status %s strength %#04x qual %#04x decryption %s\n",
- modulation, status->signal_strength, quality,
- decryption_type_string(status->decryption_type));
- if (status->frame_status & ZD_RX_ERROR) {
- pr_debug("rx error %s%s%s%s%s%s\n",
- (status->frame_status & ZD_RX_TIMEOUT_ERROR) ?
- "timeout " : "",
- (status->frame_status & ZD_RX_FIFO_OVERRUN_ERROR) ?
- "fifo " : "",
- (status->frame_status & ZD_RX_DECRYPTION_ERROR) ?
- "decryption " : "",
- (status->frame_status & ZD_RX_CRC32_ERROR) ?
- "crc32 " : "",
- (status->frame_status & ZD_RX_NO_ADDR1_MATCH_ERROR) ?
- "addr1 " : "",
- (status->frame_status & ZD_RX_CRC16_ERROR) ?
- "crc16" : "");
- }
- }
- #endif /* DEBUG */
-
#define LINK_LED_WORK_DELAY HZ
-static void link_led_handler(void *p)
+static void link_led_handler(struct work_struct *work)
{
- struct zd_mac *mac = p;
+ struct zd_mac *mac =
+ container_of(work, struct zd_mac, housekeeping.link_led_work.work);
struct zd_chip *chip = &mac->chip;
struct ieee80211softmac_device *sm = ieee80211_priv(mac->netdev);
int is_associated;
static void housekeeping_init(struct zd_mac *mac)
{
- INIT_WORK(&mac->housekeeping.link_led_work, link_led_handler, mac);
+ INIT_DELAYED_WORK(&mac->housekeeping.link_led_work, link_led_handler);
}
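The hunks above follow the tree-wide workqueue conversion: handlers now take a struct work_struct * and recover their context with container_of() instead of a stored data pointer, and any work item that is ever queued with a delay becomes a struct delayed_work. A minimal sketch of the pattern, using a hypothetical foo_dev that is not part of this series:

	#include <linux/workqueue.h>

	struct foo_dev {
		struct delayed_work poll_work;	/* embedded in the owning object */
	};

	static void foo_poll(struct work_struct *work)
	{
		/* delayed work: container_of() goes through the .work member */
		struct foo_dev *foo =
			container_of(work, struct foo_dev, poll_work.work);
		/* ... operate on foo ... */
	}

	static void foo_init(struct foo_dev *foo)
	{
		INIT_DELAYED_WORK(&foo->poll_work, foo_poll);	/* no data argument */
		schedule_delayed_work(&foo->poll_work, HZ);
	}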
static void housekeeping_enable(struct zd_mac *mac)
#include <linux/wireless.h>
#include <linux/kernel.h>
+ #include <linux/workqueue.h>
#include <net/ieee80211.h>
#include <net/ieee80211softmac.h>
#define ZD_CS_CCK 0x00
#define ZD_CS_OFDM 0x10
- #define ZD_CS_CCK_RATE_1M 0x00
- #define ZD_CS_CCK_RATE_2M 0x01
- #define ZD_CS_CCK_RATE_5_5M 0x02
- #define ZD_CS_CCK_RATE_11M 0x03
+ /* These are referred to as zd_rates */
+ #define ZD_CCK_RATE_1M 0x00
+ #define ZD_CCK_RATE_2M 0x01
+ #define ZD_CCK_RATE_5_5M 0x02
+ #define ZD_CCK_RATE_11M 0x03
/* The rates for OFDM are encoded as in the PLCP header. Use ZD_OFDM_RATE_*.
*/
struct rx_length_info {
__le16 length[3];
__le16 tag;
- };
+ } __attribute__((packed));
#define RX_LENGTH_INFO_TAG 0x697e
u8 signal_quality_ofdm;
u8 decryption_type;
u8 frame_status;
- };
+ } __attribute__((packed));
/* rx_status field decryption_type */
#define ZD_RX_NO_WEP 0
#define ZD_RX_CRC16_ERROR 0x40
#define ZD_RX_ERROR 0x80
- enum mac_flags {
- MAC_FIXED_CHANNEL = 0x01,
- };
-
struct housekeeping {
- struct work_struct link_led_work;
+ struct delayed_work link_led_work;
};
#define ZD_MAC_STATS_BUFFER_SIZE 16
struct zd_chip chip;
spinlock_t lock;
struct net_device *netdev;
+
/* Unlocked reading possible */
struct iw_statistics iw_stats;
+
struct housekeeping housekeeping;
+ struct work_struct set_rts_cts_work;
+ struct work_struct set_basic_rates_work;
+
unsigned int stats_count;
u8 qual_buffer[ZD_MAC_STATS_BUFFER_SIZE];
u8 rssi_buffer[ZD_MAC_STATS_BUFFER_SIZE];
u8 regdomain;
u8 default_regdomain;
u8 requested_channel;
+
+ /* A bitpattern of cr_rates */
+ u16 basic_rates;
+
+ /* A zd_rate */
+ u8 rts_rate;
+
+ /* Short preamble (used for RTS/CTS) */
+ unsigned int short_preamble:1;
+
+ /* flags to indicate update in progress */
+ unsigned int updating_rts_rate:1;
+ unsigned int updating_basic_rates:1;
};
static inline struct ieee80211_device *zd_mac_to_ieee80211(struct zd_mac *mac)
u8 zd_mac_get_regdomain(struct zd_mac *zd_mac);
int zd_mac_request_channel(struct zd_mac *mac, u8 channel);
- int zd_mac_get_channel(struct zd_mac *mac, u8 *channel, u8 *flags);
+ u8 zd_mac_get_channel(struct zd_mac *mac);
int zd_mac_set_mode(struct zd_mac *mac, u32 mode);
int zd_mac_get_mode(struct zd_mac *mac, u32 *mode);
}
-static void pcmcia_delayed_add_pseudo_device(void *data)
+static void pcmcia_delayed_add_pseudo_device(struct work_struct *work)
{
- struct pcmcia_socket *s = data;
+ struct pcmcia_socket *s =
+ container_of(work, struct pcmcia_socket, device_add);
pcmcia_device_add(s, 0);
s->pcmcia_state.device_add_pending = 0;
}
init_waitqueue_head(&socket->queue);
#endif
INIT_LIST_HEAD(&socket->devices_list);
- INIT_WORK(&socket->device_add, pcmcia_delayed_add_pseudo_device, socket);
+ INIT_WORK(&socket->device_add, pcmcia_delayed_add_pseudo_device);
memset(&socket->pcmcia_state, 0, sizeof(u8));
socket->device_count = 0;
pccard_register_pcmcia(socket, NULL);
/* unregister any unbound devices */
+ mutex_lock(&socket->skt_mutex);
pcmcia_card_remove(socket, NULL);
+ mutex_unlock(&socket->skt_mutex);
pcmcia_put_socket(socket);
* Routine to poll RTC seconds field for change as often as possible,
* after first RTC_UIE use timer to reduce polling
*/
-static void rtc_uie_task(void *data)
+static void rtc_uie_task(struct work_struct *work)
{
- struct rtc_device *rtc = data;
+ struct rtc_device *rtc =
+ container_of(work, struct rtc_device, uie_task);
struct rtc_time tm;
int num = 0;
int err;
err = rtc_read_time(&rtc->class_dev, &tm);
- spin_lock_irq(&rtc->irq_lock);
+
+ local_irq_disable();
+ spin_lock(&rtc->irq_lock);
if (rtc->stop_uie_polling || err) {
rtc->uie_task_active = 0;
} else if (rtc->oldsecs != tm.tm_sec) {
} else if (schedule_work(&rtc->uie_task) == 0) {
rtc->uie_task_active = 0;
}
- spin_unlock_irq(&rtc->irq_lock);
+ spin_unlock(&rtc->irq_lock);
if (num)
rtc_update_irq(&rtc->class_dev, num, RTC_UF | RTC_IRQF);
+ local_irq_enable();
}
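The lock rework above is subtle: splitting spin_lock_irq() into local_irq_disable() plus a plain spin_lock() lets the handler drop irq_lock first while keeping interrupts off across the rtc_update_irq() call, so the notification never runs nested under that lock. The general shape, with hypothetical names:

	struct foo {
		spinlock_t lock;
		int pending;
	};

	static void foo_notify(struct foo *foo, int num);	/* wants irqs off */

	static void report_events(struct foo *foo)
	{
		int num;

		local_irq_disable();
		spin_lock(&foo->lock);
		num = foo->pending;
		foo->pending = 0;
		spin_unlock(&foo->lock);	/* lock dropped, interrupts still off */
		if (num)
			foo_notify(foo, num);
		local_irq_enable();
	}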
-
static void rtc_uie_timer(unsigned long data)
{
struct rtc_device *rtc = (struct rtc_device *)data;
struct rtc_wkalrm alarm;
void __user *uarg = (void __user *) arg;
- /* check that the calles has appropriate permissions
+ /* check that the calling task has appropriate permissions
* for certain ioctls. doing this check here is useful
* to avoid duplicate code in each driver.
*/
/* avoid conflicting IRQ users */
if (cmd == RTC_PIE_ON || cmd == RTC_PIE_OFF || cmd == RTC_IRQP_SET) {
- spin_lock(&rtc->irq_task_lock);
+ spin_lock_irq(&rtc->irq_task_lock);
if (rtc->irq_task)
err = -EBUSY;
- spin_unlock(&rtc->irq_task_lock);
+ spin_unlock_irq(&rtc->irq_task_lock);
if (err < 0)
return err;
err = rtc_set_time(class_dev, &tm);
break;
+
+ case RTC_IRQP_READ:
+ if (ops->irq_set_freq)
+ err = put_user(rtc->irq_freq, (unsigned long __user *)uarg);
+ break;
+
+ case RTC_IRQP_SET:
+ if (ops->irq_set_freq)
+ err = rtc_irq_set_freq(class_dev, rtc->irq_task, arg);
+ break;
+
#if 0
case RTC_EPOCH_SET:
#ifndef rtc_epoch
spin_lock_init(&rtc->irq_lock);
init_waitqueue_head(&rtc->irq_queue);
#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
- INIT_WORK(&rtc->uie_task, rtc_uie_task, rtc);
+ INIT_WORK(&rtc->uie_task, rtc_uie_task);
setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc);
#endif
struct speedtch_params params; /* set in probe, constant afterwards */
- struct work_struct status_checker;
+ struct delayed_work status_checker;
unsigned char last_status;
return ret;
}
-static void speedtch_check_status(struct speedtch_instance_data *instance)
+static void speedtch_check_status(struct work_struct *work)
{
+ struct speedtch_instance_data *instance =
+ container_of(work, struct speedtch_instance_data,
+ status_checker.work);
struct usbatm_data *usbatm = instance->usbatm;
struct atm_dev *atm_dev = usbatm->atm_dev;
unsigned char *buf = instance->scratch_buffer;
{
struct speedtch_instance_data *instance = (void *)data;
- schedule_work(&instance->status_checker);
+ schedule_delayed_work(&instance->status_checker, 0);
/* The following check is racy, but the race is harmless */
if (instance->poll_delay < MAX_POLL_DELAY)
if (int_urb) {
ret = usb_submit_urb(int_urb, GFP_ATOMIC);
if (!ret)
- schedule_work(&instance->status_checker);
+ schedule_delayed_work(&instance->status_checker, 0);
else {
atm_dbg(instance->usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret);
mod_timer(&instance->resubmit_timer, jiffies + msecs_to_jiffies(RESUBMIT_DELAY));
if ((int_urb = instance->int_urb)) {
ret = usb_submit_urb(int_urb, GFP_ATOMIC);
- schedule_work(&instance->status_checker);
+ schedule_delayed_work(&instance->status_checker, 0);
if (ret < 0) {
atm_dbg(usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret);
goto fail;
const struct usb_endpoint_descriptor *endpoint_desc = &desc->endpoint[i].desc;
if ((endpoint_desc->bEndpointAddress == target_address)) {
- use_isoc = (endpoint_desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
- USB_ENDPOINT_XFER_ISOC;
+ use_isoc =
+ usb_endpoint_xfer_isoc(endpoint_desc);
break;
}
}
usbatm->flags |= (use_isoc ? UDSL_USE_ISOC : 0);
- INIT_WORK(&instance->status_checker, (void *)speedtch_check_status, instance);
+ INIT_DELAYED_WORK(&instance->status_checker, speedtch_check_status);
instance->status_checker.timer.function = speedtch_status_poll;
instance->status_checker.timer.data = (unsigned long)instance;
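Direct access to status_checker.timer still works after the type change because a delayed_work is simply the old work_struct paired with the timer that used to sit beside it. Roughly, per the 2.6.20-era <linux/workqueue.h>:

	struct delayed_work {
		struct work_struct work;
		struct timer_list timer;	/* fires and then queues .work */
	};

So schedule_delayed_work(&instance->status_checker, 0) queues immediately, exactly as the old schedule_work() call did.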
int ret = -ENOMEM;
u8 *xfer_buff;
- xfer_buff = kmalloc(size, GFP_KERNEL);
+ xfer_buff = kmemdup(buff, size, GFP_KERNEL);
if (xfer_buff) {
- memcpy(xfer_buff, buff, size);
ret = usb_control_msg(usb,
usb_sndctrlpipe(usb, 0),
LOAD_INTERNAL,
u8 *xfer_buff;
int bytes_read;
- xfer_buff = kmalloc(size, GFP_KERNEL);
+ xfer_buff = kmemdup(data, size, GFP_KERNEL);
if (!xfer_buff) {
uea_err(INS_TO_USBDEV(sc), "can't allocate xfer_buff\n");
return ret;
}
- memcpy(xfer_buff, data, size);
-
ret = usb_bulk_msg(sc->usb_dev,
usb_sndbulkpipe(sc->usb_dev, UEA_IDMA_PIPE),
xfer_buff, size, &bytes_read, BULK_TIMEOUT);
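kmemdup() (declared in <linux/string.h>) folds the kmalloc()/memcpy() pair into one call and still returns NULL on allocation failure, so none of the surrounding error paths change. What it does, roughly:

	/* sketch of kmemdup(); the real one lives in mm/util.c */
	static void *kmemdup_sketch(const void *src, size_t len, gfp_t gfp)
	{
		void *p = kmalloc(len, gfp);

		if (p)
			memcpy(p, src, len);
		return p;
	}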
/*
* The uea_load_page() function must be called within a process context
*/
-static void uea_load_page(void *xsc)
+static void uea_load_page(struct work_struct *work)
{
- struct uea_softc *sc = xsc;
+ struct uea_softc *sc = container_of(work, struct uea_softc, task);
u16 pageno = sc->pageno;
u16 ovl = sc->ovl;
struct block_info bi;
u8 *xfer_buff;
int ret = -ENOMEM;
- xfer_buff = kmalloc(size, GFP_KERNEL);
+ xfer_buff = kmemdup(data, size, GFP_KERNEL);
if (!xfer_buff) {
uea_err(INS_TO_USBDEV(sc), "can't allocate xfer_buff\n");
return ret;
}
- memcpy(xfer_buff, data, size);
ret = usb_control_msg(sc->usb_dev, usb_sndctrlpipe(sc->usb_dev, 0),
UCDC_SEND_ENCAPSULATED_COMMAND,
uea_enters(INS_TO_USBDEV(sc));
- INIT_WORK(&sc->task, uea_load_page, sc);
+ INIT_WORK(&sc->task, uea_load_page);
init_waitqueue_head(&sc->sync_q);
init_waitqueue_head(&sc->cmv_ack_wait);
schedule_work(&acm->work);
}
-static void acm_softint(void *private)
+static void acm_softint(struct work_struct *work)
{
- struct acm *acm = private;
+ struct acm *acm = container_of(work, struct acm, work);
dbg("Entering acm_softint.");
if (!ACM_READY(acm))
/* workaround for switched endpoints */
- if ((epread->bEndpointAddress & USB_DIR_IN) != USB_DIR_IN) {
+ if (!usb_endpoint_dir_in(epread)) {
/* descriptors are swapped */
struct usb_endpoint_descriptor *t;
dev_dbg(&intf->dev,"The data interface has switched endpoints");
acm->rx_buflimit = num_rx_buf;
acm->urb_task.func = acm_rx_tasklet;
acm->urb_task.data = (unsigned long) acm;
- INIT_WORK(&acm->work, acm_softint, acm);
+ INIT_WORK(&acm->work, acm_softint);
spin_lock_init(&acm->throttle_lock);
spin_lock_init(&acm->write_lock);
spin_lock_init(&acm->read_lock);
#include "hcd.h"
#include "hub.h"
+ struct usb_hub {
+ struct device *intfdev; /* the "interface" device */
+ struct usb_device *hdev;
+ struct urb *urb; /* for interrupt polling pipe */
+
+ /* buffer for urb ... with extra space in case of babble */
+ char (*buffer)[8];
+ dma_addr_t buffer_dma; /* DMA address for buffer */
+ union {
+ struct usb_hub_status hub;
+ struct usb_port_status port;
+ } *status; /* buffer for status reports */
+
+ int error; /* last reported error */
+ int nerrors; /* track consecutive errors */
+
+ struct list_head event_list; /* hubs w/data or errs ready */
+ unsigned long event_bits[1]; /* status change bitmask */
+ unsigned long change_bits[1]; /* ports with logical connect
+ status change */
+ unsigned long busy_bits[1]; /* ports being reset or
+ resumed */
+ #if USB_MAXCHILDREN > 31 /* 8*sizeof(unsigned long) - 1 */
+ #error event_bits[] is too short!
+ #endif
+
+ struct usb_hub_descriptor *descriptor; /* class descriptor */
+ struct usb_tt tt; /* Transaction Translator */
+
+ unsigned mA_per_port; /* current for each child */
+
+ unsigned limited_power:1;
+ unsigned quiescing:1;
+ unsigned activating:1;
+
+ unsigned has_indicators:1;
+ u8 indicator[USB_MAXCHILDREN];
+ struct work_struct leds;
+ };
+
+
/* Protect struct usb_device->state and ->children members
* Note: Both are also protected by ->dev.sem, except that ->state can
* change to USB_STATE_NOTATTACHED even when the semaphore isn't held. */
static struct task_struct *khubd_task;
+ /* multithreaded probe logic */
+ static int multithread_probe =
+ #ifdef CONFIG_USB_MULTITHREAD_PROBE
+ 1;
+ #else
+ 0;
+ #endif
+ module_param(multithread_probe, bool, S_IRUGO);
+ MODULE_PARM_DESC(multithread_probe, "Run each USB device probe in a new thread");
+
/* cycle leds on hubs that aren't blinking for attention */
static int blinkenlights = 0;
module_param (blinkenlights, bool, S_IRUGO);
#define LED_CYCLE_PERIOD ((2*HZ)/3)
-static void led_work (void *__hub)
+static void led_work (struct work_struct *work)
{
- struct usb_hub *hub = __hub;
+ struct usb_hub *hub =
+ container_of(work, struct usb_hub, leds.work);
struct usb_device *hdev = hub->hdev;
unsigned i;
unsigned changed = 0;
{
unsigned long flags;
+ /* Suppress autosuspend until khubd runs */
+ to_usb_interface(hub->intfdev)->pm_usage_cnt = 1;
+
spin_lock_irqsave(&hub_event_lock, flags);
if (list_empty(&hub->event_list)) {
list_add_tail(&hub->event_list, &hub_event_list);
* talking to TTs must queue control transfers (not just bulk and iso), so
* both can talk to the same hub concurrently.
*/
-static void hub_tt_kevent (void *arg)
+static void hub_tt_kevent (struct work_struct *work)
{
- struct usb_hub *hub = arg;
+ struct usb_hub *hub =
+ container_of(work, struct usb_hub, tt.kevent);
unsigned long flags;
spin_lock_irqsave (&hub->tt.lock, flags);
/* (nonblocking) khubd and related activity won't re-trigger */
hub->quiescing = 1;
hub->activating = 0;
- hub->resume_root_hub = 0;
/* (blocking) stop khubd and related activity */
usb_kill_urb(hub->urb);
hub->quiescing = 0;
hub->activating = 1;
- hub->resume_root_hub = 0;
+
status = usb_submit_urb(hub->urb, GFP_NOIO);
if (status < 0)
dev_err(hub->intfdev, "activate --> %d\n", status);
spin_lock_init (&hub->tt.lock);
INIT_LIST_HEAD (&hub->tt.clear_list);
- INIT_WORK (&hub->tt.kevent, hub_tt_kevent, hub);
+ INIT_WORK (&hub->tt.kevent, hub_tt_kevent);
switch (hdev->descriptor.bDeviceProtocol) {
case 0:
break;
dev_dbg(hub_dev, "%sover-current condition exists\n",
(hubstatus & HUB_STATUS_OVERCURRENT) ? "" : "no ");
- /* set up the interrupt endpoint */
+ /* set up the interrupt endpoint
+ * We use the EP's maxpacket size instead of (PORTS+1+7)/8
+ * bytes as USB2.0[11.12.3] says because some hubs are known
+ * to send more data (and thus cause overflow). For root hubs,
+ * maxpktsize is defined in hcd.c's fake endpoint descriptors
+ * to be big enough for at least USB_MAXCHILDREN ports. */
pipe = usb_rcvintpipe(hdev, endpoint->bEndpointAddress);
maxp = usb_maxpacket(hdev, pipe, usb_pipeout(pipe));
INIT_LIST_HEAD(&hub->event_list);
hub->intfdev = &intf->dev;
hub->hdev = hdev;
- INIT_WORK(&hub->leds, led_work, hub);
+ INIT_DELAYED_WORK(&hub->leds, led_work);
usb_set_intfdata (intf, hub);
+ intf->needs_remote_wakeup = 1;
if (hdev->speed == USB_SPEED_HIGH)
highspeed_hubs++;
if (udev->children[i])
recursively_mark_NOTATTACHED(udev->children[i]);
}
+ if (udev->state == USB_STATE_SUSPENDED)
+ udev->discon_suspended = 1;
udev->state = USB_STATE_NOTATTACHED;
}
*pdev = NULL;
spin_unlock_irq(&device_state_lock);
+ /* Decrement the parent's count of unsuspended children */
+ if (udev->parent) {
+ usb_pm_lock(udev);
+ if (!udev->discon_suspended)
+ usb_autosuspend_device(udev->parent);
+ usb_pm_unlock(udev);
+ }
+
put_device(&udev->dev);
}
static int __usb_port_suspend(struct usb_device *, int port1);
#endif
- /**
- * usb_new_device - perform initial device setup (usbcore-internal)
- * @udev: newly addressed device (in ADDRESS state)
- *
- * This is called with devices which have been enumerated, but not yet
- * configured. The device descriptor is available, but not descriptors
- * for any device configuration. The caller must have locked either
- * the parent hub (if udev is a normal device) or else the
- * usb_bus_list_lock (if udev is a root hub). The parent's pointer to
- * udev has already been installed, but udev is not yet visible through
- * sysfs or other filesystem code.
- *
- * Returns 0 for success (device is configured and listed, with its
- * interfaces, in sysfs); else a negative errno value.
- *
- * This call is synchronous, and may not be used in an interrupt context.
- *
- * Only the hub driver or root-hub registrar should ever call this.
- */
- int usb_new_device(struct usb_device *udev)
+ static int __usb_new_device(void *void_data)
{
+ struct usb_device *udev = void_data;
int err;
+ /* Lock ourself into memory in order to keep a probe sequence
+ * sleeping in a new thread from allowing us to be unloaded.
+ */
+ if (!try_module_get(THIS_MODULE))
+ return -EINVAL;
+
err = usb_get_configuration(udev);
if (err < 0) {
dev_err(&udev->dev, "can't read configurations, error %d\n",
goto fail;
}
- return 0;
+ /* Increment the parent's count of unsuspended children */
+ if (udev->parent)
+ usb_autoresume_device(udev->parent);
+
+ exit:
+ module_put(THIS_MODULE);
+ return err;
fail:
usb_set_device_state(udev, USB_STATE_NOTATTACHED);
- return err;
+ goto exit;
}
+ /**
+ * usb_new_device - perform initial device setup (usbcore-internal)
+ * @udev: newly addressed device (in ADDRESS state)
+ *
+ * This is called with devices which have been enumerated, but not yet
+ * configured. The device descriptor is available, but not descriptors
+ * for any device configuration. The caller must have locked either
+ * the parent hub (if udev is a normal device) or else the
+ * usb_bus_list_lock (if udev is a root hub). The parent's pointer to
+ * udev has already been installed, but udev is not yet visible through
+ * sysfs or other filesystem code.
+ *
+ * The return value of this function depends on whether the
+ * multithread_probe variable is set. If it is set, the function
+ * reports whether the probe thread was created successfully. If the
+ * variable is not set, it returns 0 when the device is configured
+ * properly (and listed, with its interfaces, in sysfs); else a
+ * negative errno value.
+ *
+ * This call is synchronous, and may not be used in an interrupt context.
+ *
+ * Only the hub driver or root-hub registrar should ever call this.
+ */
+ int usb_new_device(struct usb_device *udev)
+ {
+ struct task_struct *probe_task;
+ int ret = 0;
+
+ if (multithread_probe) {
+ probe_task = kthread_run(__usb_new_device, udev,
+ "usb-probe-%s", udev->devnum);
+ if (IS_ERR(probe_task))
+ ret = PTR_ERR(probe_task);
+ } else
+ ret = __usb_new_device(udev);
+
+ return ret;
+ }
static int hub_port_status(struct usb_hub *hub, int port1,
u16 *status, u16 *change)
int ret;
ret = get_port_status(hub->hdev, port1, &hub->status->port);
- if (ret < 0)
+ if (ret < 4) {
dev_err (hub->intfdev,
"%s failed (err = %d)\n", __FUNCTION__, ret);
- else {
+ if (ret >= 0)
+ ret = -EIO;
+ } else {
*status = le16_to_cpu(hub->status->port.wPortStatus);
*change = le16_to_cpu(hub->status->port.wPortChange);
ret = 0;
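The new ret < 4 test relies on usb_control_msg() (underneath get_port_status()) returning the number of bytes actually transferred: a short read of the four status/change bytes is now converted to -EIO instead of being silently accepted. A sketch of the idiom, with hypothetical request parameters:

	static int read_status_word(struct usb_device *udev, void *buf, int timeout)
	{
		/* usb_control_msg() returns bytes transferred or a negative errno */
		int ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
					  USB_REQ_GET_STATUS, USB_DIR_IN, 0, 0,
					  buf, 4, timeout);

		if (ret < 4)		/* error or short transfer */
			return ret < 0 ? ret : -EIO;
		return 0;
	}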
hub_port_resume(struct usb_hub *hub, int port1, struct usb_device *udev)
{
int status;
+ u16 portchange, portstatus;
+
+ /* Skip the initial Clear-Suspend step for a remote wakeup */
+ status = hub_port_status(hub, port1, &portstatus, &portchange);
+ if (status == 0 && !(portstatus & USB_PORT_STAT_SUSPEND))
+ goto SuspendCleared;
// dev_dbg(hub->intfdev, "resume port %d\n", port1);
"can't resume port %d, status %d\n",
port1, status);
} else {
- u16 devstatus;
- u16 portchange;
-
/* drive resume for at least 20 msec */
if (udev)
dev_dbg(&udev->dev, "usb %sresume\n",
* stop resume signaling. Then finish the resume
* sequence.
*/
- devstatus = portchange = 0;
- status = hub_port_status(hub, port1,
- &devstatus, &portchange);
+ status = hub_port_status(hub, port1, &portstatus, &portchange);
+ SuspendCleared:
if (status < 0
- || (devstatus & LIVE_FLAGS) != LIVE_FLAGS
- || (devstatus & USB_PORT_STAT_SUSPEND) != 0
+ || (portstatus & LIVE_FLAGS) != LIVE_FLAGS
+ || (portstatus & USB_PORT_STAT_SUSPEND) != 0
) {
dev_dbg(hub->intfdev,
"port %d status %04x.%04x after resume, %d\n",
- port1, portchange, devstatus, status);
+ port1, portchange, portstatus, status);
if (status >= 0)
status = -ENODEV;
} else {
{
int status = 0;
- /* All this just to avoid sending a port-resume message
- * to the parent hub! */
-
usb_lock_device(udev);
- usb_pm_lock(udev);
if (udev->state == USB_STATE_SUSPENDED) {
dev_dbg(&udev->dev, "usb %sresume\n", "wakeup-");
- /* TRSMRCY = 10 msec */
- msleep(10);
- status = finish_port_resume(udev);
+ status = usb_autoresume_device(udev);
+
+ /* Give the interface drivers a chance to do something,
+ * then autosuspend the device again. */
if (status == 0)
- udev->dev.power.power_state.event = PM_EVENT_ON;
+ usb_autosuspend_device(udev);
}
- usb_pm_unlock(udev);
-
- if (status == 0)
- usb_autoresume_device(udev, 0);
usb_unlock_device(udev);
return status;
}
}
}
+ dev_dbg(&intf->dev, "%s\n", __FUNCTION__);
+
/* "global suspend" of the downstream HC-to-USB interface */
if (!hdev->parent) {
struct usb_bus *bus = hdev->bus;
static int hub_resume(struct usb_interface *intf)
{
- struct usb_device *hdev = interface_to_usbdev(intf);
struct usb_hub *hub = usb_get_intfdata (intf);
+ struct usb_device *hdev = hub->hdev;
int status;
+ dev_dbg(&intf->dev, "%s\n", __FUNCTION__);
+
/* "global resume" of the downstream HC-to-USB interface */
if (!hdev->parent) {
struct usb_bus *bus = hdev->bus;
{
struct usb_hub *hub = hdev_to_hub(hdev);
- hub->resume_root_hub = 1;
kick_khubd(hub);
}
/* hub LEDs are probably harder to miss than syslog */
if (hub->has_indicators) {
hub->indicator[port1-1] = INDICATOR_GREEN_BLINK;
- schedule_work (&hub->leds);
+ schedule_delayed_work (&hub->leds, 0);
}
}
kfree(qual);
if (hub->has_indicators) {
hub->indicator[port1-1] =
INDICATOR_AMBER_BLINK;
- schedule_work (&hub->leds);
+ schedule_delayed_work (&hub->leds, 0);
}
status = -ENOTCONN; /* Don't retry */
goto loop_disable;
intf = to_usb_interface(hub->intfdev);
hub_dev = &intf->dev;
- i = hub->resume_root_hub;
-
- dev_dbg(hub_dev, "state %d ports %d chg %04x evt %04x%s\n",
+ dev_dbg(hub_dev, "state %d ports %d chg %04x evt %04x\n",
hdev->state, hub->descriptor
? hub->descriptor->bNbrPorts
: 0,
/* NOTE: expects max 15 ports... */
(u16) hub->change_bits[0],
- (u16) hub->event_bits[0],
- i ? ", resume root" : "");
+ (u16) hub->event_bits[0]);
usb_get_intf(intf);
spin_unlock_irq(&hub_event_lock);
goto loop;
}
- /* Is this is a root hub wanting to reactivate the downstream
- * ports? If so, be sure the interface resumes even if its
- * stub "device" node was never suspended.
- */
- if (i)
- usb_autoresume_device(hdev, 0);
+ /* Autoresume */
+ ret = usb_autopm_get_interface(intf);
+ if (ret) {
+ dev_dbg(hub_dev, "Can't autoresume: %d\n", ret);
+ goto loop;
+ }
- /* If this is an inactive or suspended hub, do nothing */
+ /* If this is an inactive hub, do nothing */
if (hub->quiescing)
- goto loop;
+ goto loop_autopm;
if (hub->error) {
dev_dbg (hub_dev, "resetting for error %d\n",
if (ret) {
dev_dbg (hub_dev,
"error resetting hub: %d\n", ret);
- goto loop;
+ goto loop_autopm;
}
hub->nerrors = 0;
if (!hdev->parent && !hub->busy_bits[0])
usb_enable_root_hub_irq(hdev->bus);
+ loop_autopm:
+ /* Allow autosuspend if we're not going to run again */
+ if (list_empty(&hub->event_list))
+ usb_autopm_enable(intf);
loop:
usb_unlock_device(hdev);
usb_put_intf(intf);
.post_reset = hub_post_reset,
.ioctl = hub_ioctl,
.id_table = hub_id_table,
+ .supports_autosuspend = 1,
};
int usb_hub_init(void)
}
/* Prevent autosuspend during the reset */
- usb_autoresume_device(udev, 1);
+ usb_autoresume_device(udev);
if (iface && iface->condition != USB_INTERFACE_BINDING)
iface = NULL;
}
}
- usb_autosuspend_device(udev, 1);
+ usb_autosuspend_device(udev);
return ret;
}
EXPORT_SYMBOL(usb_reset_composite_device);
err = -EINVAL;
goto errout;
} else {
- dev->have_langid = -1;
+ dev->have_langid = 1;
dev->string_langid = tbuf[2] | (tbuf[3]<< 8);
/* always use the first langid listed */
dev_dbg (&dev->dev, "default language 0x%04x\n",
}
/* Wake up the device so we can send it the Set-Config request */
- ret = usb_autoresume_device(dev, 1);
+ ret = usb_autoresume_device(dev);
if (ret)
goto free_interfaces;
dev->actconfig = cp;
if (!cp) {
usb_set_device_state(dev, USB_STATE_ADDRESS);
- usb_autosuspend_device(dev, 1);
+ usb_autosuspend_device(dev);
goto free_interfaces;
}
usb_set_device_state(dev, USB_STATE_CONFIGURED);
usb_create_sysfs_intf_files (intf);
}
- usb_autosuspend_device(dev, 1);
+ usb_autosuspend_device(dev);
return 0;
}
};
/* Worker routine for usb_driver_set_configuration() */
-static void driver_set_config_work(void *_req)
+static void driver_set_config_work(struct work_struct *work)
{
- struct set_config_request *req = _req;
+ struct set_config_request *req =
+ container_of(work, struct set_config_request, work);
usb_lock_device(req->udev);
usb_set_configuration(req->udev, req->config);
return -ENOMEM;
req->udev = udev;
req->config = config;
- INIT_WORK(&req->work, driver_set_config_work, req);
+ INIT_WORK(&req->work, driver_set_config_work);
usb_get_dev(udev);
if (!schedule_work(&req->work)) {
destroy_workqueue(ksuspend_usb_wq);
}
- #else
-
- #define ksuspend_usb_init() 0
- #define ksuspend_usb_cleanup() do {} while (0)
-
- #endif
-
#ifdef CONFIG_USB_SUSPEND
/* usb_autosuspend_work - callback routine to autosuspend a USB device */
-static void usb_autosuspend_work(void *_udev)
+static void usb_autosuspend_work(struct work_struct *work)
{
- struct usb_device *udev = _udev;
+ struct usb_device *udev =
+ container_of(work, struct usb_device, autosuspend.work);
usb_pm_lock(udev);
udev->auto_pm = 1;
#else
-static void usb_autosuspend_work(void *_udev)
+static void usb_autosuspend_work(struct work_struct *work)
{}
- #endif
+ #endif /* CONFIG_USB_SUSPEND */
+
+ #else
+
+ #define ksuspend_usb_init() 0
+ #define ksuspend_usb_cleanup() do {} while (0)
+
+ #endif /* CONFIG_PM */
/**
* usb_alloc_dev - usb device constructor (usbcore-internal)
#ifdef CONFIG_PM
mutex_init(&dev->pm_mutex);
- INIT_WORK(&dev->autosuspend, usb_autosuspend_work, dev);
+ INIT_DELAYED_WORK(&dev->autosuspend, usb_autosuspend_work);
#endif
return dev;
}
return usb_hcd_get_frame_number (dev);
}
- /**
- * usb_endpoint_dir_in - check if the endpoint has IN direction
- * @epd: endpoint to be checked
- *
- * Returns true if the endpoint is of type IN, otherwise it returns false.
- */
- int usb_endpoint_dir_in(const struct usb_endpoint_descriptor *epd)
- {
- return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN);
- }
-
- /**
- * usb_endpoint_dir_out - check if the endpoint has OUT direction
- * @epd: endpoint to be checked
- *
- * Returns true if the endpoint is of type OUT, otherwise it returns false.
- */
- int usb_endpoint_dir_out(const struct usb_endpoint_descriptor *epd)
- {
- return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT);
- }
-
- /**
- * usb_endpoint_xfer_bulk - check if the endpoint has bulk transfer type
- * @epd: endpoint to be checked
- *
- * Returns true if the endpoint is of type bulk, otherwise it returns false.
- */
- int usb_endpoint_xfer_bulk(const struct usb_endpoint_descriptor *epd)
- {
- return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
- USB_ENDPOINT_XFER_BULK);
- }
-
- /**
- * usb_endpoint_xfer_int - check if the endpoint has interrupt transfer type
- * @epd: endpoint to be checked
- *
- * Returns true if the endpoint is of type interrupt, otherwise it returns
- * false.
- */
- int usb_endpoint_xfer_int(const struct usb_endpoint_descriptor *epd)
- {
- return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
- USB_ENDPOINT_XFER_INT);
- }
-
- /**
- * usb_endpoint_xfer_isoc - check if the endpoint has isochronous transfer type
- * @epd: endpoint to be checked
- *
- * Returns true if the endpoint is of type isochronous, otherwise it returns
- * false.
- */
- int usb_endpoint_xfer_isoc(const struct usb_endpoint_descriptor *epd)
- {
- return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
- USB_ENDPOINT_XFER_ISOC);
- }
-
- /**
- * usb_endpoint_is_bulk_in - check if the endpoint is bulk IN
- * @epd: endpoint to be checked
- *
- * Returns true if the endpoint has bulk transfer type and IN direction,
- * otherwise it returns false.
- */
- int usb_endpoint_is_bulk_in(const struct usb_endpoint_descriptor *epd)
- {
- return (usb_endpoint_xfer_bulk(epd) && usb_endpoint_dir_in(epd));
- }
-
- /**
- * usb_endpoint_is_bulk_out - check if the endpoint is bulk OUT
- * @epd: endpoint to be checked
- *
- * Returns true if the endpoint has bulk transfer type and OUT direction,
- * otherwise it returns false.
- */
- int usb_endpoint_is_bulk_out(const struct usb_endpoint_descriptor *epd)
- {
- return (usb_endpoint_xfer_bulk(epd) && usb_endpoint_dir_out(epd));
- }
-
- /**
- * usb_endpoint_is_int_in - check if the endpoint is interrupt IN
- * @epd: endpoint to be checked
- *
- * Returns true if the endpoint has interrupt transfer type and IN direction,
- * otherwise it returns false.
- */
- int usb_endpoint_is_int_in(const struct usb_endpoint_descriptor *epd)
- {
- return (usb_endpoint_xfer_int(epd) && usb_endpoint_dir_in(epd));
- }
-
- /**
- * usb_endpoint_is_int_out - check if the endpoint is interrupt OUT
- * @epd: endpoint to be checked
- *
- * Returns true if the endpoint has interrupt transfer type and OUT direction,
- * otherwise it returns false.
- */
- int usb_endpoint_is_int_out(const struct usb_endpoint_descriptor *epd)
- {
- return (usb_endpoint_xfer_int(epd) && usb_endpoint_dir_out(epd));
- }
-
- /**
- * usb_endpoint_is_isoc_in - check if the endpoint is isochronous IN
- * @epd: endpoint to be checked
- *
- * Returns true if the endpoint has isochronous transfer type and IN direction,
- * otherwise it returns false.
- */
- int usb_endpoint_is_isoc_in(const struct usb_endpoint_descriptor *epd)
- {
- return (usb_endpoint_xfer_isoc(epd) && usb_endpoint_dir_in(epd));
- }
-
- /**
- * usb_endpoint_is_isoc_out - check if the endpoint is isochronous OUT
- * @epd: endpoint to be checked
- *
- * Returns true if the endpoint has isochronous transfer type and OUT direction,
- * otherwise it returns false.
- */
- int usb_endpoint_is_isoc_out(const struct usb_endpoint_descriptor *epd)
- {
- return (usb_endpoint_xfer_isoc(epd) && usb_endpoint_dir_out(epd));
- }
-
/*-------------------------------------------------------------------*/
/*
* __usb_get_extra_descriptor() finds a descriptor of specific type in the
EXPORT_SYMBOL(usb_find_device);
EXPORT_SYMBOL(usb_get_current_frame_number);
- EXPORT_SYMBOL_GPL(usb_endpoint_dir_in);
- EXPORT_SYMBOL_GPL(usb_endpoint_dir_out);
- EXPORT_SYMBOL_GPL(usb_endpoint_xfer_bulk);
- EXPORT_SYMBOL_GPL(usb_endpoint_xfer_int);
- EXPORT_SYMBOL_GPL(usb_endpoint_xfer_isoc);
- EXPORT_SYMBOL_GPL(usb_endpoint_is_bulk_in);
- EXPORT_SYMBOL_GPL(usb_endpoint_is_bulk_out);
- EXPORT_SYMBOL_GPL(usb_endpoint_is_int_in);
- EXPORT_SYMBOL_GPL(usb_endpoint_is_int_out);
- EXPORT_SYMBOL_GPL(usb_endpoint_is_isoc_in);
- EXPORT_SYMBOL_GPL(usb_endpoint_is_isoc_out);
-
EXPORT_SYMBOL (usb_buffer_alloc);
EXPORT_SYMBOL (usb_buffer_free);
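The usb_endpoint_* helpers deleted above are not gone: they survive as static inlines in <linux/usb.h>, which is why the many call sites converted in this series (usb_endpoint_dir_in(), usb_endpoint_is_bulk_in(), and friends) no longer need the exports. Assuming the inline versions introduced alongside this cleanup, they keep the same shape:

	static inline int usb_endpoint_dir_in(const struct usb_endpoint_descriptor *epd)
	{
		return (epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN;
	}

	static inline int usb_endpoint_is_bulk_in(
			const struct usb_endpoint_descriptor *epd)
	{
		return usb_endpoint_xfer_bulk(epd) && usb_endpoint_dir_in(epd);
	}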
spin_unlock_irqrestore(&dev->req_lock, flags);
}
-static void eth_work (void *_dev)
+static void eth_work (struct work_struct *work)
{
- struct eth_dev *dev = _dev;
+ struct eth_dev *dev = container_of(work, struct eth_dev, work);
if (test_and_clear_bit (WORK_RX_MEMORY, &dev->todo)) {
if (netif_running (dev->net))
if (!eth_is_promisc (dev)) {
u8 *dest = skb->data;
- if (dest [0] & 0x01) {
+ if (is_multicast_ether_addr(dest)) {
u16 type;
/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
* SET_ETHERNET_MULTICAST_FILTERS requests
*/
- if (memcmp (dest, net->broadcast, ETH_ALEN) == 0)
+ if (is_broadcast_ether_addr(dest))
type = USB_CDC_PACKET_TYPE_BROADCAST;
else
type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
dev = netdev_priv(net);
spin_lock_init (&dev->lock);
spin_lock_init (&dev->req_lock);
- INIT_WORK (&dev->work, eth_work, dev);
+ INIT_WORK (&dev->work, eth_work);
INIT_LIST_HEAD (&dev->tx_reqs);
INIT_LIST_HEAD (&dev->rx_reqs);
module_param(distrust_firmware, bool, 0);
MODULE_PARM_DESC(distrust_firmware, "true to distrust firmware power/"
	"overcurrent setup");
- DECLARE_WAIT_QUEUE_HEAD(u132_hcd_wait);
+ static DECLARE_WAIT_QUEUE_HEAD(u132_hcd_wait);
/*
* u132_module_lock exists to protect access to global variables
*
u16 queue_next;
struct urb *urb_list[ENDP_QUEUE_SIZE];
struct list_head urb_more;
- struct work_struct scheduler;
+ struct delayed_work scheduler;
};
struct u132_ring {
unsigned in_use:1;
u8 number;
struct u132 *u132;
struct u132_endp *curr_endp;
- struct work_struct scheduler;
+ struct delayed_work scheduler;
};
#define OHCI_QUIRK_AMD756 0x01
#define OHCI_QUIRK_SUPERIO 0x02
u32 hc_roothub_portstatus[MAX_ROOT_PORTS];
int flags;
unsigned long next_statechange;
- struct work_struct monitor;
+ struct delayed_work monitor;
int num_endpoints;
struct u132_addr addr[MAX_U132_ADDRS];
struct u132_udev udev[MAX_U132_UDEVS];
struct u132_port port[MAX_U132_PORTS];
struct u132_endp *endp[MAX_U132_ENDPS];
};
- int usb_ftdi_elan_read_reg(struct platform_device *pdev, u32 *data);
- int usb_ftdi_elan_read_pcimem(struct platform_device *pdev, u8 addressofs,
- u8 width, u32 *data);
- int usb_ftdi_elan_write_pcimem(struct platform_device *pdev, u8 addressofs,
- u8 width, u32 data);
+
/*
- * these can not be inlines because we need the structure offset!!
+ * these cannot be inlines because we need the structure offset!!
* Does anyone have a better way?????
*/
#define u132_read_pcimem(u132, member, data) \
if (delta > 0) {
if (queue_delayed_work(workqueue, &ring->scheduler, delta))
return;
- } else if (queue_work(workqueue, &ring->scheduler))
+ } else if (queue_delayed_work(workqueue, &ring->scheduler, 0))
return;
kref_put(&u132->kref, u132_hcd_delete);
return;
static void u132_endp_queue_work(struct u132 *u132, struct u132_endp *endp,
unsigned int delta)
{
- if (delta > 0) {
- if (queue_delayed_work(workqueue, &endp->scheduler, delta))
- kref_get(&endp->kref);
- } else if (queue_work(workqueue, &endp->scheduler))
- kref_get(&endp->kref);
- return;
+ if (queue_delayed_work(workqueue, &endp->scheduler, delta))
+ kref_get(&endp->kref);
}
static void u132_endp_cancel_work(struct u132 *u132, struct u132_endp *endp)
static void u132_monitor_queue_work(struct u132 *u132, unsigned int delta)
{
- if (delta > 0) {
- if (queue_delayed_work(workqueue, &u132->monitor, delta)) {
- kref_get(&u132->kref);
- }
- } else if (queue_work(workqueue, &u132->monitor))
- kref_get(&u132->kref);
- return;
+ if (queue_delayed_work(workqueue, &u132->monitor, delta))
+ kref_get(&u132->kref);
}
static void u132_monitor_requeue_work(struct u132 *u132, unsigned int delta)
{
- if (delta > 0) {
- if (queue_delayed_work(workqueue, &u132->monitor, delta))
- return;
- } else if (queue_work(workqueue, &u132->monitor))
- return;
- kref_put(&u132->kref, u132_hcd_delete);
- return;
+ if (!queue_delayed_work(workqueue, &u132->monitor, delta))
+ kref_put(&u132->kref, u132_hcd_delete);
}
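These helpers collapse cleanly because queue_delayed_work() with a zero delta behaves like the old queue_work(), and its nonzero return means the work was not already pending. That return value is what ties the kref to the submission: take a reference when a new run is queued, drop one when a requeue fails. The two variants, sketched with hypothetical names:

	struct foo {
		struct delayed_work work;
		struct kref kref;
	};

	static struct workqueue_struct *foo_wq;
	static void foo_delete(struct kref *kref);	/* frees the containing foo */

	static void foo_queue_work(struct foo *foo, unsigned int delta)
	{
		if (queue_delayed_work(foo_wq, &foo->work, delta))
			kref_get(&foo->kref);		/* new submission pins foo */
	}

	static void foo_requeue_work(struct foo *foo, unsigned int delta)
	{
		/* caller already holds a reference; release it if nothing queued */
		if (!queue_delayed_work(foo_wq, &foo->work, delta))
			kref_put(&foo->kref, foo_delete);
	}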
static void u132_monitor_cancel_work(struct u132 *u132)
return 0;
}
-static void u132_hcd_monitor_work(void *data)
+static void u132_hcd_monitor_work(struct work_struct *work)
{
- struct u132 *u132 = data;
+ struct u132 *u132 = container_of(work, struct u132, monitor.work);
if (u132->going > 1) {
dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
, u132->going);
}
}
-static void u132_hcd_ring_work_scheduler(void *data);
-static void u132_hcd_endp_work_scheduler(void *data);
/*
* this work function is only executed from the work queue
*
*/
-static void u132_hcd_ring_work_scheduler(void *data)
+static void u132_hcd_ring_work_scheduler(struct work_struct *work)
{
- struct u132_ring *ring = data;
+ struct u132_ring *ring =
+ container_of(work, struct u132_ring, scheduler.work);
struct u132 *u132 = ring->u132;
down(&u132->scheduler_lock);
if (ring->in_use) {
}
}
-static void u132_hcd_endp_work_scheduler(void *data)
+static void u132_hcd_endp_work_scheduler(struct work_struct *work)
{
struct u132_ring *ring;
- struct u132_endp *endp = data;
+ struct u132_endp *endp =
+ container_of(work, struct u132_endp, scheduler.work);
struct u132 *u132 = endp->u132;
down(&u132->scheduler_lock);
ring = endp->ring;
if (!endp) {
return -ENOMEM;
}
- INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp);
+ INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
spin_lock_init(&endp->queue_lock.slock);
INIT_LIST_HEAD(&endp->urb_more);
ring = endp->ring = &u132->ring[0];
if (!endp) {
return -ENOMEM;
}
- INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp);
+ INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
spin_lock_init(&endp->queue_lock.slock);
INIT_LIST_HEAD(&endp->urb_more);
endp->dequeueing = 0;
if (!endp) {
return -ENOMEM;
}
- INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp);
+ INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
spin_lock_init(&endp->queue_lock.slock);
INIT_LIST_HEAD(&endp->urb_more);
ring = endp->ring = &u132->ring[0];
* This function may be called by the USB core whilst the "usb_all_devices_rwsem"
* is held for writing, thus this module must not call usb_remove_hcd()
* synchronously - but instead should immediately stop activity to the
- * device and ansynchronously call usb_remove_hcd()
+ * device and asynchronously call usb_remove_hcd()
*/
static int __devexit u132_remove(struct platform_device *pdev)
{
ring->number = rings + 1;
ring->length = 0;
ring->curr_endp = NULL;
- INIT_WORK(&ring->scheduler, u132_hcd_ring_work_scheduler,
- (void *)ring);
+ INIT_DELAYED_WORK(&ring->scheduler,
+ u132_hcd_ring_work_scheduler);
} down(&u132->sw_lock);
- INIT_WORK(&u132->monitor, u132_hcd_monitor_work, (void *)u132);
+ INIT_DELAYED_WORK(&u132->monitor, u132_hcd_monitor_work);
while (ports-- > 0) {
struct u132_port *port = &u132->port[ports];
port->u132 = u132;
#define u132_resume NULL
#endif
/*
- * this driver is loaded explicitely by ftdi_u132
+ * this driver is loaded explicitly by ftdi_u132
*
* the platform_driver struct is static because it is per type of module
*/
hid_io_error(hid);
}
- /* Workqueue routine to reset the device */
+ /* Workqueue routine to reset the device or clear a halt */
-static void hid_reset(void *_hid)
+static void hid_reset(struct work_struct *work)
{
- struct hid_device *hid = (struct hid_device *) _hid;
+ struct hid_device *hid =
+ container_of(work, struct hid_device, reset_work);
- int rc_lock, rc;
-
- dev_dbg(&hid->intf->dev, "resetting device\n");
- rc = rc_lock = usb_lock_device_for_reset(hid->dev, hid->intf);
- if (rc_lock >= 0) {
- rc = usb_reset_composite_device(hid->dev, hid->intf);
- if (rc_lock)
- usb_unlock_device(hid->dev);
+ int rc_lock, rc = 0;
+
+ if (test_bit(HID_CLEAR_HALT, &hid->iofl)) {
+ dev_dbg(&hid->intf->dev, "clear halt\n");
+ rc = usb_clear_halt(hid->dev, hid->urbin->pipe);
+ clear_bit(HID_CLEAR_HALT, &hid->iofl);
+ hid_start_in(hid);
+ }
+
+ else if (test_bit(HID_RESET_PENDING, &hid->iofl)) {
+ dev_dbg(&hid->intf->dev, "resetting device\n");
+ rc = rc_lock = usb_lock_device_for_reset(hid->dev, hid->intf);
+ if (rc_lock >= 0) {
+ rc = usb_reset_composite_device(hid->dev, hid->intf);
+ if (rc_lock)
+ usb_unlock_device(hid->dev);
+ }
+ clear_bit(HID_RESET_PENDING, &hid->iofl);
}
- clear_bit(HID_RESET_PENDING, &hid->iofl);
switch (rc) {
case 0:
/* Retries failed, so do a port reset */
if (!test_and_set_bit(HID_RESET_PENDING, &hid->iofl)) {
- if (schedule_work(&hid->reset_work))
- goto done;
- clear_bit(HID_RESET_PENDING, &hid->iofl);
+ schedule_work(&hid->reset_work);
+ goto done;
}
}
hid->retry_delay = 0;
hid_input_report(HID_INPUT_REPORT, urb, 1);
break;
+ case -EPIPE: /* stall */
+ clear_bit(HID_IN_RUNNING, &hid->iofl);
+ set_bit(HID_CLEAR_HALT, &hid->iofl);
+ schedule_work(&hid->reset_work);
+ return;
case -ECONNRESET: /* unlink */
case -ENOENT:
case -ESHUTDOWN: /* unplug */
#define USB_VENDOR_ID_APPLE 0x05ac
#define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304
+ #define USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI 0x020e
+ #define USB_DEVICE_ID_APPLE_FOUNTAIN_ISO 0x020f
+ #define USB_DEVICE_ID_APPLE_GEYSER_ANSI 0x0214
+ #define USB_DEVICE_ID_APPLE_GEYSER_ISO 0x0215
+ #define USB_DEVICE_ID_APPLE_GEYSER_JIS 0x0216
+ #define USB_DEVICE_ID_APPLE_GEYSER3_ANSI 0x0217
+ #define USB_DEVICE_ID_APPLE_GEYSER3_ISO 0x0218
+ #define USB_DEVICE_ID_APPLE_GEYSER3_JIS 0x0219
+ #define USB_DEVICE_ID_APPLE_GEYSER4_ANSI 0x021a
+ #define USB_DEVICE_ID_APPLE_GEYSER4_ISO 0x021b
+ #define USB_DEVICE_ID_APPLE_GEYSER4_JIS 0x021c
+ #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
+ #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
#define USB_VENDOR_ID_CHERRY 0x046a
#define USB_DEVICE_ID_CHERRY_CYMOTION 0x0023
{ USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION, HID_QUIRK_CYMOTION },
- { USB_VENDOR_ID_APPLE, 0x020E, HID_QUIRK_POWERBOOK_HAS_FN },
- { USB_VENDOR_ID_APPLE, 0x020F, HID_QUIRK_POWERBOOK_HAS_FN },
- { USB_VENDOR_ID_APPLE, 0x0214, HID_QUIRK_POWERBOOK_HAS_FN },
- { USB_VENDOR_ID_APPLE, 0x0215, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_POWERBOOK_ISO_KEYBOARD},
- { USB_VENDOR_ID_APPLE, 0x0216, HID_QUIRK_POWERBOOK_HAS_FN },
- { USB_VENDOR_ID_APPLE, 0x0217, HID_QUIRK_POWERBOOK_HAS_FN },
- { USB_VENDOR_ID_APPLE, 0x0218, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_POWERBOOK_ISO_KEYBOARD},
- { USB_VENDOR_ID_APPLE, 0x0219, HID_QUIRK_POWERBOOK_HAS_FN },
- { USB_VENDOR_ID_APPLE, 0x021B, HID_QUIRK_POWERBOOK_HAS_FN },
- { USB_VENDOR_ID_APPLE, 0x030A, HID_QUIRK_POWERBOOK_HAS_FN },
- { USB_VENDOR_ID_APPLE, 0x030B, HID_QUIRK_POWERBOOK_HAS_FN },
+ { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI, HID_QUIRK_POWERBOOK_HAS_FN },
+ { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO, HID_QUIRK_POWERBOOK_HAS_FN },
+ { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI, HID_QUIRK_POWERBOOK_HAS_FN },
+ { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_POWERBOOK_ISO_KEYBOARD},
+ { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS, HID_QUIRK_POWERBOOK_HAS_FN },
+ { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI, HID_QUIRK_POWERBOOK_HAS_FN },
+ { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_POWERBOOK_ISO_KEYBOARD},
+ { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS, HID_QUIRK_POWERBOOK_HAS_FN },
+ { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI, HID_QUIRK_POWERBOOK_HAS_FN },
+ { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO, HID_QUIRK_POWERBOOK_HAS_FN },
+ { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS, HID_QUIRK_POWERBOOK_HAS_FN },
+ { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY, HID_QUIRK_POWERBOOK_HAS_FN },
+ { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_POWERBOOK_HAS_FN },
{ USB_VENDOR_ID_PANJIT, 0x0001, HID_QUIRK_IGNORE },
{ USB_VENDOR_ID_PANJIT, 0x0002, HID_QUIRK_IGNORE },
if (hid->collection->usage == HID_GD_MOUSE && hid_mousepoll_interval > 0)
interval = hid_mousepoll_interval;
- if (endpoint->bEndpointAddress & USB_DIR_IN) {
+ if (usb_endpoint_dir_in(endpoint)) {
if (hid->urbin)
continue;
if (!(hid->urbin = usb_alloc_urb(0, GFP_KERNEL)))
init_waitqueue_head(&hid->wait);
- INIT_WORK(&hid->reset_work, hid_reset, hid);
+ INIT_WORK(&hid->reset_work, hid_reset);
setup_timer(&hid->io_retry, hid_retry_timeout, (unsigned long) hid);
spin_lock_init(&hid->inlock);
return hid;
fail:
-
- if (hid->urbin)
- usb_free_urb(hid->urbin);
- if (hid->urbout)
- usb_free_urb(hid->urbout);
- if (hid->urbctrl)
- usb_free_urb(hid->urbctrl);
+ usb_free_urb(hid->urbin);
+ usb_free_urb(hid->urbout);
+ usb_free_urb(hid->urbctrl);
hid_free_buffers(dev, hid);
hid_free_device(hid);
usb_free_urb(hid->urbin);
usb_free_urb(hid->urbctrl);
- if (hid->urbout)
- usb_free_urb(hid->urbout);
+ usb_free_urb(hid->urbout);
hid_free_buffers(hid->dev, hid);
hid_free_device(hid);
struct usb_device *udev;
struct usb_interface *interface;
struct usb_class_driver *class;
- struct work_struct status_work;
- struct work_struct command_work;
- struct work_struct respond_work;
+ struct delayed_work status_work;
+ struct delayed_work command_work;
+ struct delayed_work respond_work;
struct u132_platform_data platform_data;
struct resource resources[0];
struct platform_device platform_dev;
static void ftdi_status_requeue_work(struct usb_ftdi *ftdi, unsigned int delta)
{
- if (delta > 0) {
- if (queue_delayed_work(status_queue, &ftdi->status_work, delta))
- return;
- } else if (queue_work(status_queue, &ftdi->status_work))
- return;
- kref_put(&ftdi->kref, ftdi_elan_delete);
- return;
+ if (!queue_delayed_work(status_queue, &ftdi->status_work, delta))
+ kref_put(&ftdi->kref, ftdi_elan_delete);
}
static void ftdi_status_queue_work(struct usb_ftdi *ftdi, unsigned int delta)
{
- if (delta > 0) {
- if (queue_delayed_work(status_queue, &ftdi->status_work, delta))
- kref_get(&ftdi->kref);
- } else if (queue_work(status_queue, &ftdi->status_work))
- kref_get(&ftdi->kref);
- return;
+ if (queue_delayed_work(status_queue, &ftdi->status_work, delta))
+ kref_get(&ftdi->kref);
}
static void ftdi_status_cancel_work(struct usb_ftdi *ftdi)
static void ftdi_command_requeue_work(struct usb_ftdi *ftdi, unsigned int delta)
{
- if (delta > 0) {
- if (queue_delayed_work(command_queue, &ftdi->command_work,
- delta))
- return;
- } else if (queue_work(command_queue, &ftdi->command_work))
- return;
- kref_put(&ftdi->kref, ftdi_elan_delete);
- return;
+ if (!queue_delayed_work(command_queue, &ftdi->command_work, delta))
+ kref_put(&ftdi->kref, ftdi_elan_delete);
}
static void ftdi_command_queue_work(struct usb_ftdi *ftdi, unsigned int delta)
{
- if (delta > 0) {
- if (queue_delayed_work(command_queue, &ftdi->command_work,
- delta))
- kref_get(&ftdi->kref);
- } else if (queue_work(command_queue, &ftdi->command_work))
- kref_get(&ftdi->kref);
- return;
+ if (queue_delayed_work(command_queue, &ftdi->command_work, delta))
+ kref_get(&ftdi->kref);
}
static void ftdi_command_cancel_work(struct usb_ftdi *ftdi)
static void ftdi_response_requeue_work(struct usb_ftdi *ftdi,
unsigned int delta)
{
- if (delta > 0) {
- if (queue_delayed_work(respond_queue, &ftdi->respond_work,
- delta))
- return;
- } else if (queue_work(respond_queue, &ftdi->respond_work))
- return;
- kref_put(&ftdi->kref, ftdi_elan_delete);
- return;
+ if (!queue_delayed_work(respond_queue, &ftdi->respond_work, delta))
+ kref_put(&ftdi->kref, ftdi_elan_delete);
}
static void ftdi_respond_queue_work(struct usb_ftdi *ftdi, unsigned int delta)
{
- if (delta > 0) {
- if (queue_delayed_work(respond_queue, &ftdi->respond_work,
- delta))
- kref_get(&ftdi->kref);
- } else if (queue_work(respond_queue, &ftdi->respond_work))
- kref_get(&ftdi->kref);
- return;
+ if (queue_delayed_work(respond_queue, &ftdi->respond_work, delta))
+ kref_get(&ftdi->kref);
}
static void ftdi_response_cancel_work(struct usb_ftdi *ftdi)
EXPORT_SYMBOL_GPL(ftdi_elan_gone_away);
- void ftdi_release_platform_dev(struct device *dev)
+ static void ftdi_release_platform_dev(struct device *dev)
{
dev->parent = NULL;
}
return;
}
-static void ftdi_elan_command_work(void *data)
+static void ftdi_elan_command_work(struct work_struct *work)
{
- struct usb_ftdi *ftdi = data;
+ struct usb_ftdi *ftdi =
+ container_of(work, struct usb_ftdi, command_work.work);
+
if (ftdi->disconnected > 0) {
ftdi_elan_put_kref(ftdi);
return;
return;
}
-static void ftdi_elan_respond_work(void *data)
+static void ftdi_elan_respond_work(struct work_struct *work)
{
- struct usb_ftdi *ftdi = data;
+ struct usb_ftdi *ftdi =
+ container_of(work, struct usb_ftdi, respond_work.work);
if (ftdi->disconnected > 0) {
ftdi_elan_put_kref(ftdi);
return;
* after the FTDI has been synchronized
*
*/
-static void ftdi_elan_status_work(void *data)
+static void ftdi_elan_status_work(struct work_struct *work)
{
- struct usb_ftdi *ftdi = data;
+ struct usb_ftdi *ftdi =
+ container_of(work, struct usb_ftdi, status_work.work);
int work_delay_in_msec = 0;
if (ftdi->disconnected > 0) {
ftdi_elan_put_kref(ftdi);
}
}
- int usb_ftdi_elan_read_reg(struct platform_device *pdev, u32 *data)
- {
- struct usb_ftdi *ftdi = platform_device_to_usb_ftdi(pdev);
- return ftdi_elan_read_reg(ftdi, data);
- }
-
-
- EXPORT_SYMBOL_GPL(usb_ftdi_elan_read_reg);
static int ftdi_elan_read_config(struct usb_ftdi *ftdi, int config_offset,
u8 width, u32 *data)
{
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
endpoint = &iface_desc->endpoint[i].desc;
if (!ftdi->bulk_in_endpointAddr &&
- ((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
- == USB_DIR_IN) && ((endpoint->bmAttributes &
- USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_BULK))
- {
+ usb_endpoint_is_bulk_in(endpoint)) {
buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
ftdi->bulk_in_size = buffer_size;
ftdi->bulk_in_endpointAddr = endpoint->bEndpointAddress;
}
}
if (!ftdi->bulk_out_endpointAddr &&
- ((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
- == USB_DIR_OUT) && ((endpoint->bmAttributes &
- USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_BULK))
- {
+ usb_endpoint_is_bulk_out(endpoint)) {
ftdi->bulk_out_endpointAddr =
endpoint->bEndpointAddress;
}
ftdi->class = NULL;
dev_info(&ftdi->udev->dev, "USB FTDI=%p ELAN interface %d now "
"activated\n", ftdi, iface_desc->desc.bInterfaceNumber);
- INIT_WORK(&ftdi->status_work, ftdi_elan_status_work,
- (void *)ftdi);
- INIT_WORK(&ftdi->command_work, ftdi_elan_command_work,
- (void *)ftdi);
- INIT_WORK(&ftdi->respond_work, ftdi_elan_respond_work,
- (void *)ftdi);
+ INIT_DELAYED_WORK(&ftdi->status_work, ftdi_elan_status_work);
+ INIT_DELAYED_WORK(&ftdi->command_work, ftdi_elan_command_work);
+ INIT_DELAYED_WORK(&ftdi->respond_work, ftdi_elan_respond_work);
ftdi_status_queue_work(ftdi, msecs_to_jiffies(3 *1000));
return 0;
} else {
unsigned char *data;
dma_addr_t data_dma;
- struct work_struct do_notify;
- struct work_struct do_resubmit;
+ struct delayed_work do_notify;
+ struct delayed_work do_resubmit;
unsigned long input_events;
unsigned long sensor_events;
};
}
if (kit->input_events || kit->sensor_events)
- schedule_work(&kit->do_notify);
+ schedule_delayed_work(&kit->do_notify, 0);
resubmit:
status = usb_submit_urb(urb, SLAB_ATOMIC);
kit->udev->devpath, status);
}
-static void do_notify(void *data)
+static void do_notify(struct work_struct *work)
{
- struct interfacekit *kit = data;
+ struct interfacekit *kit =
+ container_of(work, struct interfacekit, do_notify.work);
int i;
char sysfs_file[8];
}
}
-static void do_resubmit(void *data)
+static void do_resubmit(struct work_struct *work)
{
- set_outputs(data);
+ struct interfacekit *kit =
+ container_of(work, struct interfacekit, do_resubmit.work);
+ set_outputs(kit);
}
#define show_set_output(value) \
return -ENODEV;
endpoint = &interface->endpoint[0].desc;
- if (!(endpoint->bEndpointAddress & 0x80))
+ if (!usb_endpoint_dir_in(endpoint))
return -ENODEV;
/*
* bmAttributes
kit->udev = usb_get_dev(dev);
kit->intf = intf;
- INIT_WORK(&kit->do_notify, do_notify, kit);
- INIT_WORK(&kit->do_resubmit, do_resubmit, kit);
+ INIT_DELAYED_WORK(&kit->do_notify, do_notify);
+ INIT_DELAYED_WORK(&kit->do_resubmit, do_resubmit);
usb_fill_int_urb(kit->irq, kit->udev, pipe, kit->data,
maxp > URB_INT_SIZE ? URB_INT_SIZE : maxp,
interfacekit_irq, kit, endpoint->bInterval);
device_remove_file(kit->dev, &dev_output_attrs[i]);
out:
if (kit) {
- if (kit->irq)
- usb_free_urb(kit->irq);
+ usb_free_urb(kit->irq);
if (kit->data)
usb_buffer_free(dev, URB_INT_SIZE, kit->data, kit->data_dma);
if (kit->dev)
unsigned char *data;
dma_addr_t data_dma;
- struct work_struct do_notify;
+ struct delayed_work do_notify;
unsigned long input_events;
unsigned long speed_events;
unsigned long exceed_events;
set_bit(1, &mc->exceed_events);
if (mc->input_events || mc->exceed_events || mc->speed_events)
- schedule_work(&mc->do_notify);
+ schedule_delayed_work(&mc->do_notify, 0);
resubmit:
status = usb_submit_urb(urb, SLAB_ATOMIC);
mc->udev->devpath, status);
}
-static void do_notify(void *data)
+static void do_notify(struct work_struct *work)
{
- struct motorcontrol *mc = data;
+ struct motorcontrol *mc =
+ container_of(work, struct motorcontrol, do_notify.work);
int i;
char sysfs_file[8];
return -ENODEV;
endpoint = &interface->endpoint[0].desc;
- if (!(endpoint->bEndpointAddress & 0x80))
+ if (!usb_endpoint_dir_in(endpoint))
return -ENODEV;
/*
mc->udev = usb_get_dev(dev);
mc->intf = intf;
mc->acceleration[0] = mc->acceleration[1] = 10;
- INIT_WORK(&mc->do_notify, do_notify, mc);
+ INIT_DELAYED_WORK(&mc->do_notify, do_notify);
usb_fill_int_urb(mc->irq, mc->udev, pipe, mc->data,
maxp > URB_INT_SIZE ? URB_INT_SIZE : maxp,
motorcontrol_irq, mc, endpoint->bInterval);
device_remove_file(mc->dev, &dev_attrs[i]);
out:
if (mc) {
- if (mc->irq)
- usb_free_urb(mc->irq);
+ usb_free_urb(mc->irq);
if (mc->data)
usb_buffer_free(dev, URB_INT_SIZE, mc->data, mc->data_dma);
if (mc->dev)
/* using ATOMIC, we'd never wake up if we slept */
if ((ret = usb_submit_urb(pegasus->ctrl_urb, GFP_ATOMIC))) {
+ set_current_state(TASK_RUNNING);
if (ret == -ENODEV)
netif_device_detach(pegasus->net);
if (netif_msg_drv(pegasus))
static struct workqueue_struct *pegasus_workqueue = NULL;
#define CARRIER_CHECK_DELAY (2 * HZ)
-static void check_carrier(void *data)
+static void check_carrier(struct work_struct *work)
{
- pegasus_t *pegasus = data;
+ pegasus_t *pegasus = container_of(work, pegasus_t, carrier_check.work);
set_carrier(pegasus->net);
if (!(pegasus->flags & PEGASUS_UNPLUG)) {
queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check,
tasklet_init(&pegasus->rx_tl, rx_fixup, (unsigned long) pegasus);
- INIT_WORK(&pegasus->carrier_check, check_carrier, pegasus);
+ INIT_DELAYED_WORK(&pegasus->carrier_check, check_carrier);
pegasus->intf = intf;
pegasus->usb = dev;
e = alt->endpoint + ep;
switch (e->desc.bmAttributes) {
case USB_ENDPOINT_XFER_INT:
- if (!(e->desc.bEndpointAddress & USB_DIR_IN))
+ if (!usb_endpoint_dir_in(&e->desc))
continue;
intr = 1;
/* FALLTHROUGH */
default:
continue;
}
- if (e->desc.bEndpointAddress & USB_DIR_IN) {
+ if (usb_endpoint_dir_in(&e->desc)) {
if (!intr && !in)
in = e;
else if (intr && !status)
* especially now that control transfers can be queued.
*/
static void
-kevent (void *data)
+kevent (struct work_struct *work)
{
- struct usbnet *dev = data;
+ struct usbnet *dev =
+ container_of(work, struct usbnet, kevent);
int status;
/* usb_clear_halt() needs a thread context */
skb_queue_head_init (&dev->done);
dev->bh.func = usbnet_bh;
dev->bh.data = (unsigned long) dev;
- INIT_WORK (&dev->kevent, kevent, dev);
+ INIT_WORK (&dev->kevent, kevent);
dev->delay.function = usbnet_bh;
dev->delay.data = (unsigned long) dev;
init_timer (&dev->delay);
struct circ_buf *rx_buf; /* read buffer */
int rx_flags; /* for throttling */
struct work_struct rx_work; /* work queue for the receiving line */
+ struct usb_serial_port *port; /* USB port with which associated */
};
/* Private methods */
schedule_work(&port->work);
}
-static void aircable_read(void *params)
+static void aircable_read(struct work_struct *work)
{
- struct usb_serial_port *port = params;
- struct aircable_private *priv = usb_get_serial_port_data(port);
+ struct aircable_private *priv =
+ container_of(work, struct aircable_private, rx_work);
+ struct usb_serial_port *port = priv->port;
struct tty_struct *tty;
unsigned char *data;
int count;
*/
tty = port->tty;
- if (!tty)
+ if (!tty) {
schedule_work(&priv->rx_work);
+ err("%s - No tty available", __FUNCTION__);
+ return;
+ }
count = min(64, serial_buf_data_avail(priv->rx_buf));
for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
endpoint = &iface_desc->endpoint[i].desc;
- if (((endpoint->bEndpointAddress & 0x80) == 0x00) &&
- ((endpoint->bmAttributes & 3) == 0x02)) {
- /* we found our bulk out endpoint */
+ if (usb_endpoint_is_bulk_out(endpoint)) {
dbg("found bulk out on endpoint %d", i);
++num_bulk_out;
}
}
priv->rx_flags &= ~(THROTTLED | ACTUALLY_THROTTLED);
- INIT_WORK(&priv->rx_work, aircable_read, port);
+ priv->port = port;
+ INIT_WORK(&priv->rx_work, aircable_read);
usb_set_serial_port_data(serial->port[0], priv);
package_length - shift);
}
}
- aircable_read(port);
+ aircable_read(&priv->rx_work);
}
/* Schedule the next read _if_ we are still open */
* to TASK_RUNNING will be lost and write_chan's subsequent call to
* schedule() will never return (unless it catches a signal).
* This race condition occurs because write_bulk_callback() (and thus
- * the wakeup) are called asynchonously from an interrupt, rather than
+ * the wakeup) are called asynchronously from an interrupt, rather than
* from the scheduler. We can avoid the race by calling the wakeup
* from the scheduler queue, and that's our fix: now, at the end of
* write_bulk_callback() we queue up a wakeup call on the scheduler
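Concretely, the completion handler only queues the work, and the wakeup itself runs later from keventd's process context; a stripped-down sketch with hypothetical names:

    /* completion handler (interrupt context): defer the wakeup */
    static void my_write_bulk_callback(struct urb *urb)
    {
            struct my_port *priv = urb->context;
            schedule_work(&priv->wakeup_work);      /* runs later in process context */
    }

    /* work handler (process context): safe to wake the writer */
    static void my_wakeup(struct work_struct *work)
    {
            struct my_port *priv = container_of(work, struct my_port, wakeup_work);
            tty_wakeup(priv->port->tty);
    }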
int dp_in_close; /* close in progress */
wait_queue_head_t dp_close_wait; /* wait queue for close */
struct work_struct dp_wakeup_work;
+ struct usb_serial_port *dp_port;
};
/* Local Function Declarations */
static void digi_wakeup_write( struct usb_serial_port *port );
-static void digi_wakeup_write_lock(void *);
+static void digi_wakeup_write_lock(struct work_struct *work);
static int digi_write_oob_command( struct usb_serial_port *port,
unsigned char *buf, int count, int interruptible );
static int digi_write_inb_command( struct usb_serial_port *port,
* on writes.
*/
-static void digi_wakeup_write_lock(void *arg)
+static void digi_wakeup_write_lock(struct work_struct *work)
{
- struct usb_serial_port *port = arg;
+ struct digi_port *priv =
+ container_of(work, struct digi_port, dp_wakeup_work);
+ struct usb_serial_port *port = priv->dp_port;
unsigned long flags;
- struct digi_port *priv = usb_get_serial_port_data(port);
spin_lock_irqsave( &priv->dp_port_lock, flags );
init_waitqueue_head( &priv->dp_flush_wait );
priv->dp_in_close = 0;
init_waitqueue_head( &priv->dp_close_wait );
- INIT_WORK(&priv->dp_wakeup_work,
- digi_wakeup_write_lock, serial->port[i]);
+ INIT_WORK(&priv->dp_wakeup_work, digi_wakeup_write_lock);
+ priv->dp_port = serial->port[i];
/* initialize write wait queue for this port */
init_waitqueue_head( &serial->port[i]->write_wait );
char prev_status, diff_status; /* Used for TIOCMIWAIT */
__u8 rx_flags; /* receive state flags (throttling) */
spinlock_t rx_lock; /* spinlock for receive state */
- struct work_struct rx_work;
+ struct delayed_work rx_work;
+ struct usb_serial_port *port;
int rx_processed;
unsigned long rx_bytes;
static int ftdi_chars_in_buffer (struct usb_serial_port *port);
static void ftdi_write_bulk_callback (struct urb *urb);
static void ftdi_read_bulk_callback (struct urb *urb);
-static void ftdi_process_read (void *param);
+static void ftdi_process_read (struct work_struct *work);
static void ftdi_set_termios (struct usb_serial_port *port, struct termios * old);
static int ftdi_tiocmget (struct usb_serial_port *port, struct file *file);
static int ftdi_tiocmset (struct usb_serial_port *port, struct file * file, unsigned int set, unsigned int clear);
port->read_urb->transfer_buffer_length = BUFSZ;
}
- INIT_WORK(&priv->rx_work, ftdi_process_read, port);
+ INIT_DELAYED_WORK(&priv->rx_work, ftdi_process_read);
+ priv->port = port;
/* Free port's existing write urb and transfer buffer. */
if (port->write_urb) {
flush_scheduled_work();
/* shutdown our bulk read */
- if (port->read_urb)
- usb_kill_urb(port->read_urb);
+ usb_kill_urb(port->read_urb);
} /* ftdi_close */
priv->rx_bytes += countread;
spin_unlock_irqrestore(&priv->rx_lock, flags);
- ftdi_process_read(port);
+ ftdi_process_read(&priv->rx_work.work);
} /* ftdi_read_bulk_callback */
-static void ftdi_process_read (void *param)
+static void ftdi_process_read (struct work_struct *work)
{ /* ftdi_process_read */
- struct usb_serial_port *port = (struct usb_serial_port*)param;
+ struct ftdi_private *priv =
+ container_of(work, struct ftdi_private, rx_work.work);
+ struct usb_serial_port *port = priv->port;
struct urb *urb;
struct tty_struct *tty;
- struct ftdi_private *priv;
char error_flag;
unsigned char *data;
spin_unlock_irqrestore(&priv->rx_lock, flags);
if (actually_throttled)
- schedule_work(&priv->rx_work);
+ schedule_delayed_work(&priv->rx_work, 0);
}
static int __init ftdi_init (void)
schedule_work(&port->work);
}
-static void usb_serial_port_work(void *private)
+static void usb_serial_port_work(struct work_struct *work)
{
- struct usb_serial_port *port = private;
+ struct usb_serial_port *port =
+ container_of(work, struct usb_serial_port, work);
struct tty_struct *tty;
dbg("%s - port %d", __FUNCTION__, port->number);
port->serial = serial;
spin_lock_init(&port->lock);
mutex_init(&port->mutex);
- INIT_WORK(&port->work, usb_serial_port_work, port);
+ INIT_WORK(&port->work, usb_serial_port_work);
serial->port[i] = port;
}
port = serial->port[i];
if (!port)
continue;
- if (port->read_urb)
- usb_free_urb (port->read_urb);
+ usb_free_urb(port->read_urb);
kfree(port->bulk_in_buffer);
}
for (i = 0; i < num_bulk_out; ++i) {
port = serial->port[i];
if (!port)
continue;
- if (port->write_urb)
- usb_free_urb (port->write_urb);
+ usb_free_urb(port->write_urb);
kfree(port->bulk_out_buffer);
}
for (i = 0; i < num_interrupt_in; ++i) {
port = serial->port[i];
if (!port)
continue;
- if (port->interrupt_in_urb)
- usb_free_urb (port->interrupt_in_urb);
+ usb_free_urb(port->interrupt_in_urb);
kfree(port->interrupt_in_buffer);
}
for (i = 0; i < num_interrupt_out; ++i) {
port = serial->port[i];
if (!port)
continue;
- if (port->interrupt_out_urb)
- usb_free_urb (port->interrupt_out_urb);
+ usb_free_urb(port->interrupt_out_urb);
kfree(port->interrupt_out_buffer);
}
static struct workqueue_struct *aio_wq;
/* Used for rare fput completion. */
-static void aio_fput_routine(void *);
-static DECLARE_WORK(fput_work, aio_fput_routine, NULL);
+static void aio_fput_routine(struct work_struct *);
+static DECLARE_WORK(fput_work, aio_fput_routine);
static DEFINE_SPINLOCK(fput_lock);
static LIST_HEAD(fput_head);
-static void aio_kick_handler(void *);
+static void aio_kick_handler(struct work_struct *);
static void aio_queue_work(struct kioctx *);
/* aio_setup
INIT_LIST_HEAD(&ctx->active_reqs);
INIT_LIST_HEAD(&ctx->run_list);
- INIT_WORK(&ctx->wq, aio_kick_handler, ctx);
+ INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler);
if (aio_setup_ring(ctx) < 0)
goto out_freectx;
{
unsigned nr_events = ctx->max_reqs;
- if (unlikely(ctx->reqs_active))
- BUG();
+ BUG_ON(ctx->reqs_active);
cancel_delayed_work(&ctx->wq);
flush_workqueue(aio_wq);
wake_up(&ctx->wait);
}
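The destruction path above also shows the standard ordering for tearing down delayed work: cancel a pending timer first, then flush the workqueue so a handler already in flight completes before the context is freed.

    cancel_delayed_work(&ctx->wq);  /* disarm a not-yet-queued timer */
    flush_workqueue(aio_wq);        /* wait out a handler already running */
    /* only now is it safe to release ctx */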
-static void aio_fput_routine(void *data)
+static void aio_fput_routine(struct work_struct *data)
{
spin_lock_irq(&fput_lock);
while (likely(!list_empty(&fput_head))) {
assert_spin_locked(&ctx->ctx_lock);
req->ki_users --;
- if (unlikely(req->ki_users < 0))
- BUG();
+ BUG_ON(req->ki_users < 0);
if (likely(req->ki_users))
return 0;
list_del(&req->ki_list); /* remove from active_reqs */
* space.
* Run on aiod's context.
*/
-static void aio_kick_handler(void *data)
+static void aio_kick_handler(struct work_struct *work)
{
- struct kioctx *ctx = data;
+ struct kioctx *ctx = container_of(work, struct kioctx, wq.work);
mm_segment_t oldfs = get_fs();
int requeue;
* we're in a worker thread already; no point in using a non-zero delay,
*/
if (requeue)
- queue_work(aio_wq, &ctx->wq);
+ queue_delayed_work(aio_wq, &ctx->wq, 0);
}
break;
}
- if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) {
- ret = -EINVAL;
+ if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
break;
- }
len -= bytes;
}
nr_pages += end - start;
/*
- * transfer and buffer must be aligned to at least hardsector
- * size for now, in the future we can relax this restriction
+ * buffer must be aligned to at least hardsector size for now
*/
- if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
+ if (uaddr & queue_dma_alignment(q))
return ERR_PTR(-EINVAL);
}
int write_to_vm)
{
struct bio *bio;
- int len = 0, i;
bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm);
*/
bio_get(bio);
- for (i = 0; i < iov_count; i++)
- len += iov[i].iov_len;
-
- if (bio->bi_size == len)
- return bio;
-
- /*
- * don't support partial mappings
- */
- bio_endio(bio, bio->bi_size, 0);
- bio_unmap_user(bio);
- return ERR_PTR(-EINVAL);
+ return bio;
}
static void __bio_unmap_user(struct bio *bio)
* run one bio_put() against the BIO.
*/
-static void bio_dirty_fn(void *data);
+static void bio_dirty_fn(struct work_struct *work);
-static DECLARE_WORK(bio_dirty_work, bio_dirty_fn, NULL);
+static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;
/*
* This runs in process context
*/
-static void bio_dirty_fn(void *data)
+static void bio_dirty_fn(struct work_struct *work)
{
unsigned long flags;
struct bio *bio;
struct reiserfs_journal *journal);
static int dirty_one_transaction(struct super_block *s,
struct reiserfs_journal_list *jl);
-static void flush_async_commits(void *p);
+static void flush_async_commits(struct work_struct *work);
static void queue_log_writer(struct super_block *s);
/* values for join in do_journal_begin_r */
}
/* if someone has this block in a newer transaction, just make
- ** sure they are commited, and don't try writing it to disk
+ ** sure they are committed, and don't try writing it to disk
*/
if (pjl) {
if (atomic_read(&pjl->j_commit_left))
if (reiserfs_mounted_fs_count <= 1)
commit_wq = create_workqueue("reiserfs");
- INIT_WORK(&journal->j_work, flush_async_commits, p_s_sb);
+ INIT_DELAYED_WORK(&journal->j_work, flush_async_commits);
+ journal->j_work_sb = p_s_sb;
return 0;
free_and_return:
free_journal_ram(p_s_sb);
/*
** for any cnode in a journal list, it can only be dirtied of all the
- ** transactions that include it are commited to disk.
+ ** transactions that include it are committed to disk.
** this checks through each transaction, and returns 1 if you are allowed to dirty,
** and 0 if you aren't
**
}
/* syncs the commit blocks, but does not force the real buffers to disk
- ** will wait until the current transaction is done/commited before returning
+ ** will wait until the current transaction is done/committed before returning
*/
int journal_end_sync(struct reiserfs_transaction_handle *th,
struct super_block *p_s_sb, unsigned long nblocks)
/*
** writeback the pending async commits to disk
*/
-static void flush_async_commits(void *p)
+static void flush_async_commits(struct work_struct *work)
{
- struct super_block *p_s_sb = p;
- struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
+ struct reiserfs_journal *journal =
+ container_of(work, struct reiserfs_journal, j_work.work);
+ struct super_block *p_s_sb = journal->j_work_sb;
struct reiserfs_journal_list *jl;
struct list_head *entry;
struct device;
struct mmc_host {
- struct device *dev;
- struct class_device class_dev;
+ struct device *parent;
+ struct device class_dev;
int index;
const struct mmc_host_ops *ops;
unsigned int f_min;
struct mmc_card *card_busy; /* the MMC card claiming host */
struct mmc_card *card_selected; /* the selected MMC card */
- struct work_struct detect;
+ struct delayed_work detect;
unsigned long private[0] ____cacheline_aligned;
};
return (void *)host->private;
}
- #define mmc_dev(x) ((x)->dev)
- #define mmc_hostname(x) ((x)->class_dev.class_id)
+ #define mmc_dev(x) ((x)->parent)
+ #define mmc_hostname(x) ((x)->class_dev.bus_id)
extern int mmc_suspend_host(struct mmc_host *, pm_message_t);
extern int mmc_resume_host(struct mmc_host *);
int j_errno;
/* when flushing ordered buffers, throttle new ordered writers */
- struct work_struct j_work;
+ struct delayed_work j_work;
+ struct super_block *j_work_sb;
atomic_t j_async_throttle;
};
/* -o hash={tea, rupasov, r5, detect} is meant for properly mounting
** reiserfs disks from 3.5.19 or earlier. 99% of the time, this option
** is not required. If the normal autodetection code can't determine which
- ** hash to use (because both hases had the same value for a file)
+ ** hash to use (because both hashes had the same value for a file)
** use this option to force a specific hash. It won't allow you to override
** the existing hash on the FS, so if you have a tea hash disk, and mount
** with -o hash=rupasov, the mount will fail.
};
struct tty_bufhead {
- struct work_struct work;
+ struct delayed_work work;
struct semaphore pty_sem;
spinlock_t lock;
struct tty_buffer *head; /* Queue head */
extern int tty_unregister_ldisc(int disc);
extern int tty_register_driver(struct tty_driver *driver);
extern int tty_unregister_driver(struct tty_driver *driver);
- extern struct class_device *tty_register_device(struct tty_driver *driver,
- unsigned index,
- struct device *dev);
+ extern struct device *tty_register_device(struct tty_driver *driver,
+ unsigned index, struct device *dev);
extern void tty_unregister_device(struct tty_driver *driver, unsigned index);
extern int tty_read_raw_data(struct tty_struct *tty, unsigned char *bufp,
int buflen);
/* This is arbitrary.
* From USB 2.0 spec Table 11-13, offset 7, a hub can
* have up to 255 ports. The most yet reported is 10.
+ *
+ * Current Wireless USB host hardware (Intel i1480 for example) allows
+ * up to 22 devices to connect. Upcoming hardware might raise that
+ * limit. Because the arrays need to add a bit for hub status data, we
+ * do 31, so plus one evens out to four bytes.
*/
- #define USB_MAXCHILDREN (16)
+ #define USB_MAXCHILDREN (31)
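(The sizing arithmetic: the status bitmaps carry one extra bit beyond the ports, and 31 + 1 = 32 bits packs into exactly four bytes, whereas the old 16 + 1 = 17 bits rounded up to an odd three.)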
struct usb_tt;
u8 portnum; /* Parent port number (origin 1) */
u8 level; /* Number of USB hub ancestors */
- int have_langid; /* whether string_langid is valid */
+ unsigned discon_suspended:1; /* Disconnected while suspended */
+ unsigned have_langid:1; /* whether string_langid is valid */
int string_langid; /* language ID for strings */
/* static strings from the device */
int pm_usage_cnt; /* usage counter for autosuspend */
#ifdef CONFIG_PM
- struct work_struct autosuspend; /* for delayed autosuspends */
+ struct delayed_work autosuspend; /* for delayed autosuspends */
struct mutex pm_mutex; /* protects PM operations */
unsigned auto_pm:1; /* autosuspend/resume in progress */
/* USB autosuspend and autoresume */
#ifdef CONFIG_USB_SUSPEND
+ extern int usb_autopm_set_interface(struct usb_interface *intf);
extern int usb_autopm_get_interface(struct usb_interface *intf);
extern void usb_autopm_put_interface(struct usb_interface *intf);
+ static inline void usb_autopm_enable(struct usb_interface *intf)
+ {
+ intf->pm_usage_cnt = 0;
+ usb_autopm_set_interface(intf);
+ }
+
+ static inline void usb_autopm_disable(struct usb_interface *intf)
+ {
+ intf->pm_usage_cnt = 1;
+ usb_autopm_set_interface(intf);
+ }
+
#else
- #define usb_autopm_get_interface(intf) 0
- #define usb_autopm_put_interface(intf) do {} while (0)
- #endif
+ static inline int usb_autopm_set_interface(struct usb_interface *intf)
+ { return 0; }
+
+ static inline int usb_autopm_get_interface(struct usb_interface *intf)
+ { return 0; }
+
+ static inline void usb_autopm_put_interface(struct usb_interface *intf)
+ { }
+ static inline void usb_autopm_enable(struct usb_interface *intf)
+ { }
+ static inline void usb_autopm_disable(struct usb_interface *intf)
+ { }
+ #endif
/*-------------------------------------------------------------------------*/
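A rough sketch of how a driver would use these counters; the autopm calls are the API declared above, the I/O helper is hypothetical:

    /* hold the device awake across an operation */
    if (usb_autopm_get_interface(intf) == 0) {
            do_some_io(intf);               /* hypothetical */
            usb_autopm_put_interface(intf); /* may re-enable autosuspend */
    }

    /* administratively pin the device awake (pm_usage_cnt = 1) */
    usb_autopm_disable(intf);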
/*-------------------------------------------------------------------------*/
- extern int usb_endpoint_dir_in(const struct usb_endpoint_descriptor *epd);
- extern int usb_endpoint_dir_out(const struct usb_endpoint_descriptor *epd);
- extern int usb_endpoint_xfer_bulk(const struct usb_endpoint_descriptor *epd);
- extern int usb_endpoint_xfer_int(const struct usb_endpoint_descriptor *epd);
- extern int usb_endpoint_xfer_isoc(const struct usb_endpoint_descriptor *epd);
- extern int usb_endpoint_is_bulk_in(const struct usb_endpoint_descriptor *epd);
- extern int usb_endpoint_is_bulk_out(const struct usb_endpoint_descriptor *epd);
- extern int usb_endpoint_is_int_in(const struct usb_endpoint_descriptor *epd);
- extern int usb_endpoint_is_int_out(const struct usb_endpoint_descriptor *epd);
- extern int usb_endpoint_is_isoc_in(const struct usb_endpoint_descriptor *epd);
- extern int usb_endpoint_is_isoc_out(const struct usb_endpoint_descriptor *epd);
+ /**
+ * usb_endpoint_dir_in - check if the endpoint has IN direction
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint is of type IN, otherwise it returns false.
+ */
+ static inline int usb_endpoint_dir_in(const struct usb_endpoint_descriptor *epd)
+ {
+ return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN);
+ }
+
+ /**
+ * usb_endpoint_dir_out - check if the endpoint has OUT direction
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint is of type OUT, otherwise it returns false.
+ */
+ static inline int usb_endpoint_dir_out(const struct usb_endpoint_descriptor *epd)
+ {
+ return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT);
+ }
+
+ /**
+ * usb_endpoint_xfer_bulk - check if the endpoint has bulk transfer type
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint is of type bulk, otherwise it returns false.
+ */
+ static inline int usb_endpoint_xfer_bulk(const struct usb_endpoint_descriptor *epd)
+ {
+ return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+ USB_ENDPOINT_XFER_BULK);
+ }
+
+ /**
+ * usb_endpoint_xfer_int - check if the endpoint has interrupt transfer type
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint is of type interrupt, otherwise it returns
+ * false.
+ */
+ static inline int usb_endpoint_xfer_int(const struct usb_endpoint_descriptor *epd)
+ {
+ return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+ USB_ENDPOINT_XFER_INT);
+ }
+
+ /**
+ * usb_endpoint_xfer_isoc - check if the endpoint has isochronous transfer type
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint is of type isochronous, otherwise it returns
+ * false.
+ */
+ static inline int usb_endpoint_xfer_isoc(const struct usb_endpoint_descriptor *epd)
+ {
+ return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+ USB_ENDPOINT_XFER_ISOC);
+ }
+
+ /**
+ * usb_endpoint_is_bulk_in - check if the endpoint is bulk IN
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint has bulk transfer type and IN direction,
+ * otherwise it returns false.
+ */
+ static inline int usb_endpoint_is_bulk_in(const struct usb_endpoint_descriptor *epd)
+ {
+ return (usb_endpoint_xfer_bulk(epd) && usb_endpoint_dir_in(epd));
+ }
+
+ /**
+ * usb_endpoint_is_bulk_out - check if the endpoint is bulk OUT
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint has bulk transfer type and OUT direction,
+ * otherwise it returns false.
+ */
+ static inline int usb_endpoint_is_bulk_out(const struct usb_endpoint_descriptor *epd)
+ {
+ return (usb_endpoint_xfer_bulk(epd) && usb_endpoint_dir_out(epd));
+ }
+
+ /**
+ * usb_endpoint_is_int_in - check if the endpoint is interrupt IN
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint has interrupt transfer type and IN direction,
+ * otherwise it returns false.
+ */
+ static inline int usb_endpoint_is_int_in(const struct usb_endpoint_descriptor *epd)
+ {
+ return (usb_endpoint_xfer_int(epd) && usb_endpoint_dir_in(epd));
+ }
+
+ /**
+ * usb_endpoint_is_int_out - check if the endpoint is interrupt OUT
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint has interrupt transfer type and OUT direction,
+ * otherwise it returns false.
+ */
+ static inline int usb_endpoint_is_int_out(const struct usb_endpoint_descriptor *epd)
+ {
+ return (usb_endpoint_xfer_int(epd) && usb_endpoint_dir_out(epd));
+ }
+
+ /**
+ * usb_endpoint_is_isoc_in - check if the endpoint is isochronous IN
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint has isochronous transfer type and IN direction,
+ * otherwise it returns false.
+ */
+ static inline int usb_endpoint_is_isoc_in(const struct usb_endpoint_descriptor *epd)
+ {
+ return (usb_endpoint_xfer_isoc(epd) && usb_endpoint_dir_in(epd));
+ }
+
+ /**
+ * usb_endpoint_is_isoc_out - check if the endpoint is isochronous OUT
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint has isochronous transfer type and OUT direction,
+ * otherwise it returns false.
+ */
+ static inline int usb_endpoint_is_isoc_out(const struct usb_endpoint_descriptor *epd)
+ {
+ return (usb_endpoint_xfer_isoc(epd) && usb_endpoint_dir_out(epd));
+ }
/*-------------------------------------------------------------------------*/
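With these helpers inline, endpoint scans like the pegasus and aircable hunks earlier reduce to one-liners; a representative probe loop might look like this (the saved pointers are hypothetical):

    struct usb_host_interface *iface = intf->cur_altsetting;
    struct usb_endpoint_descriptor *bulk_in = NULL, *bulk_out = NULL, *int_in = NULL;
    int i;

    for (i = 0; i < iface->desc.bNumEndpoints; i++) {
            struct usb_endpoint_descriptor *epd = &iface->endpoint[i].desc;

            if (usb_endpoint_is_bulk_in(epd))
                    bulk_in = epd;
            else if (usb_endpoint_is_bulk_out(epd))
                    bulk_out = epd;
            else if (usb_endpoint_is_int_in(epd))
                    int_in = epd;
    }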
struct net_device *);
void (*dst_saddr) (union sctp_addr *saddr,
struct dst_entry *dst,
- unsigned short port);
+ __be16 port);
int (*cmp_addr) (const union sctp_addr *addr1,
const union sctp_addr *addr2);
void (*addr_copy) (union sctp_addr *dst,
struct sock *sk);
void (*from_addr_param) (union sctp_addr *,
union sctp_addr_param *,
- __u16 port, int iif);
+ __be16 port, int iif);
int (*to_addr_param) (const union sctp_addr *,
union sctp_addr_param *);
int (*addr_valid) (union sctp_addr *,
struct sctp_sock *,
const struct sk_buff *);
sctp_scope_t (*scope) (union sctp_addr *);
- void (*inaddr_any) (union sctp_addr *, unsigned short);
+ void (*inaddr_any) (union sctp_addr *, __be16);
int (*is_any) (const union sctp_addr *);
int (*available) (union sctp_addr *,
struct sctp_sock *);
struct sctp_sock *);
int (*bind_verify) (struct sctp_sock *, union sctp_addr *);
int (*send_verify) (struct sctp_sock *, union sctp_addr *);
- int (*supported_addrs)(const struct sctp_sock *, __u16 *);
+ int (*supported_addrs)(const struct sctp_sock *, __be16 *);
struct sock *(*create_accept_sk) (struct sock *sk,
struct sctp_association *asoc);
void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
void sctp_inq_free(struct sctp_inq *);
void sctp_inq_push(struct sctp_inq *, struct sctp_chunk *packet);
struct sctp_chunk *sctp_inq_pop(struct sctp_inq *);
-void sctp_inq_set_th_handler(struct sctp_inq *, void (*)(void *), void *);
+void sctp_inq_set_th_handler(struct sctp_inq *, work_func_t);
/* This is the structure we use to hold outbound chunks. You push
* chunks in and they automatically pop out the other end as bundled
* this here so we pre-allocate this once and can re-use
* on every receive.
*/
- __u8 digest[SCTP_SIGNATURE_SIZE];
+ __u8 *digest;
/* sendbuf acct. policy. */
__u32 sndbuf_policy;
__u32 sctp_generate_tag(const struct sctp_endpoint *);
__u32 sctp_generate_tsn(const struct sctp_endpoint *);
+ struct sctp_inithdr_host {
+ __u32 init_tag;
+ __u32 a_rwnd;
+ __u16 num_outbound_streams;
+ __u16 num_inbound_streams;
+ __u32 initial_tsn;
+ };
/* RFC2960
*
/* This mask is used to disable sending the ASCONF chunk
* with specified parameter to peer.
*/
- __u16 addip_disabled_mask;
+ __be16 addip_disabled_mask;
- struct sctp_inithdr i;
+ struct sctp_inithdr_host i;
int cookie_len;
void *cookie;
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_transport_sas.h>
+ #include <asm/scatterlist.h>
struct block_device;
void *lldd_dev;
};
+struct sas_discovery_event {
+ struct work_struct work;
+ struct asd_sas_port *port;
+};
+
struct sas_discovery {
spinlock_t disc_event_lock;
- struct work_struct disc_work[DISC_NUM_EVENTS];
+ struct sas_discovery_event disc_work[DISC_NUM_EVENTS];
unsigned long pending;
u8 fanout_sas_addr[8];
u8 eeds_a[8];
void *lldd_port; /* not touched by the sas class code */
};
+struct asd_sas_event {
+ struct work_struct work;
+ struct asd_sas_phy *phy;
+};
+
/* The phy pretty much is controlled by the LLDD.
* The class only reads those fields.
*/
struct asd_sas_phy {
/* private: */
/* protected by ha->event_lock */
- struct work_struct port_events[PORT_NUM_EVENTS];
- struct work_struct phy_events[PHY_NUM_EVENTS];
+ struct asd_sas_event port_events[PORT_NUM_EVENTS];
+ struct asd_sas_event phy_events[PHY_NUM_EVENTS];
unsigned long port_events_pending;
unsigned long phy_events_pending;
int queue_thread_kill;
};
+struct sas_ha_event {
+ struct work_struct work;
+ struct sas_ha_struct *ha;
+};
+
struct sas_ha_struct {
/* private: */
spinlock_t event_lock;
- struct work_struct ha_events[HA_NUM_EVENTS];
+ struct sas_ha_event ha_events[HA_NUM_EVENTS];
unsigned long pending;
struct scsi_core core;
#endif /* CONFIG_KMOD */
struct subprocess_info {
+ struct work_struct work;
struct completion *complete;
char *path;
char **argv;
}
/* This is run by khelper thread */
-static void __call_usermodehelper(void *data)
+static void __call_usermodehelper(struct work_struct *work)
{
- struct subprocess_info *sub_info = data;
+ struct subprocess_info *sub_info =
+ container_of(work, struct subprocess_info, work);
pid_t pid;
int wait = sub_info->wait;
{
DECLARE_COMPLETION_ONSTACK(done);
struct subprocess_info sub_info = {
+ .work = __WORK_INITIALIZER(sub_info.work,
+ __call_usermodehelper),
.complete = &done,
.path = path,
.argv = argv,
.wait = wait,
.retval = 0,
};
- DECLARE_WORK(work, __call_usermodehelper, &sub_info);
if (!khelper_wq)
return -EBUSY;
if (path[0] == '\0')
return 0;
- queue_work(khelper_wq, &work);
+ queue_work(khelper_wq, &sub_info.work);
wait_for_completion(&done);
return sub_info.retval;
}
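Because the work item is now embedded in subprocess_info, an on-stack instance can be set up with __WORK_INITIALIZER() right in the designated initializer, replacing the separate DECLARE_WORK(). Roughly, with hypothetical names:

    struct req {
            struct work_struct work;
            struct completion *done;
    };

    struct req r = {
            .work = __WORK_INITIALIZER(r.work, my_handler),
            .done = &done,
    };
    queue_work(my_wq, &r.work);
    wait_for_completion(&done);     /* the stack frame must outlive the work */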
{
DECLARE_COMPLETION(done);
struct subprocess_info sub_info = {
+ .work = __WORK_INITIALIZER(sub_info.work,
+ __call_usermodehelper),
.complete = &done,
.path = path,
.argv = argv,
.retval = 0,
};
struct file *f;
- DECLARE_WORK(work, __call_usermodehelper, &sub_info);
if (!khelper_wq)
return -EBUSY;
return 0;
f = create_write_pipe();
- if (!f)
- return -ENOMEM;
+ if (IS_ERR(f))
+ return PTR_ERR(f);
*filp = f;
f = create_read_pipe(f);
- if (!f) {
+ if (IS_ERR(f)) {
free_write_pipe(*filp);
- return -ENOMEM;
+ return PTR_ERR(f);
}
sub_info.stdin = f;
- queue_work(khelper_wq, &work);
+ queue_work(khelper_wq, &sub_info.work);
wait_for_completion(&done);
return sub_info.retval;
}
memset(rdesc, 0, ETH_ALEN);
/* offset 4 comes from LAN destination field in LE control frames */
if (trh->rcf & htons((uint16_t) TR_RCF_DIR_BIT))
- memcpy(&rdesc[4], &trh->rseg[num_rdsc - 2], sizeof(uint16_t));
+ memcpy(&rdesc[4], &trh->rseg[num_rdsc - 2], sizeof(__be16));
else {
- memcpy(&rdesc[4], &trh->rseg[1], sizeof(uint16_t));
+ memcpy(&rdesc[4], &trh->rseg[1], sizeof(__be16));
rdesc[5] = ((ntohs(trh->rseg[0]) & 0x000f) | (rdesc[5] & 0xf0));
}
unsigned char *src, *dst;
atm_return(vcc, skb->truesize);
- if (*(uint16_t *) skb->data == htons(priv->lecid) ||
+ if (*(__be16 *) skb->data == htons(priv->lecid) ||
!priv->lecd || !(dev->flags & IFF_UP)) {
/*
* Probably looping back, or if lecd is missing,
if (table == NULL)
return -1;
- *tlvs = kmalloc(table->sizeoftlvs, GFP_ATOMIC);
+ *tlvs = kmemdup(table->tlvs, table->sizeoftlvs, GFP_ATOMIC);
if (*tlvs == NULL)
return -1;
- memcpy(*tlvs, table->tlvs, table->sizeoftlvs);
*sizeoftlvs = table->sizeoftlvs;
return 0;
kfree(priv->tlvs); /* NULL if there was no previous association */
- priv->tlvs = kmalloc(sizeoftlvs, GFP_KERNEL);
+ priv->tlvs = kmemdup(tlvs, sizeoftlvs, GFP_KERNEL);
if (priv->tlvs == NULL)
return (0);
priv->sizeoftlvs = sizeoftlvs;
- memcpy(priv->tlvs, tlvs, sizeoftlvs);
skb = alloc_skb(sizeoftlvs, GFP_ATOMIC);
if (skb == NULL)
kfree(entry->tlvs);
- entry->tlvs = kmalloc(sizeoftlvs, GFP_KERNEL);
+ entry->tlvs = kmemdup(tlvs, sizeoftlvs, GFP_KERNEL);
if (entry->tlvs == NULL)
return;
-
entry->sizeoftlvs = sizeoftlvs;
- memcpy(entry->tlvs, tlvs, sizeoftlvs);
#endif
#if 0
printk("lec.c: lane2_associate_ind()\n");
#define LEC_ARP_REFRESH_INTERVAL (3*HZ)
-static void lec_arp_check_expire(void *data);
+static void lec_arp_check_expire(struct work_struct *work);
static void lec_arp_expire_arp(unsigned long data);
/*
INIT_HLIST_HEAD(&priv->lec_no_forward);
INIT_HLIST_HEAD(&priv->mcast_fwds);
spin_lock_init(&priv->lec_arp_lock);
- INIT_WORK(&priv->lec_arp_work, lec_arp_check_expire, priv);
+ INIT_DELAYED_WORK(&priv->lec_arp_work, lec_arp_check_expire);
schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL);
}
* to ESI_FORWARD_DIRECT. This causes the flush period to end
* regardless of the progress of the flush protocol.
*/
-static void lec_arp_check_expire(void *data)
+static void lec_arp_check_expire(struct work_struct *work)
{
unsigned long flags;
- struct lec_priv *priv = data;
+ struct lec_priv *priv =
+ container_of(work, struct lec_priv, lec_arp_work.work);
struct hlist_node *node, *next;
struct lec_arp_table *entry;
unsigned long now;
#define LEC_HEADER_LEN 16
struct lecdatahdr_8023 {
- unsigned short le_header;
+ __be16 le_header;
unsigned char h_dest[ETH_ALEN];
unsigned char h_source[ETH_ALEN];
- unsigned short h_type;
+ __be16 h_type;
};
struct lecdatahdr_8025 {
- unsigned short le_header;
+ __be16 le_header;
unsigned char ac_pad;
unsigned char fc;
unsigned char h_dest[ETH_ALEN];
spinlock_t lec_arp_lock;
struct atm_vcc *mcast_vcc; /* Default Multicast Send VCC */
struct atm_vcc *lecd;
- struct work_struct lec_arp_work; /* C10 */
+ struct delayed_work lec_arp_work; /* C10 */
unsigned int maximum_unknown_frame_count;
/*
* Within the period of time defined by this variable, the client will send
#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
- #define MAX_RETRIES 20000
- static DEFINE_SPINLOCK(skb_list_lock);
- static int nr_skbs;
- static struct sk_buff *skbs;
-
- static DEFINE_SPINLOCK(queue_lock);
- static int queue_depth;
- static struct sk_buff *queue_head, *queue_tail;
+ static struct sk_buff_head skb_pool;
static atomic_t trapped;
+ #define USEC_PER_POLL 50
#define NETPOLL_RX_ENABLED 1
#define NETPOLL_RX_DROP 2
static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);
-static void queue_process(void *p)
+static void queue_process(struct work_struct *work)
{
- unsigned long flags;
- struct netpoll_info *npinfo = p;
+ struct netpoll_info *npinfo =
+ container_of(work, struct netpoll_info, tx_work.work);
struct sk_buff *skb;
- while (queue_head) {
- spin_lock_irqsave(&queue_lock, flags);
-
- skb = queue_head;
- queue_head = skb->next;
- if (skb == queue_tail)
- queue_head = NULL;
-
- queue_depth--;
-
- spin_unlock_irqrestore(&queue_lock, flags);
-
- dev_queue_xmit(skb);
- }
- }
+ while ((skb = skb_dequeue(&npinfo->txq))) {
+ struct net_device *dev = skb->dev;
- static DECLARE_WORK(send_queue, queue_process);
+ if (!netif_device_present(dev) || !netif_running(dev)) {
+ __kfree_skb(skb);
+ continue;
+ }
- void netpoll_queue(struct sk_buff *skb)
- {
- unsigned long flags;
+ netif_tx_lock_bh(dev);
+ if (netif_queue_stopped(dev) ||
+ dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
+ skb_queue_head(&npinfo->txq, skb);
+ netif_tx_unlock_bh(dev);
- if (queue_depth == MAX_QUEUE_DEPTH) {
- __kfree_skb(skb);
- return;
+ schedule_delayed_work(&npinfo->tx_work, HZ/10);
+ return;
+ }
-
- netif_tx_unlock_bh(dev);
}
-
- spin_lock_irqsave(&queue_lock, flags);
- if (!queue_head)
- queue_head = skb;
- else
- queue_tail->next = skb;
- queue_tail = skb;
- queue_depth++;
- spin_unlock_irqrestore(&queue_lock, flags);
-
- schedule_work(&send_queue);
}
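The rewritten transmit path never busy-waits on the caller's behalf: when the device is stopped or rejects the skb, the packet is parked on npinfo->txq and the delayed work retries from process context, which both preserves FIFO ordering and avoids recursing into the driver. The idiom, stripped to its core (the condition name is hypothetical):

    if (xmit_failed) {
            skb_queue_tail(&npinfo->txq, skb);              /* keep ordering */
            schedule_delayed_work(&npinfo->tx_work, 0);     /* retry later */
    }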
- static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
- unsigned short ulen, u32 saddr, u32 daddr)
+ static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
+ unsigned short ulen, __be32 saddr, __be32 daddr)
{
- unsigned int psum;
+ __wsum psum;
if (uh->check == 0 || skb->ip_summed == CHECKSUM_UNNECESSARY)
return 0;
psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);
if (skb->ip_summed == CHECKSUM_COMPLETE &&
- !(u16)csum_fold(csum_add(psum, skb->csum)))
+ !csum_fold(csum_add(psum, skb->csum)))
return 0;
skb->csum = psum;
arp_reply(skb);
skb = skb_dequeue(&npi->arp_tx);
}
- return;
}
void netpoll_poll(struct netpoll *np)
{
- if(!np->dev || !netif_running(np->dev) || !np->dev->poll_controller)
+ if (!np->dev || !netif_running(np->dev) || !np->dev->poll_controller)
return;
/* Process pending work on NIC */
struct sk_buff *skb;
unsigned long flags;
- spin_lock_irqsave(&skb_list_lock, flags);
- while (nr_skbs < MAX_SKBS) {
+ spin_lock_irqsave(&skb_pool.lock, flags);
+ while (skb_pool.qlen < MAX_SKBS) {
skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
if (!skb)
break;
- skb->next = skbs;
- skbs = skb;
- nr_skbs++;
+ __skb_queue_tail(&skb_pool, skb);
}
- spin_unlock_irqrestore(&skb_list_lock, flags);
+ spin_unlock_irqrestore(&skb_pool.lock, flags);
}
static void zap_completion_queue(void)
while (clist != NULL) {
struct sk_buff *skb = clist;
clist = clist->next;
- if(skb->destructor)
+ if (skb->destructor)
dev_kfree_skb_any(skb); /* put this one back */
else
__kfree_skb(skb);
put_cpu_var(softnet_data);
}
- static struct sk_buff * find_skb(struct netpoll *np, int len, int reserve)
+ static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
- int once = 1, count = 0;
- unsigned long flags;
- struct sk_buff *skb = NULL;
+ int count = 0;
+ struct sk_buff *skb;
zap_completion_queue();
+ refill_skbs();
repeat:
- if (nr_skbs < MAX_SKBS)
- refill_skbs();
skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb)
+ skb = skb_dequeue(&skb_pool);
if (!skb) {
- spin_lock_irqsave(&skb_list_lock, flags);
- skb = skbs;
- if (skb) {
- skbs = skb->next;
- skb->next = NULL;
- nr_skbs--;
+ if (++count < 10) {
+ netpoll_poll(np);
+ goto repeat;
}
- spin_unlock_irqrestore(&skb_list_lock, flags);
- }
-
- if(!skb) {
- count++;
- if (once && (count == 1000000)) {
- printk("out of netpoll skbs!\n");
- once = 0;
- }
- netpoll_poll(np);
- goto repeat;
+ return NULL;
}
atomic_set(&skb->users, 1);
static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
- int status;
- struct netpoll_info *npinfo;
-
- if (!np || !np->dev || !netif_running(np->dev)) {
- __kfree_skb(skb);
- return;
- }
-
- npinfo = np->dev->npinfo;
-
- /* avoid recursion */
- if (npinfo->poll_owner == smp_processor_id() ||
- np->dev->xmit_lock_owner == smp_processor_id()) {
- if (np->drop)
- np->drop(skb);
- else
- __kfree_skb(skb);
- return;
- }
-
- do {
- npinfo->tries--;
- netif_tx_lock(np->dev);
+ int status = NETDEV_TX_BUSY;
+ unsigned long tries;
+ struct net_device *dev = np->dev;
+ struct netpoll_info *npinfo = np->dev->npinfo;
+
+ if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
+ __kfree_skb(skb);
+ return;
+ }
+
+ /* don't get messages out of order, and no recursion */
+ if (skb_queue_len(&npinfo->txq) == 0 &&
+ npinfo->poll_owner != smp_processor_id() &&
+ netif_tx_trylock(dev)) {
+ /* try until next clock tick */
+ for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; tries > 0; --tries) {
+ if (!netif_queue_stopped(dev))
+ status = dev->hard_start_xmit(skb, dev);
- /*
- * network drivers do not expect to be called if the queue is
- * stopped.
- */
- status = NETDEV_TX_BUSY;
- if (!netif_queue_stopped(np->dev))
- status = np->dev->hard_start_xmit(skb, np->dev);
+ if (status == NETDEV_TX_OK)
+ break;
- netif_tx_unlock(np->dev);
+ /* tickle device maybe there is some cleanup */
+ netpoll_poll(np);
- /* success */
- if(!status) {
- npinfo->tries = MAX_RETRIES; /* reset */
- return;
+ udelay(USEC_PER_POLL);
}
+ netif_tx_unlock(dev);
+ }
- /* transmit busy */
- netpoll_poll(np);
- udelay(50);
- } while (npinfo->tries > 0);
+ if (status != NETDEV_TX_OK) {
+ skb_queue_tail(&npinfo->txq, skb);
- schedule_work(&npinfo->tx_work);
+ schedule_delayed_work(&npinfo->tx_work, 0);
+ }
}
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
udp_len, IPPROTO_UDP,
csum_partial((unsigned char *)udph, udp_len, 0));
if (udph->check == 0)
- udph->check = -1;
+ udph->check = CSUM_MANGLED_0;
skb->nh.iph = iph = (struct iphdr *)skb_push(skb, sizeof(*iph));
struct arphdr *arp;
unsigned char *arp_ptr;
int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
- u32 sip, tip;
+ __be32 sip, tip;
struct sk_buff *send_skb;
struct netpoll *np = NULL;
if (np->dev->hard_header &&
np->dev->hard_header(send_skb, skb->dev, ptype,
- np->remote_mac, np->local_mac,
- send_skb->len) < 0) {
+ np->remote_mac, np->local_mac,
+ send_skb->len) < 0) {
kfree_skb(send_skb);
return;
}
struct netpoll_info *npi = skb->dev->npinfo;
struct netpoll *np = npi->rx_np;
-
if (!np)
goto out;
if (skb->dev->type != ARPHRD_ETHER)
{
char *cur=opt, *delim;
- if(*cur != '@') {
+ if (*cur != '@') {
if ((delim = strchr(cur, '@')) == NULL)
goto parse_failed;
- *delim=0;
- np->local_port=simple_strtol(cur, NULL, 10);
- cur=delim;
+ *delim = 0;
+ np->local_port = simple_strtol(cur, NULL, 10);
+ cur = delim;
}
cur++;
printk(KERN_INFO "%s: local port %d\n", np->name, np->local_port);
- if(*cur != '/') {
+ if (*cur != '/') {
if ((delim = strchr(cur, '/')) == NULL)
goto parse_failed;
- *delim=0;
- np->local_ip=ntohl(in_aton(cur));
- cur=delim;
+ *delim = 0;
+ np->local_ip = ntohl(in_aton(cur));
+ cur = delim;
printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
np->name, HIPQUAD(np->local_ip));
}
cur++;
- if ( *cur != ',') {
+ if (*cur != ',') {
/* parse out dev name */
if ((delim = strchr(cur, ',')) == NULL)
goto parse_failed;
- *delim=0;
+ *delim = 0;
strlcpy(np->dev_name, cur, sizeof(np->dev_name));
- cur=delim;
+ cur = delim;
}
cur++;
printk(KERN_INFO "%s: interface %s\n", np->name, np->dev_name);
- if ( *cur != '@' ) {
+ if (*cur != '@') {
/* dst port */
if ((delim = strchr(cur, '@')) == NULL)
goto parse_failed;
- *delim=0;
- np->remote_port=simple_strtol(cur, NULL, 10);
- cur=delim;
+ *delim = 0;
+ np->remote_port = simple_strtol(cur, NULL, 10);
+ cur = delim;
}
cur++;
printk(KERN_INFO "%s: remote port %d\n", np->name, np->remote_port);
/* dst ip */
if ((delim = strchr(cur, '/')) == NULL)
goto parse_failed;
- *delim=0;
- np->remote_ip=ntohl(in_aton(cur));
- cur=delim+1;
+ *delim = 0;
+ np->remote_ip = ntohl(in_aton(cur));
+ cur = delim + 1;
printk(KERN_INFO "%s: remote IP %d.%d.%d.%d\n",
- np->name, HIPQUAD(np->remote_ip));
+ np->name, HIPQUAD(np->remote_ip));
- if( *cur != 0 )
- {
+ if (*cur != 0) {
/* MAC address */
if ((delim = strchr(cur, ':')) == NULL)
goto parse_failed;
- *delim=0;
- np->remote_mac[0]=simple_strtol(cur, NULL, 16);
- cur=delim+1;
+ *delim = 0;
+ np->remote_mac[0] = simple_strtol(cur, NULL, 16);
+ cur = delim + 1;
if ((delim = strchr(cur, ':')) == NULL)
goto parse_failed;
- *delim=0;
- np->remote_mac[1]=simple_strtol(cur, NULL, 16);
- cur=delim+1;
+ *delim = 0;
+ np->remote_mac[1] = simple_strtol(cur, NULL, 16);
+ cur = delim + 1;
if ((delim = strchr(cur, ':')) == NULL)
goto parse_failed;
- *delim=0;
- np->remote_mac[2]=simple_strtol(cur, NULL, 16);
- cur=delim+1;
+ *delim = 0;
+ np->remote_mac[2] = simple_strtol(cur, NULL, 16);
+ cur = delim + 1;
if ((delim = strchr(cur, ':')) == NULL)
goto parse_failed;
- *delim=0;
- np->remote_mac[3]=simple_strtol(cur, NULL, 16);
- cur=delim+1;
+ *delim = 0;
+ np->remote_mac[3] = simple_strtol(cur, NULL, 16);
+ cur = delim + 1;
if ((delim = strchr(cur, ':')) == NULL)
goto parse_failed;
- *delim=0;
- np->remote_mac[4]=simple_strtol(cur, NULL, 16);
- cur=delim+1;
- np->remote_mac[5]=simple_strtol(cur, NULL, 16);
+ *delim = 0;
+ np->remote_mac[4] = simple_strtol(cur, NULL, 16);
+ cur = delim + 1;
+ np->remote_mac[5] = simple_strtol(cur, NULL, 16);
}
printk(KERN_INFO "%s: remote ethernet address "
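For reference, the option string being parsed is the usual netpoll syntax, local_port@local_ip/dev,remote_port@remote_ip/remote_mac, with every field optional up to its delimiter; for example:

    netconsole=4444@10.0.0.1/eth1,9353@10.0.0.2/12:34:56:78:9a:bc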
struct in_device *in_dev;
struct netpoll_info *npinfo;
unsigned long flags;
+ int err;
if (np->dev_name)
ndev = dev_get_by_name(np->dev_name);
if (!ndev) {
printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
np->name, np->dev_name);
- return -1;
+ return -ENODEV;
}
np->dev = ndev;
if (!ndev->npinfo) {
npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
- if (!npinfo)
+ if (!npinfo) {
+ err = -ENOMEM;
goto release;
+ }
npinfo->rx_flags = 0;
npinfo->rx_np = NULL;
spin_lock_init(&npinfo->poll_lock);
npinfo->poll_owner = -1;
- npinfo->tries = MAX_RETRIES;
+
spin_lock_init(&npinfo->rx_lock);
skb_queue_head_init(&npinfo->arp_tx);
- } else
+ skb_queue_head_init(&npinfo->txq);
- INIT_WORK(&npinfo->tx_work, queue_process, npinfo);
+ INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
+
+ atomic_set(&npinfo->refcnt, 1);
+ } else {
npinfo = ndev->npinfo;
+ atomic_inc(&npinfo->refcnt);
+ }
if (!ndev->poll_controller) {
printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
np->name, np->dev_name);
+ err = -ENOTSUPP;
goto release;
}
np->name, np->dev_name);
rtnl_lock();
- if (dev_change_flags(ndev, ndev->flags | IFF_UP) < 0) {
+ err = dev_open(ndev);
+ rtnl_unlock();
+
+ if (err) {
printk(KERN_ERR "%s: failed to open %s\n",
- np->name, np->dev_name);
- rtnl_unlock();
+ np->name, ndev->name);
goto release;
}
- rtnl_unlock();
atleast = jiffies + HZ/10;
atmost = jiffies + 4*HZ;
rcu_read_unlock();
printk(KERN_ERR "%s: no IP address for %s, aborting\n",
np->name, np->dev_name);
+ err = -EDESTADDRREQ;
goto release;
}
kfree(npinfo);
np->dev = NULL;
dev_put(ndev);
- return -1;
+ return err;
}
+ static int __init netpoll_init(void)
+ {
+ skb_queue_head_init(&skb_pool);
+ return 0;
+ }
+ core_initcall(netpoll_init);
+
void netpoll_cleanup(struct netpoll *np)
{
struct netpoll_info *npinfo;
if (np->dev) {
npinfo = np->dev->npinfo;
- if (npinfo && npinfo->rx_np == np) {
- spin_lock_irqsave(&npinfo->rx_lock, flags);
- npinfo->rx_np = NULL;
- npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
- spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+ if (npinfo) {
+ if (npinfo->rx_np == np) {
+ spin_lock_irqsave(&npinfo->rx_lock, flags);
+ npinfo->rx_np = NULL;
+ npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
+ spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+ }
+
+ np->dev->npinfo = NULL;
+ if (atomic_dec_and_test(&npinfo->refcnt)) {
+ skb_queue_purge(&npinfo->arp_tx);
+ skb_queue_purge(&npinfo->txq);
+ cancel_rearming_delayed_work(&npinfo->tx_work);
+ flush_scheduled_work();
+
+ kfree(npinfo);
+ }
}
+
dev_put(np->dev);
}
EXPORT_SYMBOL(netpoll_cleanup);
EXPORT_SYMBOL(netpoll_send_udp);
EXPORT_SYMBOL(netpoll_poll);
- EXPORT_SYMBOL(netpoll_queue);
*/
#include <linux/dccp.h>
+ #include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
.tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0,
(unsigned long)&dccp_death_row),
.twkill_work = __WORK_INITIALIZER(dccp_death_row.twkill_work,
- inet_twdr_twkill_work,
- &dccp_death_row),
+ inet_twdr_twkill_work),
/* Short-time timewait calendar */
.twcal_hand = -1,
* socket up. We've got bigger problems than
* non-graceful socket closings.
*/
- LIMIT_NETDEBUG(KERN_INFO "DCCP: time wait bucket "
- "table overflow\n");
+ DCCP_WARN("time wait bucket table overflow\n");
}
dccp_done(sk);
/*
* Step 3: Process LISTEN state
*
- * // Generate a new socket and switch to that socket
- * Set S := new socket for this port pair
+ * (* Generate a new socket and switch to that socket *)
+ * Set S := new socket for this port pair
*/
struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC);
/*
* Step 3: Process LISTEN state
*
- * Choose S.ISS (initial seqno) or set from Init Cookie
- * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init
- * Cookie
+ * Choose S.ISS (initial seqno) or set from Init Cookies
+ * Initialize S.GAR := S.ISS
+ * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
*/
/* See dccp_v4_conn_request */
/* Check for retransmitted REQUEST */
if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) {
- if (after48(DCCP_SKB_CB(skb)->dccpd_seq,
- dccp_rsk(req)->dreq_isr)) {
- struct dccp_request_sock *dreq = dccp_rsk(req);
+ struct dccp_request_sock *dreq = dccp_rsk(req);
+ if (after48(DCCP_SKB_CB(skb)->dccpd_seq, dreq->dreq_isr)) {
dccp_pr_debug("Retransmitted REQUEST\n");
- /* Send another RESPONSE packet */
- dccp_set_seqno(&dreq->dreq_iss, dreq->dreq_iss + 1);
- dccp_set_seqno(&dreq->dreq_isr,
- DCCP_SKB_CB(skb)->dccpd_seq);
+ dreq->dreq_isr = DCCP_SKB_CB(skb)->dccpd_seq;
+ /*
+ * Send another RESPONSE packet
+ * To protect against Request floods, increment retrans
+ * counter (backoff, monitored by dccp_response_timer).
+ */
+ req->retrans++;
req->rsk_ops->rtx_syn_ack(sk, req, NULL);
}
/* Network Duplicate, discard packet */
DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
drop:
if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET)
- req->rsk_ops->send_reset(skb);
+ req->rsk_ops->send_reset(sk, skb);
inet_csk_reqsk_queue_drop(sk, req, prev);
goto out;
}
EXPORT_SYMBOL_GPL(dccp_child_process);
+
+ void dccp_reqsk_send_ack(struct sk_buff *skb, struct request_sock *rsk)
+ {
+ DCCP_BUG("DCCP-ACK packets are never sent in LISTEN/RESPOND state");
+ }
+
+ EXPORT_SYMBOL_GPL(dccp_reqsk_send_ack);
+
+ void dccp_reqsk_init(struct request_sock *req, struct sk_buff *skb)
+ {
+ inet_rsk(req)->rmt_port = dccp_hdr(skb)->dccph_sport;
+ inet_rsk(req)->acked = 0;
+ req->rcv_wnd = sysctl_dccp_feat_sequence_window;
+ }
+
+ EXPORT_SYMBOL_GPL(dccp_reqsk_init);
#include "ieee80211softmac_priv.h"
-static void ieee80211softmac_auth_queue(void *data);
+static void ieee80211softmac_auth_queue(struct work_struct *work);
/* Queues an auth request to the desired AP */
int
auth->mac = mac;
auth->retry = IEEE80211SOFTMAC_AUTH_RETRY_LIMIT;
auth->state = IEEE80211SOFTMAC_AUTH_OPEN_REQUEST;
- INIT_WORK(&auth->work, &ieee80211softmac_auth_queue, (void *)auth);
+ INIT_DELAYED_WORK(&auth->work, ieee80211softmac_auth_queue);
/* Lock (for list) */
spin_lock_irqsave(&mac->lock, flags);
/* add to list */
list_add_tail(&auth->list, &mac->auth_queue);
- schedule_work(&auth->work);
+ schedule_delayed_work(&auth->work, 0);
spin_unlock_irqrestore(&mac->lock, flags);
return 0;
/* Sends an auth request to the desired AP and handles timeouts */
static void
-ieee80211softmac_auth_queue(void *data)
+ieee80211softmac_auth_queue(struct work_struct *work)
{
struct ieee80211softmac_device *mac;
struct ieee80211softmac_auth_queue_item *auth;
struct ieee80211softmac_network *net;
unsigned long flags;
- auth = (struct ieee80211softmac_auth_queue_item *)data;
+ auth = container_of(work, struct ieee80211softmac_auth_queue_item,
+ work.work);
net = auth->net;
mac = auth->mac;
/* Sends a response to an auth challenge (for shared key auth). */
static void
-ieee80211softmac_auth_challenge_response(void *_aq)
+ieee80211softmac_auth_challenge_response(struct work_struct *work)
{
- struct ieee80211softmac_auth_queue_item *aq = _aq;
+ struct ieee80211softmac_auth_queue_item *aq =
+ container_of(work, struct ieee80211softmac_auth_queue_item,
+ work.work);
/* Send our response */
ieee80211softmac_send_mgt_frame(aq->mac, aq->net, IEEE80211_STYPE_AUTH, aq->state);
/* Make sure that we've got an auth queue item for this request */
if(aq == NULL)
{
- printkl(KERN_DEBUG PFX "Authentication response received from "MAC_FMT" but no queue item exists.\n", MAC_ARG(auth->header.addr2));
+ dprintkl(KERN_DEBUG PFX "Authentication response received from "MAC_FMT" but no queue item exists.\n", MAC_ARG(auth->header.addr2));
/* Error #? */
return -1;
}
/* Check for out of order authentication */
if(!net->authenticating)
{
- printkl(KERN_DEBUG PFX "Authentication response received from "MAC_FMT" but did not request authentication.\n",MAC_ARG(auth->header.addr2));
+ dprintkl(KERN_DEBUG PFX "Authentication response received from "MAC_FMT" but did not request authentication.\n",MAC_ARG(auth->header.addr2));
return -1;
}
net->challenge_len = *data++;
if (net->challenge_len > WLAN_AUTH_CHALLENGE_LEN)
net->challenge_len = WLAN_AUTH_CHALLENGE_LEN;
- if (net->challenge != NULL)
- kfree(net->challenge);
- net->challenge = kmalloc(net->challenge_len, GFP_ATOMIC);
- memcpy(net->challenge, data, net->challenge_len);
+ kfree(net->challenge);
+ net->challenge = kmemdup(data, net->challenge_len,
+ GFP_ATOMIC);
+ if (net->challenge == NULL) {
+ printkl(KERN_NOTICE PFX "Shared Key "
+ "Authentication failed due to "
+ "memory shortage.\n");
+ spin_unlock_irqrestore(&mac->lock, flags);
+ break;
+ }
aq->state = IEEE80211SOFTMAC_AUTH_SHARED_RESPONSE;
/* We reuse the work struct from the auth request here.
* we have obviously already sent the initial auth
* request. */
cancel_delayed_work(&aq->work);
- INIT_WORK(&aq->work, &ieee80211softmac_auth_challenge_response, (void *)aq);
- schedule_work(&aq->work);
+ INIT_DELAYED_WORK(&aq->work, &ieee80211softmac_auth_challenge_response);
+ schedule_delayed_work(&aq->work, 0);
spin_unlock_irqrestore(&mac->lock, flags);
return 0;
case IEEE80211SOFTMAC_AUTH_SHARED_PASS:
/* Make sure the network is authenticated */
if (!net->authenticated)
{
- printkl(KERN_DEBUG PFX "Can't send deauthentication packet, network is not authenticated.\n");
+ dprintkl(KERN_DEBUG PFX "Can't send deauthentication packet, network is not authenticated.\n");
/* Error okay? */
return -EPERM;
}
net = ieee80211softmac_get_network_by_bssid(mac, deauth->header.addr2);
if (net == NULL) {
- printkl(KERN_DEBUG PFX "Received deauthentication packet from "MAC_FMT", but that network is unknown.\n",
+ dprintkl(KERN_DEBUG PFX "Received deauthentication packet from "MAC_FMT", but that network is unknown.\n",
MAC_ARG(deauth->header.addr2));
return 0;
}
/* Make sure the network is authenticated */
if(!net->authenticated)
{
- printkl(KERN_DEBUG PFX "Can't perform deauthentication, network is not authenticated.\n");
+ dprintkl(KERN_DEBUG PFX "Can't perform deauthentication, network is not authenticated.\n");
/* Error okay? */
return -EPERM;
}
ieee80211softmac_deauth_from_net(mac, net);
/* let's try to re-associate */
- schedule_work(&mac->associnfo.work);
+ schedule_delayed_work(&mac->associnfo.work, 0);
return 0;
}
sm->scanning = 1;
spin_unlock_irqrestore(&sm->lock, flags);
- netif_tx_disable(sm->ieee->dev);
ret = sm->start_scan(sm->dev);
if (ret) {
spin_lock_irqsave(&sm->lock, flags);
/* internal scanning implementation follows */
-void ieee80211softmac_scan(void *d)
+void ieee80211softmac_scan(struct work_struct *work)
{
int invalid_channel;
u8 current_channel_idx;
- struct ieee80211softmac_device *sm = (struct ieee80211softmac_device *)d;
- struct ieee80211softmac_scaninfo *si = sm->scaninfo;
+ struct ieee80211softmac_scaninfo *si =
+ container_of(work, struct ieee80211softmac_scaninfo,
+ softmac_scan.work);
+ struct ieee80211softmac_device *sm = si->mac;
unsigned long flags;
while (!(si->stop) && (si->current_channel_idx < si->number_channels)) {
si->started = 0;
spin_unlock_irqrestore(&sm->lock, flags);
- dprintk(PFX "Scanning finished\n");
+ dprintk(PFX "Scanning finished: scanned %d channels starting with channel %d\n",
+ sm->scaninfo->number_channels, sm->scaninfo->channels[0].channel);
ieee80211softmac_scan_finished(sm);
complete_all(&sm->scaninfo->finished);
}
struct ieee80211softmac_scaninfo *info = kmalloc(sizeof(struct ieee80211softmac_scaninfo), GFP_ATOMIC);
if (unlikely(!info))
return NULL;
- INIT_WORK(&info->softmac_scan, ieee80211softmac_scan, mac);
+ INIT_DELAYED_WORK(&info->softmac_scan, ieee80211softmac_scan);
+ info->mac = mac;
init_completion(&info->finished);
return info;
}
sm->scaninfo->channels = sm->ieee->geo.bg;
sm->scaninfo->number_channels = sm->ieee->geo.bg_channels;
}
- dprintk(PFX "Start scanning with channel: %d\n", sm->scaninfo->channels[0].channel);
- dprintk(PFX "Scanning %d channels\n", sm->scaninfo->number_channels);
sm->scaninfo->current_channel_idx = 0;
sm->scaninfo->started = 1;
sm->scaninfo->stop = 0;
INIT_COMPLETION(sm->scaninfo->finished);
- schedule_work(&sm->scaninfo->softmac_scan);
+ schedule_delayed_work(&sm->scaninfo->softmac_scan, 0);
spin_unlock_irqrestore(&sm->lock, flags);
return 0;
}
if (net)
sm->set_channel(sm->dev, net->channel);
}
- netif_wake_queue(sm->ieee->dev);
ieee80211softmac_call_events(sm, IEEE80211SOFTMAC_EVENT_SCAN_FINISHED, NULL);
}
EXPORT_SYMBOL_GPL(ieee80211softmac_scan_finished);
.tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0,
(unsigned long)&tcp_death_row),
.twkill_work = __WORK_INITIALIZER(tcp_death_row.twkill_work,
- inet_twdr_twkill_work,
- &tcp_death_row),
+ inet_twdr_twkill_work),
/* Short-time timewait calendar */
.twcal_hand = -1,
tw->tw_ipv6only = np->ipv6only;
}
#endif
+
+ #ifdef CONFIG_TCP_MD5SIG
+ /*
+ * The timewait bucket does not have the key DB from the
+ * sock structure. We just make a quick copy of the
+ * md5 key being used (if indeed we are using one)
+ * so the timewait ack generating code has the key.
+ */
+ do {
+ struct tcp_md5sig_key *key;
+ memset(tcptw->tw_md5_key, 0, sizeof(tcptw->tw_md5_key));
+ tcptw->tw_md5_keylen = 0;
+ key = tp->af_specific->md5_lookup(sk, sk);
+ if (key != NULL) {
+ memcpy(&tcptw->tw_md5_key, key->key, key->keylen);
+ tcptw->tw_md5_keylen = key->keylen;
+ if (tcp_alloc_md5sig_pool() == NULL)
+ BUG();
+ }
+ } while(0);
+ #endif
+
/* Linkage updates. */
__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
* socket up. We've got bigger problems than
* non-graceful socket closings.
*/
- if (net_ratelimit())
- printk(KERN_INFO "TCP: time wait bucket table overflow\n");
+ LIMIT_NETDEBUG(KERN_INFO "TCP: time wait bucket table overflow\n");
}
tcp_update_metrics(sk);
tcp_done(sk);
}
+ void tcp_twsk_destructor(struct sock *sk)
+ {
+ #ifdef CONFIG_TCP_MD5SIG
+ struct tcp_timewait_sock *twsk = tcp_twsk(sk);
+ if (twsk->tw_md5_keylen)
+ tcp_put_md5sig_pool();
+ #endif
+ }
+
+ EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
+
/* This is not only more efficient than what we used to do, it eliminates
* a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
*
newtp->rx_opt.ts_recent_stamp = 0;
newtp->tcp_header_len = sizeof(struct tcphdr);
}
+ #ifdef CONFIG_TCP_MD5SIG
+ newtp->md5sig_info = NULL; /*XXX*/
+ if (newtp->af_specific->md5_lookup(sk, newsk))
+ newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
+ #endif
if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len)
newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
newtp->rx_opt.mss_clamp = req->mss;
struct request_sock **prev)
{
struct tcphdr *th = skb->h.th;
- u32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
+ __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
int paws_reject = 0;
struct tcp_options_received tmp_opt;
struct sock *child;
req, NULL);
if (child == NULL)
goto listen_overflow;
+ #ifdef CONFIG_TCP_MD5SIG
+ else {
+ /* Copy over the MD5 key from the original socket */
+ struct tcp_md5sig_key *key;
+ struct tcp_sock *tp = tcp_sk(sk);
+ key = tp->af_specific->md5_lookup(sk, child);
+ if (key != NULL) {
+ /*
+ * We're using one, so create a matching key on the
+ * newsk structure. If we fail to get memory then we
+ * end up not copying the key across. Shucks.
+ */
+ char *newkey = kmemdup(key->key, key->keylen,
+ GFP_ATOMIC);
+ if (newkey) {
+ if (!tcp_alloc_md5sig_pool())
+ BUG();
+ tp->af_specific->md5_add(child, child,
+ newkey,
+ key->keylen);
+ }
+ }
+ }
+ #endif
inet_csk_reqsk_queue_unlink(sk, req, prev);
inet_csk_reqsk_queue_removed(sk, req);
embryonic_reset:
NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
if (!(flg & TCP_FLAG_RST))
- req->rsk_ops->send_reset(skb);
+ req->rsk_ops->send_reset(sk, skb);
inet_csk_reqsk_queue_drop(sk, req, prev);
return NULL;
#include <net/sctp/sm.h>
/* Forward declarations for internal functions. */
-static void sctp_assoc_bh_rcv(struct sctp_association *asoc);
+static void sctp_assoc_bh_rcv(struct work_struct *work);
/* 1st Level Abstractions. */
/* Create an input queue. */
sctp_inq_init(&asoc->base.inqueue);
- sctp_inq_set_th_handler(&asoc->base.inqueue,
- (void (*)(void *))sctp_assoc_bh_rcv,
- asoc);
+ sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);
/* Create an output queue. */
sctp_outq_init(asoc, &asoc->outqueue);
" port: %d\n",
asoc,
(&peer->ipaddr),
- peer->ipaddr.v4.sin_port);
+ ntohs(peer->ipaddr.v4.sin_port));
/* If we are to remove the current retran_path, update it
* to the next peer before removing this peer from the list.
sp = sctp_sk(asoc->base.sk);
/* AF_INET and AF_INET6 share common port field. */
- port = addr->v4.sin_port;
+ port = ntohs(addr->v4.sin_port);
SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_add_peer:association %p addr: ",
" port: %d state:%d\n",
asoc,
addr,
- addr->v4.sin_port,
+ port,
peer_state);
/* Set the port if it has not been set yet. */
struct sctp_transport *first;
struct sctp_transport *second;
struct sctp_ulpevent *event;
+ struct sockaddr_storage addr;
struct list_head *pos;
int spc_state = 0;
/* Generate and send a SCTP_PEER_ADDR_CHANGE notification to the
* user.
*/
- event = sctp_ulpevent_make_peer_addr_change(asoc,
- (struct sockaddr_storage *) &transport->ipaddr,
+ memset(&addr, 0, sizeof(struct sockaddr_storage));
+ memcpy(&addr, &transport->ipaddr, transport->af_specific->sockaddr_len);
+ event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
0, spc_state, error, GFP_ATOMIC);
if (event)
sctp_ulpq_tail_event(&asoc->ulpq, event);
struct list_head *entry, *pos;
struct sctp_transport *transport;
struct sctp_chunk *chunk;
- __u32 key = htonl(tsn);
+ __be32 key = htonl(tsn);
match = NULL;
sctp_read_lock(&asoc->base.addr_lock);
- if ((asoc->base.bind_addr.port == laddr->v4.sin_port) &&
- (asoc->peer.port == paddr->v4.sin_port)) {
+ if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) &&
+ (htons(asoc->peer.port) == paddr->v4.sin_port)) {
transport = sctp_assoc_lookup_paddr(asoc, paddr);
if (!transport)
goto out;
}
/* Do delayed input processing. This is scheduled by sctp_rcv(). */
-static void sctp_assoc_bh_rcv(struct sctp_association *asoc)
+static void sctp_assoc_bh_rcv(struct work_struct *work)
{
+ struct sctp_association *asoc =
+ container_of(work, struct sctp_association,
+ base.inqueue.immediate);
struct sctp_endpoint *ep;
struct sctp_chunk *chunk;
struct sock *sk;
" port: %d\n",
asoc,
(&t->ipaddr),
- t->ipaddr.v4.sin_port);
+ ntohs(t->ipaddr.v4.sin_port));
}
/* Choose the transport for sending an INIT packet. */
" port: %d\n",
asoc,
(&t->ipaddr),
- t->ipaddr.v4.sin_port);
+ ntohs(t->ipaddr.v4.sin_port));
return t;
}
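Both SCTP receive handlers follow the same conversion recipe as the block-layer and CFQ hunks earlier in the series: the handler now takes the struct work_struct itself and recovers its owning object with container_of(), since INIT_WORK() no longer carries a data pointer. A condensed sketch of the before/after shape (my_ctx and my_handler are illustrative names):

#include <linux/workqueue.h>

struct my_ctx {
	struct work_struct	work;	/* must be embedded in the owner */
	int			payload;
};

/* New-style handler: the work item is the argument; the container is
 * recovered by pointer arithmetic, no opaque void * involved.
 */
static void my_handler(struct work_struct *work)
{
	struct my_ctx *ctx = container_of(work, struct my_ctx, work);

	ctx->payload++;			/* act on the owning object */
}

static void my_ctx_init(struct my_ctx *ctx)
{
	/* Old API: INIT_WORK(&ctx->work, my_handler, ctx);
	 * New API: no data argument, so embedding is mandatory.
	 */
	INIT_WORK(&ctx->work, my_handler);
}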
#include <net/sctp/sm.h>
/* Forward declarations for internal helpers. */
-static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep);
+static void sctp_endpoint_bh_rcv(struct work_struct *work);
/*
* Initialize the base fields of the endpoint structure.
{
memset(ep, 0, sizeof(struct sctp_endpoint));
+ ep->digest = kzalloc(SCTP_SIGNATURE_SIZE, gfp);
+ if (!ep->digest)
+ return NULL;
+
/* Initialize the base structure. */
/* What type of endpoint are we? */
ep->base.type = SCTP_EP_TYPE_SOCKET;
sctp_inq_init(&ep->base.inqueue);
/* Set its top-half handler */
- sctp_inq_set_th_handler(&ep->base.inqueue,
- (void (*)(void *))sctp_endpoint_bh_rcv, ep);
+ sctp_inq_set_th_handler(&ep->base.inqueue, sctp_endpoint_bh_rcv);
/* Initialize the bind addr area */
sctp_bind_addr_init(&ep->base.bind_addr, 0);
/* Free up the HMAC transform. */
crypto_free_hash(sctp_sk(ep->base.sk)->hmac);
+ /* Free the digest buffer */
+ kfree(ep->digest);
+
/* Cleanup. */
sctp_inq_free(&ep->base.inqueue);
sctp_bind_addr_free(&ep->base.bind_addr);
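The digest buffer now lives exactly as long as the endpoint: kzalloc() in the init path, with an early NULL return before anything else is set up, and kfree() in the teardown path next to the other per-endpoint cleanup. The pairing, sketched with hypothetical names:

#include <linux/slab.h>
#include <linux/types.h>

struct my_ep {
	u8 *digest;			/* heap copy, one per endpoint */
};

static struct my_ep *my_ep_init(struct my_ep *ep, gfp_t gfp, size_t len)
{
	ep->digest = kzalloc(len, gfp);
	if (!ep->digest)
		return NULL;		/* nothing else held yet; caller bails */
	return ep;
}

static void my_ep_destroy(struct my_ep *ep)
{
	kfree(ep->digest);		/* kfree(NULL) is a no-op, so always safe */
}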
struct sctp_endpoint *retval;
sctp_read_lock(&ep->base.addr_lock);
- if (ep->base.bind_addr.port == laddr->v4.sin_port) {
+ if (htons(ep->base.bind_addr.port) == laddr->v4.sin_port) {
if (sctp_bind_addr_match(&ep->base.bind_addr, laddr,
sctp_sk(ep->base.sk))) {
retval = ep;
struct sctp_association *asoc;
struct list_head *pos;
- rport = paddr->v4.sin_port;
+ rport = ntohs(paddr->v4.sin_port);
list_for_each(pos, &ep->asocs) {
asoc = list_entry(pos, struct sctp_association, asocs);
/* Do delayed input processing. This is scheduled by sctp_rcv().
* This may be called on BH or task time.
*/
-static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep)
+static void sctp_endpoint_bh_rcv(struct work_struct *work)
{
+ struct sctp_endpoint *ep =
+ container_of(work, struct sctp_endpoint,
+ base.inqueue.immediate);
struct sctp_association *asoc;
struct sock *sk;
struct sctp_transport *transport;
static struct xfrm_policy_afinfo *xfrm_policy_lock_afinfo(unsigned int family);
static void xfrm_policy_unlock_afinfo(struct xfrm_policy_afinfo *afinfo);
+ static inline int
+ __xfrm4_selector_match(struct xfrm_selector *sel, struct flowi *fl)
+ {
+ return addr_match(&fl->fl4_dst, &sel->daddr, sel->prefixlen_d) &&
+ addr_match(&fl->fl4_src, &sel->saddr, sel->prefixlen_s) &&
+ !((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
+ !((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
+ (fl->proto == sel->proto || !sel->proto) &&
+ (fl->oif == sel->ifindex || !sel->ifindex);
+ }
+
+ static inline int
+ __xfrm6_selector_match(struct xfrm_selector *sel, struct flowi *fl)
+ {
+ return addr_match(&fl->fl6_dst, &sel->daddr, sel->prefixlen_d) &&
+ addr_match(&fl->fl6_src, &sel->saddr, sel->prefixlen_s) &&
+ !((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
+ !((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
+ (fl->proto == sel->proto || !sel->proto) &&
+ (fl->oif == sel->ifindex || !sel->ifindex);
+ }
+
+ int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
+ unsigned short family)
+ {
+ switch (family) {
+ case AF_INET:
+ return __xfrm4_selector_match(sel, fl);
+ case AF_INET6:
+ return __xfrm6_selector_match(sel, fl);
+ }
+ return 0;
+ }
+
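The port tests in the new selector helpers use the mask-and-xor idiom: port ^ sel->port is zero in every bit where the two agree, so ANDing with the mask is zero exactly when all masked bits match -- a mask of 0 is a wildcard, 0xffff demands an exact match. A tiny standalone illustration of just that idiom:

#include <stdint.h>
#include <stdio.h>

/* Nonzero when 'port' matches 'want' under 'mask': bits outside the
 * mask are don't-cares, so mask 0 matches anything.
 */
static int port_match(uint16_t port, uint16_t want, uint16_t mask)
{
	return !((port ^ want) & mask);
}

int main(void)
{
	printf("%d\n", port_match(80, 80, 0xffff));	/* 1: exact match */
	printf("%d\n", port_match(81, 80, 0xffff));	/* 0: mismatch    */
	printf("%d\n", port_match(81, 80, 0x0000));	/* 1: wildcard    */
	return 0;
}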
int xfrm_register_type(struct xfrm_type *type, unsigned short family)
{
struct xfrm_policy_afinfo *afinfo = xfrm_policy_lock_afinfo(family);
xfrm_pol_put(policy);
}
-static void xfrm_policy_gc_task(void *data)
+static void xfrm_policy_gc_task(struct work_struct *work)
{
struct xfrm_policy *policy;
struct hlist_node *entry, *tmp;
static DEFINE_MUTEX(hash_resize_mutex);
-static void xfrm_hash_resize(void *__unused)
+static void xfrm_hash_resize(struct work_struct *__unused)
{
int dir, total;
mutex_unlock(&hash_resize_mutex);
}
-static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL);
+static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
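For statically declared work items the conversion is simpler still: DECLARE_WORK() drops its data argument, and a handler with no per-instance state, like xfrm_hash_resize() above, just ignores its work_struct parameter. A minimal sketch:

#include <linux/workqueue.h>

static void my_resize(struct work_struct *unused)
{
	/* stateless: operates on module-level data only */
}

/* Old API: static DECLARE_WORK(my_work, my_resize, NULL); */
static DECLARE_WORK(my_work, my_resize);

static void kick(void)
{
	schedule_work(&my_work);	/* defer the heavy lifting */
}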
/* Generate new index... KAME seems to generate them ordered by cost
 * of an absolute unpredictability of ordering of rules. This will not pass. */
if (tmpl->mode == XFRM_MODE_TUNNEL) {
remote = &tmpl->id.daddr;
local = &tmpl->saddr;
+ family = tmpl->encap_family;
if (xfrm_addr_any(local, family)) {
error = xfrm_get_saddr(&tmp, remote, family);
if (error)
if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family))
return 0;
- if (fl && !security_xfrm_flow_state_match(fl, dst->xfrm, pol))
+ if (fl && pol &&
+ !security_xfrm_state_pol_flow_match(dst->xfrm, pol, fl))
return 0;
if (dst->xfrm->km.state != XFRM_STATE_VALID)
return 0;
panic("XFRM: failed to allocate bydst hash\n");
}
- INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task, NULL);
+ INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task);
register_netdevice_notifier(&xfrm_dev_notifier);
}
static DEFINE_MUTEX(hash_resize_mutex);
-static void xfrm_hash_resize(void *__unused)
+static void xfrm_hash_resize(struct work_struct *__unused)
{
struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
unsigned long nsize, osize;
mutex_unlock(&hash_resize_mutex);
}
-static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL);
+static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
DECLARE_WAIT_QUEUE_HEAD(km_waitq);
EXPORT_SYMBOL(km_waitq);
kfree(x);
}
-static void xfrm_state_gc_task(void *data)
+static void xfrm_state_gc_task(struct work_struct *data)
{
struct xfrm_state *x;
struct hlist_node *entry, *tmp;
}
EXPORT_SYMBOL(km_query);
- int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport)
+ int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
{
int err = -EINVAL;
struct xfrm_mgr *km;
panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
- INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL);
+ INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
}