FB The frame buffer device is enabled.
HW Appropriate hardware is enabled.
IA-64 IA-64 architecture is enabled.
+ IMA Integrity measurement architecture is enabled.
IOSCHED More than one I/O scheduler is enabled.
IP_PNP IP DHCP, BOOTP, or RARP is enabled.
ISAPNP ISA PnP code is enabled.
hvc_iucv= [S390] Number of z/VM IUCV hypervisor console (HVC)
terminal devices. Valid values: 0..8
+ hvc_iucv_allow= [S390] Comma-separated list of z/VM user IDs.
+ If specified, z/VM IUCV HVC accepts connections
+ from listed z/VM user IDs only.
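+ For example (the guest IDs here are hypothetical),
+ booting with "hvc_iucv=4 hvc_iucv_allow=LNXSYS01,LNXSYS02"
+ provides four HVC terminals that accept IUCV
+ connections from those two z/VM guests only.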
i8042.debug [HW] Toggle i8042 debug mode
i8042.direct [HW] Put keyboard port into non-translated mode
ihash_entries= [KNL]
Set number of hash buckets for inode cache.
+ ima_audit= [IMA]
+ Format: { "0" | "1" }
+ 0 -- disable informational integrity auditing messages. (Default)
+ 1 -- enable informational integrity auditing messages.
+
+ ima_hash= [IMA]
+ Format: { "sha1" | "md5" }
+ default: "sha1"
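+ For example, booting with "ima_audit=1 ima_hash=md5"
+ enables informational audit messages and selects MD5
+ (assuming IMA support is compiled into the kernel).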
+
in2000= [HW,SCSI]
See header of drivers/scsi/in2000.c.
autoconfiguration.
Ranges are in pairs (memory base and size).
- dynamic_printk Enables pr_debug()/dev_dbg() calls if
- CONFIG_DYNAMIC_PRINTK_DEBUG has been enabled.
- These can also be switched on/off via
- <debugfs>/dynamic_printk/modules
-
print-fatal-signals=
[KNL] debug: print fatal signals
print-fatal-signals=1: print segfault info to
S: Supported
BROADCOM TG3 GIGABIT ETHERNET DRIVER
+P: Matt Carlson
P: Michael Chan
T: git kernel.org:/pub/scm/linux/kernel/git/herbert/crypto-2.6.git
S: Maintained
+CRYPTOGRAPHIC RANDOM NUMBER GENERATOR
+P: Neil Horman
+S: Maintained
+
CS5535 Audio ALSA driver
P: Jaya Kumar
S: Maintained
+INTEGRITY MEASUREMENT ARCHITECTURE (IMA)
+P: Mimi Zohar
+S: Supported
+
IMS TWINTURBO FRAMEBUFFER DRIVER
S: Orphan
S: Maintained
+RDS - RELIABLE DATAGRAM SOCKETS
+P: Andy Grover
+S: Supported
+
READ-COPY UPDATE (RCU)
P: Dipankar Sarma
W: http://www.ibm.com/developerworks/linux/linux390/
S: Supported
+ S390 ZCRYPT DRIVER
+ P: Felix Beck
+ P: Ralph Wuerthner
+ S: Supported
+
S390 ZFCP DRIVER
P: Christof Schmitt
T: git kernel.org:/pub/scm/linux/kernel/git/jmorris/security-testing-2.6.git
+W: http://security.wiki.kernel.org/
S: Supported
SECURITY CONTACT
W: http://sourceforge.net/projects/tlan/
S: Maintained
+TOMOYO SECURITY MODULE
+P: Kentaro Takeda
+P: Tetsuo Handa
+W: http://tomoyo.sourceforge.jp/
+T: quilt http://svn.sourceforge.jp/svnroot/tomoyo/trunk/2.2.x/tomoyo-lsm/patches/
+S: Maintained
+
TOSHIBA ACPI EXTRAS DRIVER
P: John Belmonte
#include <linux/timer.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
-
+ #include <asm/nmi.h>
#include "kvm-s390.h"
#include "gaccess.h"
setup_timer(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup,
(unsigned long) vcpu);
get_cpu_id(&vcpu->arch.cpu_id);
- vcpu->arch.cpu_id.version = 0xfe;
+ vcpu->arch.cpu_id.version = 0xff;
return 0;
}
return -EINVAL; /* not implemented yet */
}
-int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
- struct kvm_debug_guest *dbg)
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+ struct kvm_guest_debug *dbg)
{
return -EINVAL; /* not implemented yet */
}
return -EINVAL; /* not implemented yet */
}
- extern void s390_handle_mcck(void);
-
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);
rc = device_schedule_callback(dev, ccwgroup_ungroup_callback);
out:
if (rc) {
- /* Release onoff "lock" when ungrouping failed. */
- atomic_set(&gdev->onoff, 0);
+ if (rc != -EAGAIN)
+ /* Release onoff "lock" when ungrouping failed. */
+ atomic_set(&gdev->onoff, 0);
return rc;
}
return count;
}
EXPORT_SYMBOL(ccwgroup_create_from_string);
- static int __init
- init_ccwgroup (void)
+ static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
+ void *data);
+
+ static struct notifier_block ccwgroup_nb = {
+ .notifier_call = ccwgroup_notifier
+ };
+
+ static int __init init_ccwgroup(void)
{
- return bus_register (&ccwgroup_bus_type);
+ int ret;
+
+ ret = bus_register(&ccwgroup_bus_type);
+ if (ret)
+ return ret;
+
+ ret = bus_register_notifier(&ccwgroup_bus_type, &ccwgroup_nb);
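+ /* Undo the bus registration if the notifier cannot be added. */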
+ if (ret)
+ bus_unregister(&ccwgroup_bus_type);
+
+ return ret;
}
- static void __exit
- cleanup_ccwgroup (void)
+ static void __exit cleanup_ccwgroup(void)
{
- bus_unregister (&ccwgroup_bus_type);
+ bus_unregister_notifier(&ccwgroup_bus_type, &ccwgroup_nb);
+ bus_unregister(&ccwgroup_bus_type);
}
module_init(init_ccwgroup);
unsigned long value;
int ret;
- gdev = to_ccwgroupdev(dev);
if (!dev->driver)
- return count;
+ return -ENODEV;
+
+ gdev = to_ccwgroupdev(dev);
+ gdrv = to_ccwgroupdrv(dev->driver);
- gdrv = to_ccwgroupdrv (gdev->dev.driver);
if (!try_module_get(gdrv->owner))
return -EINVAL;
ret = strict_strtoul(buf, 0, &value);
if (ret)
goto out;
- ret = count;
+
if (value == 1)
- ccwgroup_set_online(gdev);
+ ret = ccwgroup_set_online(gdev);
else if (value == 0)
- ccwgroup_set_offline(gdev);
+ ret = ccwgroup_set_offline(gdev);
else
ret = -EINVAL;
out:
module_put(gdrv->owner);
- return ret;
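+ /* sysfs store convention: return the bytes consumed on success. */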
+ return (ret == 0) ? count : ret;
}
static ssize_t
struct ccwgroup_device *gdev;
struct ccwgroup_driver *gdrv;
+ device_remove_file(dev, &dev_attr_online);
+ device_remove_file(dev, &dev_attr_ungroup);
+
+ if (!dev->driver)
+ return 0;
+
gdev = to_ccwgroupdev(dev);
gdrv = to_ccwgroupdrv(dev->driver);
- device_remove_file(dev, &dev_attr_online);
-
- if (gdrv && gdrv->remove)
+ if (gdrv->remove)
gdrv->remove(gdev);
+
return 0;
}
struct ccwgroup_device *gdev;
struct ccwgroup_driver *gdrv;
+ if (!dev->driver)
+ return;
+
gdev = to_ccwgroupdev(dev);
gdrv = to_ccwgroupdrv(dev->driver);
- if (gdrv && gdrv->shutdown)
+
+ if (gdrv->shutdown)
gdrv->shutdown(gdev);
}
.shutdown = ccwgroup_shutdown,
};
+
+ static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
+ void *data)
+ {
+ struct device *dev = data;
+
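+ /* A driver is about to be unbound: schedule ungrouping of the device. */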
+ if (action == BUS_NOTIFY_UNBIND_DRIVER)
+ device_schedule_callback(dev, ccwgroup_ungroup_callback);
+
+ return NOTIFY_OK;
+ }
+
/**
* ccwgroup_driver_register() - register a ccw group driver
* @cdriver: driver to be registered
#include <linux/list.h>
#include <linux/reboot.h>
#include <asm/isc.h>
+ #include <asm/crw.h>
- #include "../s390mach.h"
#include "css.h"
#include "cio.h"
#include "cio_debug.h"
return rc;
}
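+ /*
+  * Brute-force fallback used when no idset can be allocated: called
+  * once for every subchannel ID, dispatching to the known- or
+  * unknown-subchannel callback as appropriate.
+  */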
+ static int call_fn_all_sch(struct subchannel_id schid, void *data)
+ {
+ struct cb_data *cb = data;
+ struct subchannel *sch;
+ int rc = 0;
+
+ sch = get_subchannel_by_schid(schid);
+ if (sch) {
+ if (cb->fn_known_sch)
+ rc = cb->fn_known_sch(sch, cb->data);
+ put_device(&sch->dev);
+ } else {
+ if (cb->fn_unknown_sch)
+ rc = cb->fn_unknown_sch(schid, cb->data);
+ }
+
+ return rc;
+ }
+
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
int (*fn_unknown)(struct subchannel_id,
void *), void *data)
struct cb_data cb;
int rc;
- cb.set = idset_sch_new();
- if (!cb.set)
- return -ENOMEM;
- idset_fill(cb.set);
cb.data = data;
cb.fn_known_sch = fn_known;
cb.fn_unknown_sch = fn_unknown;
+
+ cb.set = idset_sch_new();
+ if (!cb.set)
+ /* fall back to brute force scanning in case of oom */
+ return for_each_subchannel(call_fn_all_sch, &cb);
+
+ idset_fill(cb.set);
+
/* Process registered subchannels. */
rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
if (rc)
* the subchannel driver can decide itself when it wants to inform
* userspace of its existence.
*/
- sch->dev.uevent_suppress = 1;
+ dev_set_uevent_suppress(&sch->dev, 1);
css_update_ssd_info(sch);
/* make it known to the system */
ret = css_sch_device_register(sch);
* a fitting driver module may be loaded based on the
* modalias.
*/
- sch->dev.uevent_suppress = 0;
+ dev_set_uevent_suppress(&sch->dev, 0);
kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
}
return ret;
return ret;
}
+ static void reprobe_after_idle(struct work_struct *unused)
+ {
+ /* Make sure initial subchannel scan is done. */
+ wait_event(ccw_device_init_wq,
+ atomic_read(&ccw_device_init_count) == 0);
+ if (need_reprobe)
+ css_schedule_reprobe();
+ }
+
+ static DECLARE_WORK(reprobe_idle_work, reprobe_after_idle);
+
/* Work function used to reprobe all unregistered subchannels. */
static void reprobe_all(struct work_struct *unused)
{
CIO_MSG_EVENT(4, "reprobe start\n");
- need_reprobe = 0;
/* Make sure initial subchannel scan is done. */
- wait_event(ccw_device_init_wq,
- atomic_read(&ccw_device_init_count) == 0);
+ if (atomic_read(&ccw_device_init_count) != 0) {
+ queue_work(ccw_device_work, &reprobe_idle_work);
+ return;
+ }
+ need_reprobe = 0;
ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL);
CIO_MSG_EVENT(4, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
} else {
#ifdef CONFIG_SMP
- css->global_pgid.pgid_high.cpu_addr = hard_smp_processor_id();
+ css->global_pgid.pgid_high.cpu_addr = stap();
#else
css->global_pgid.pgid_high.cpu_addr = 0;
#endif
if (ret)
goto out;
- ret = s390_register_crw_handler(CRW_RSC_SCH, css_process_crw);
+ ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
if (ret)
goto out;
out_bus:
bus_unregister(&css_bus_type);
out:
- s390_unregister_crw_handler(CRW_RSC_CSS);
+ crw_unregister_handler(CRW_RSC_CSS);
chsc_free_sei_area();
kfree(slow_subchannel_set);
pr_alert("The CSS device driver initialization failed with "
return (ret == 0) ? -ENODEV : ret;
}
- static void online_store_handle_offline(struct ccw_device *cdev)
+ static int online_store_handle_offline(struct ccw_device *cdev)
{
if (cdev->private->state == DEV_STATE_DISCONNECTED)
ccw_device_remove_disconnected(cdev);
- else if (cdev->drv && cdev->drv->set_offline)
- ccw_device_set_offline(cdev);
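+ /* Only take devices offline that are actually online and
+  * propagate the result to the caller. */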
+ else if (cdev->online && cdev->drv && cdev->drv->set_offline)
+ return ccw_device_set_offline(cdev);
+ return 0;
}
static int online_store_recog_and_online(struct ccw_device *cdev)
goto out;
switch (i) {
case 0:
- online_store_handle_offline(cdev);
- ret = count;
+ ret = online_store_handle_offline(cdev);
break;
case 1:
ret = online_store_handle_online(cdev, force);
- if (!ret)
- ret = count;
break;
default:
ret = -EINVAL;
if (cdev->drv)
module_put(cdev->drv->owner);
atomic_set(&cdev->private->onoff, 0);
- return ret;
+ return (ret < 0) ? ret : count;
}
static ssize_t
return dev ? to_ccwdev(dev) : NULL;
}
- static void
- ccw_device_add_changed(struct work_struct *work)
- {
- struct ccw_device_private *priv;
- struct ccw_device *cdev;
-
- priv = container_of(work, struct ccw_device_private, kick_work);
- cdev = priv->cdev;
- if (device_add(&cdev->dev)) {
- put_device(&cdev->dev);
- return;
- }
- set_bit(1, &cdev->private->registered);
- }
-
- void ccw_device_do_unreg_rereg(struct work_struct *work)
+ void ccw_device_do_unbind_bind(struct work_struct *work)
{
struct ccw_device_private *priv;
struct ccw_device *cdev;
struct subchannel *sch;
+ int ret;
priv = container_of(work, struct ccw_device_private, kick_work);
cdev = priv->cdev;
sch = to_subchannel(cdev->dev.parent);
- ccw_device_unregister(cdev);
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_add_changed);
- queue_work(ccw_device_work, &cdev->private->kick_work);
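+ /* Instead of unregistering and re-adding the device, detach the
+  * driver and let the driver core bind a matching one again. */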
+ if (test_bit(1, &cdev->private->registered)) {
+ device_release_driver(&cdev->dev);
+ ret = device_attach(&cdev->dev);
+ WARN_ON(ret == -ENODEV);
+ }
}
static void
return;
other_sch = to_subchannel(cdev->dev.parent);
/* Note: device_move() changes cdev->dev.parent */
- ret = device_move(&cdev->dev, &sch->dev);
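+ /* DPM_ORDER_PARENT_BEFORE_DEV keeps the PM list ordered so the new
+  * parent is suspended after and resumed before the moved device. */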
+ ret = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
if (ret) {
CIO_MSG_EVENT(0, "Moving disconnected device 0.%x.%04x failed "
"(ret=%d)!\n", cdev->private->dev_id.ssid,
* Try to move the ccw device to its new subchannel.
* Note: device_move() changes cdev->dev.parent
*/
- ret = device_move(&cdev->dev, &sch->dev);
+ ret = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
if (ret) {
CIO_MSG_EVENT(0, "Moving device 0.%x.%04x from orphanage "
"failed (ret=%d)!\n",
* ccw device can take its place on the subchannel.
* Note: device_move() changes cdev->dev.parent
*/
- ret = device_move(&cdev->dev, &css->pseudo_subchannel->dev);
+ ret = device_move(&cdev->dev, &css->pseudo_subchannel->dev,
+ DPM_ORDER_NONE);
if (ret) {
CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to orphanage failed "
"(ret=%d)!\n", cdev->private->dev_id.ssid,
* Now we know this subchannel will stay, we can throw
* our delayed uevent.
*/
- sch->dev.uevent_suppress = 0;
+ dev_set_uevent_suppress(&sch->dev, 0);
kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
/* make it known to the system */
ret = ccw_device_register(cdev);
void
io_subchannel_recog_done(struct ccw_device *cdev)
{
- struct subchannel *sch;
-
if (css_init_done == 0) {
cdev->private->flags.recog_done = 1;
return;
/* Remove device found not operational. */
if (!get_device(&cdev->dev))
break;
- sch = to_subchannel(cdev->dev.parent);
PREPARE_WORK(&cdev->private->kick_work,
ccw_device_call_sch_unregister);
queue_work(slow_path_wq, &cdev->private->kick_work);
* Try to move the ccw device to its new subchannel.
* Note: device_move() changes cdev->dev.parent
*/
- rc = device_move(&cdev->dev, &sch->dev);
+ rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
mutex_unlock(&sch->reg_mutex);
if (rc) {
CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to subchannel "
* the ccw_device and exit. This happens for all early
* devices, e.g. the console.
*/
- sch->dev.uevent_suppress = 0;
+ dev_set_uevent_suppress(&sch->dev, 0);
kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
cdev->dev.groups = ccwdev_attr_groups;
device_initialize(&cdev->dev);
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ip.h>
-#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mii.h>
#include <linux/kthread.h>
#include <asm/io.h>
#include "qeth_core.h"
-#include "qeth_core_offl.h"
struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
netif_tx_disable(card->dev);
card->options.large_send = type;
switch (card->options.large_send) {
- case QETH_LARGE_SEND_EDDP:
- if (card->info.type != QETH_CARD_TYPE_IQD) {
- card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
- NETIF_F_HW_CSUM;
- } else {
- card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
- NETIF_F_HW_CSUM);
- card->options.large_send = QETH_LARGE_SEND_NO;
- rc = -EOPNOTSUPP;
- }
- break;
case QETH_LARGE_SEND_TSO:
if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
dev_kfree_skb_any(skb);
skb = skb_dequeue(&buf->skb_list);
}
- qeth_eddp_buf_release_contexts(buf);
for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
if (buf->buffer->element[i].addr && buf->is_header[i])
kmem_cache_free(qeth_core_header_cache,
int rc;
unsigned long flags;
struct qeth_reply *reply = NULL;
- unsigned long timeout;
+ unsigned long timeout, event_timeout;
struct qeth_ipa_cmd *cmd;
QETH_DBF_TEXT(TRACE, 2, "sendctl");
qeth_prepare_control_data(card, len, iob);
if (IS_IPA(iob->data))
- timeout = jiffies + QETH_IPA_TIMEOUT;
+ event_timeout = QETH_IPA_TIMEOUT;
else
- timeout = jiffies + QETH_TIMEOUT;
+ event_timeout = QETH_TIMEOUT;
+ timeout = jiffies + event_timeout;
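+ /* event_timeout is the relative value for wait_event_timeout();
+  * timeout keeps the absolute deadline for the polling path below. */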
QETH_DBF_TEXT(TRACE, 6, "noirqpnd");
spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
if ((cmd->hdr.command == IPA_CMD_SETIP) &&
(cmd->hdr.prot_version == QETH_PROT_IPV4)) {
if (!wait_event_timeout(reply->wait_q,
- atomic_read(&reply->received), timeout))
+ atomic_read(&reply->received), event_timeout))
goto time_err;
} else {
while (!atomic_read(&reply->received)) {
struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
{
int sbalf15 = buffer->buffer->element[15].flags & 0xff;
- int cc = qdio_err & 3;
QETH_DBF_TEXT(TRACE, 6, "hdsnderr");
qeth_check_qdio_errors(buffer->buffer, qdio_err, "qouterr");
- switch (cc) {
- case 0:
- if (qdio_err) {
- QETH_DBF_TEXT(TRACE, 1, "lnkfail");
- QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
- QETH_DBF_TEXT_(TRACE, 1, "%04x %02x",
- (u16)qdio_err, (u8)sbalf15);
- return QETH_SEND_ERROR_LINK_FAILURE;
- }
+
+ if (!qdio_err)
return QETH_SEND_ERROR_NONE;
- case 2:
- if (qdio_err & QDIO_ERROR_SIGA_BUSY) {
- QETH_DBF_TEXT(TRACE, 1, "SIGAcc2B");
- QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
- return QETH_SEND_ERROR_KICK_IT;
- }
- if ((sbalf15 >= 15) && (sbalf15 <= 31))
- return QETH_SEND_ERROR_RETRY;
- return QETH_SEND_ERROR_LINK_FAILURE;
- /* look at qdio_error and sbalf 15 */
- case 1:
- QETH_DBF_TEXT(TRACE, 1, "SIGAcc1");
- QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
- return QETH_SEND_ERROR_LINK_FAILURE;
- case 3:
- default:
- QETH_DBF_TEXT(TRACE, 1, "SIGAcc3");
- QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
- return QETH_SEND_ERROR_KICK_IT;
- }
+
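+ /* sbalf 15 codes 15..31 are treated as retryable send errors. */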
+ if ((sbalf15 >= 15) && (sbalf15 <= 31))
+ return QETH_SEND_ERROR_RETRY;
+
+ QETH_DBF_TEXT(TRACE, 1, "lnkfail");
+ QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
+ QETH_DBF_TEXT_(TRACE, 1, "%04x %02x",
+ (u16)qdio_err, (u8)sbalf15);
+ return QETH_SEND_ERROR_LINK_FAILURE;
}
/*
qeth_get_micros() -
queue->card->perf_stats.outbound_do_qdio_start_time;
if (rc) {
+ queue->card->stats.tx_errors += count;
+ /* ignore temporary SIGA errors without busy condition */
+ if (rc == QDIO_ERROR_SIGA_TARGET)
+ return;
QETH_DBF_TEXT(TRACE, 2, "flushbuf");
QETH_DBF_TEXT_(TRACE, 2, " err%d", rc);
QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_DDEV_ID(queue->card));
- queue->card->stats.tx_errors += count;
+
/* this must not happen under normal circumstances. if it
* happens something is really wrong -> recover */
qeth_schedule_recovery(queue->card);
}
for (i = first_element; i < (first_element + count); ++i) {
buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
- /*we only handle the KICK_IT error by doing a recovery */
- if (qeth_handle_send_error(card, buffer, qdio_error)
- == QETH_SEND_ERROR_KICK_IT){
- netif_stop_queue(card->dev);
- qeth_schedule_recovery(card);
- return;
- }
+ qeth_handle_send_error(card, buffer, qdio_error);
qeth_clear_output_buffer(queue, buffer);
}
atomic_sub(count, &queue->used_buffers);
int qeth_do_send_packet_fast(struct qeth_card *card,
struct qeth_qdio_out_q *queue, struct sk_buff *skb,
struct qeth_hdr *hdr, int elements_needed,
- struct qeth_eddp_context *ctx, int offset, int hd_len)
+ int offset, int hd_len)
{
struct qeth_qdio_out_buffer *buffer;
- int buffers_needed = 0;
- int flush_cnt = 0;
int index;
/* spin until we get the queue ... */
*/
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
goto out;
- if (ctx == NULL)
- queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
+ queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
QDIO_MAX_BUFFERS_PER_Q;
- else {
- buffers_needed = qeth_eddp_check_buffers_for_context(queue,
- ctx);
- if (buffers_needed < 0)
- goto out;
- queue->next_buf_to_fill =
- (queue->next_buf_to_fill + buffers_needed) %
- QDIO_MAX_BUFFERS_PER_Q;
- }
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
- if (ctx == NULL) {
- qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
- qeth_flush_buffers(queue, index, 1);
- } else {
- flush_cnt = qeth_eddp_fill_buffer(queue, ctx, index);
- WARN_ON(buffers_needed != flush_cnt);
- qeth_flush_buffers(queue, index, flush_cnt);
- }
+ qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
+ qeth_flush_buffers(queue, index, 1);
return 0;
out:
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
struct sk_buff *skb, struct qeth_hdr *hdr,
- int elements_needed, struct qeth_eddp_context *ctx)
+ int elements_needed)
{
struct qeth_qdio_out_buffer *buffer;
int start_index;
qeth_switch_to_packing_if_needed(queue);
if (queue->do_pack) {
do_pack = 1;
- if (ctx == NULL) {
- /* does packet fit in current buffer? */
- if ((QETH_MAX_BUFFER_ELEMENTS(card) -
- buffer->next_element_to_fill) < elements_needed) {
- /* ... no -> set state PRIMED */
- atomic_set(&buffer->state,
- QETH_QDIO_BUF_PRIMED);
- flush_count++;
- queue->next_buf_to_fill =
- (queue->next_buf_to_fill + 1) %
- QDIO_MAX_BUFFERS_PER_Q;
- buffer = &queue->bufs[queue->next_buf_to_fill];
- /* we did a step forward, so check buffer state
- * again */
- if (atomic_read(&buffer->state) !=
- QETH_QDIO_BUF_EMPTY){
- qeth_flush_buffers(queue, start_index,
+ /* does packet fit in current buffer? */
+ if ((QETH_MAX_BUFFER_ELEMENTS(card) -
+ buffer->next_element_to_fill) < elements_needed) {
+ /* ... no -> set state PRIMED */
+ atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
+ flush_count++;
+ queue->next_buf_to_fill =
+ (queue->next_buf_to_fill + 1) %
+ QDIO_MAX_BUFFERS_PER_Q;
+ buffer = &queue->bufs[queue->next_buf_to_fill];
+ /* we did a step forward, so check buffer state
+ * again */
+ if (atomic_read(&buffer->state) !=
+ QETH_QDIO_BUF_EMPTY) {
+ qeth_flush_buffers(queue, start_index,
flush_count);
- atomic_set(&queue->state,
+ atomic_set(&queue->state,
QETH_OUT_Q_UNLOCKED);
- return -EBUSY;
- }
- }
- } else {
- /* check if we have enough elements (including following
- * free buffers) to handle eddp context */
- if (qeth_eddp_check_buffers_for_context(queue, ctx)
- < 0) {
- rc = -EBUSY;
- goto out;
+ return -EBUSY;
}
}
}
- if (ctx == NULL)
- tmp = qeth_fill_buffer(queue, buffer, skb, hdr, -1, 0);
- else {
- tmp = qeth_eddp_fill_buffer(queue, ctx,
- queue->next_buf_to_fill);
- if (tmp < 0) {
- rc = -EBUSY;
- goto out;
- }
- }
+ tmp = qeth_fill_buffer(queue, buffer, skb, hdr, -1, 0);
queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) %
QDIO_MAX_BUFFERS_PER_Q;
flush_count += tmp;
-out:
if (flush_count)
qeth_flush_buffers(queue, start_index, flush_count);
else if (!atomic_read(&queue->set_pci_flags_count))
/* 30 */{"tx count"},
{"tx do_QDIO time"},
{"tx do_QDIO count"},
+ {"tx csum"},
};
int qeth_core_get_stats_count(struct net_device *dev)
data[30] = card->perf_stats.outbound_cnt;
data[31] = card->perf_stats.outbound_do_qdio_time;
data[32] = card->perf_stats.outbound_do_qdio_cnt;
+ data[33] = card->perf_stats.tx_csum;
}
EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats);