author     André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-10-20 00:10:27 -0300
committer  André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-10-20 00:10:27 -0300
commit     d0b2f91bede3bd5e3d24dd6803e56eee959c1797 (patch)
tree       7fee4ab0509879c373c4f2cbd5b8a5be5b4041ee /drivers/s390
parent     e914f8eb445e8f74b00303c19c2ffceaedd16a05 (diff)
Linux-libre 4.8.2-gnu (tag: pck-4.8.2-gnu)
Diffstat (limited to 'drivers/s390')
38 files changed, 1236 insertions, 934 deletions
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 42b34cd1f..98bbec44b 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -228,7 +228,7 @@ check_XRC (struct ccw1 *de_ccw, data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */ data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */ - rc = get_sync_clock(&data->ep_sys_time); + rc = get_phys_clock(&data->ep_sys_time); /* Ignore return code if sync clock is switched off. */ if (rc == -EOPNOTSUPP || rc == -EACCES) rc = 0; @@ -339,7 +339,7 @@ static int check_XRC_on_prefix(struct PFX_eckd_data *pfxdata, pfxdata->define_extent.ga_extended |= 0x02; /* 'Extended Parameter' */ pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid' */ - rc = get_sync_clock(&pfxdata->define_extent.ep_sys_time); + rc = get_phys_clock(&pfxdata->define_extent.ep_sys_time); /* Ignore return code if sync clock is switched off. */ if (rc == -EOPNOTSUPP || rc == -EACCES) rc = 0; @@ -5078,6 +5078,8 @@ static int dasd_eckd_read_message_buffer(struct dasd_device *device, return PTR_ERR(cqr); } + cqr->lpm = lpum; +retry: cqr->startdev = device; cqr->memdev = device; cqr->block = NULL; @@ -5122,6 +5124,14 @@ static int dasd_eckd_read_message_buffer(struct dasd_device *device, (prssdp + 1); memcpy(messages, message_buf, sizeof(struct dasd_rssd_messages)); + } else if (cqr->lpm) { + /* + * on z/VM we might not be able to do I/O on the requested path + * but instead we get the required information on any path + * so retry with open path mask + */ + cqr->lpm = 0; + goto retry; } else DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "Reading messages failed with rc=%d\n" diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c index 31d544a87..e2fa759bf 100644 --- a/drivers/s390/block/dasd_genhd.c +++ b/drivers/s390/block/dasd_genhd.c @@ -45,7 +45,6 @@ int dasd_gendisk_alloc(struct dasd_block *block) gdp->major = DASD_MAJOR; gdp->first_minor = base->devindex << DASD_PARTN_BITS; gdp->fops = &dasd_device_operations; - gdp->driverfs_dev = &base->cdev->dev; /* * Set device name. 
@@ -76,7 +75,7 @@ int dasd_gendisk_alloc(struct dasd_block *block) gdp->queue = block->request_queue; block->gdp = gdp; set_capacity(block->gdp, 0); - add_disk(block->gdp); + device_add_disk(&base->cdev->dev, block->gdp); return 0; } diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index bed53c46d..9d66b4fb1 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -31,7 +31,7 @@ static void dcssblk_release(struct gendisk *disk, fmode_t mode); static blk_qc_t dcssblk_make_request(struct request_queue *q, struct bio *bio); static long dcssblk_direct_access(struct block_device *bdev, sector_t secnum, - void __pmem **kaddr, pfn_t *pfn, long size); + void **kaddr, pfn_t *pfn, long size); static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0"; @@ -615,9 +615,9 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char dev_info->dcssblk_queue = blk_alloc_queue(GFP_KERNEL); dev_info->gd->queue = dev_info->dcssblk_queue; dev_info->gd->private_data = dev_info; - dev_info->gd->driverfs_dev = &dev_info->dev; blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request); blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096); + queue_flag_set_unlocked(QUEUE_FLAG_DAX, dev_info->dcssblk_queue); seg_byte_size = (dev_info->end - dev_info->start + 1); set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors @@ -655,7 +655,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char goto put_dev; get_device(&dev_info->dev); - add_disk(dev_info->gd); + device_add_disk(&dev_info->dev, dev_info->gd); switch (dev_info->segment_type) { case SEG_TYPE_SR: @@ -884,7 +884,7 @@ fail: static long dcssblk_direct_access (struct block_device *bdev, sector_t secnum, - void __pmem **kaddr, pfn_t *pfn, long size) + void **kaddr, pfn_t *pfn, long size) { struct dcssblk_dev_info *dev_info; unsigned long offset, dev_sz; @@ -894,7 +894,7 @@ dcssblk_direct_access (struct block_device *bdev, sector_t secnum, return -ENODEV; dev_sz = dev_info->end - dev_info->start; offset = secnum * 512; - *kaddr = (void __pmem *) (dev_info->start + offset); + *kaddr = (void *) dev_info->start + offset; *pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset), PFN_DEV); return dev_sz - offset; diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index e6f54d3b8..9f16ea696 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -512,7 +512,6 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) goto out_queue; rq->queuedata = scmdev; - bdev->gendisk->driverfs_dev = &scmdev->dev; bdev->gendisk->private_data = scmdev; bdev->gendisk->fops = &scm_blk_devops; bdev->gendisk->queue = rq; @@ -531,7 +530,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) /* 512 byte sectors */ set_capacity(bdev->gendisk, scmdev->size >> 9); - add_disk(bdev->gendisk); + device_add_disk(&scmdev->dev, bdev->gendisk); return 0; out_queue: diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c index ef04a9f7a..7b9c50aa4 100644 --- a/drivers/s390/char/keyboard.c +++ b/drivers/s390/char/keyboard.c @@ -438,18 +438,9 @@ do_kdgkb_ioctl(struct kbd_data *kbd, struct kbsentry __user *u_kbs, return -EFAULT; if (len > sizeof(u_kbs->kb_string)) return -EINVAL; - p = kmalloc(len, GFP_KERNEL); - if (!p) - return -ENOMEM; - if (copy_from_user(p, u_kbs->kb_string, len)) { - kfree(p); - return -EFAULT; - } - /* - * Make sure the string is terminated by 0. 
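/*
 * Editorial sketch, not part of the patch above: the gendisk hunks in
 * dasd_genhd.c, dcssblk.c and scm_blk.c all follow the same 4.8 block-layer
 * migration - stop assigning gendisk->driverfs_dev and pass the parent
 * device to device_add_disk() instead of calling add_disk().  The function
 * and variable names below are invented for illustration.
 */
#include <linux/device.h>
#include <linux/genhd.h>

static void example_register_disk(struct device *parent, struct gendisk *gd)
{
	/*
	 * Pre-4.8 pattern, removed by this patch:
	 *	gd->driverfs_dev = parent;
	 *	add_disk(gd);
	 */
	device_add_disk(parent, gd);	/* parent device is now passed explicitly */
}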
User could have - * modified it between us running strnlen_user() and copying it. - */ - p[len - 1] = 0; + p = memdup_user_nul(u_kbs->kb_string, len); + if (IS_ERR(p)) + return PTR_ERR(p); kfree(kbd->func_table[kb_func]); kbd->func_table[kb_func] = p; break; diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c index 5880def98..6037bc87e 100644 --- a/drivers/s390/char/sclp_con.c +++ b/drivers/s390/char/sclp_con.c @@ -319,7 +319,8 @@ sclp_console_init(void) int i; int rc; - if (!CONSOLE_IS_SCLP) + /* SCLP consoles are handled together */ + if (!(CONSOLE_IS_SCLP || CONSOLE_IS_VT220)) return 0; rc = sclp_rw_init(); if (rc) diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c index 2ced50ccc..1406fb688 100644 --- a/drivers/s390/char/sclp_config.c +++ b/drivers/s390/char/sclp_config.c @@ -47,7 +47,7 @@ static void sclp_cpu_capability_notify(struct work_struct *work) int cpu; struct device *dev; - s390_adjust_jiffies(); + s390_update_cpu_mhz(); pr_info("CPU capability may have changed\n"); get_online_cpus(); for_each_online_cpu(cpu) { diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c index 0ac520dd1..c71df0c7d 100644 --- a/drivers/s390/char/sclp_early.c +++ b/drivers/s390/char/sclp_early.c @@ -46,7 +46,8 @@ struct read_info_sccb { u64 rnmax2; /* 104-111 */ u8 _pad_112[116 - 112]; /* 112-115 */ u8 fac116; /* 116 */ - u8 _pad_117[119 - 117]; /* 117-118 */ + u8 fac117; /* 117 */ + u8 _pad_118; /* 118 */ u8 fac119; /* 119 */ u16 hcpua; /* 120-121 */ u8 _pad_122[124 - 122]; /* 122-123 */ @@ -114,7 +115,12 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb) sclp.facilities = sccb->facilities; sclp.has_sprp = !!(sccb->fac84 & 0x02); sclp.has_core_type = !!(sccb->fac84 & 0x01); + sclp.has_gsls = !!(sccb->fac85 & 0x80); + sclp.has_64bscao = !!(sccb->fac116 & 0x80); + sclp.has_cmma = !!(sccb->fac116 & 0x40); sclp.has_esca = !!(sccb->fac116 & 0x08); + sclp.has_pfmfi = !!(sccb->fac117 & 0x40); + sclp.has_ibs = !!(sccb->fac117 & 0x20); sclp.has_hvs = !!(sccb->fac119 & 0x80); if (sccb->fac85 & 0x02) S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP; @@ -145,6 +151,10 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb) sclp.has_siif = cpue->siif; sclp.has_sigpif = cpue->sigpif; sclp.has_sief2 = cpue->sief2; + sclp.has_gpere = cpue->gpere; + sclp.has_ib = cpue->ib; + sclp.has_cei = cpue->cei; + sclp.has_skey = cpue->skey; break; } diff --git a/drivers/s390/char/sclp_ocf.c b/drivers/s390/char/sclp_ocf.c index 2553db0fd..f59b71776 100644 --- a/drivers/s390/char/sclp_ocf.c +++ b/drivers/s390/char/sclp_ocf.c @@ -26,7 +26,7 @@ #define OCF_LENGTH_CPC_NAME 8UL static char hmc_network[OCF_LENGTH_HMC_NETWORK + 1]; -static char cpc_name[OCF_LENGTH_CPC_NAME + 1]; +static char cpc_name[OCF_LENGTH_CPC_NAME]; /* in EBCDIC */ static DEFINE_SPINLOCK(sclp_ocf_lock); static struct work_struct sclp_ocf_change_work; @@ -72,9 +72,8 @@ static void sclp_ocf_handler(struct evbuf_header *evbuf) } if (cpc) { size = min(OCF_LENGTH_CPC_NAME, (size_t) cpc->length); + memset(cpc_name, 0, OCF_LENGTH_CPC_NAME); memcpy(cpc_name, cpc + 1, size); - EBCASC(cpc_name, size); - cpc_name[size] = 0; } spin_unlock(&sclp_ocf_lock); schedule_work(&sclp_ocf_change_work); @@ -85,15 +84,23 @@ static struct sclp_register sclp_ocf_event = { .receiver_fn = sclp_ocf_handler, }; +void sclp_ocf_cpc_name_copy(char *dst) +{ + spin_lock_irq(&sclp_ocf_lock); + memcpy(dst, cpc_name, OCF_LENGTH_CPC_NAME); + spin_unlock_irq(&sclp_ocf_lock); +} 
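/*
 * Editorial sketch, not part of the patch above: the keyboard.c hunk
 * replaces the open-coded kmalloc()/copy_from_user()/manual termination
 * sequence with memdup_user_nul(), which allocates len + 1 bytes, copies
 * from user space and NUL-terminates in one call.  The helper and names
 * below are invented for illustration.
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

static int example_replace_string(char **slot, const char __user *ustr, size_t len)
{
	char *p = memdup_user_nul(ustr, len);	/* ERR_PTR() on failure */

	if (IS_ERR(p))
		return PTR_ERR(p);
	kfree(*slot);		/* swap in the new copy, as keyboard.c does */
	*slot = p;
	return 0;
}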
+EXPORT_SYMBOL(sclp_ocf_cpc_name_copy); + static ssize_t cpc_name_show(struct kobject *kobj, struct kobj_attribute *attr, char *page) { - int rc; + char name[OCF_LENGTH_CPC_NAME + 1]; - spin_lock_irq(&sclp_ocf_lock); - rc = snprintf(page, PAGE_SIZE, "%s\n", cpc_name); - spin_unlock_irq(&sclp_ocf_lock); - return rc; + sclp_ocf_cpc_name_copy(name); + name[OCF_LENGTH_CPC_NAME] = 0; + EBCASC(name, OCF_LENGTH_CPC_NAME); + return snprintf(page, PAGE_SIZE, "%s\n", name); } static struct kobj_attribute cpc_name_attr = diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 5043ecfa1..16992e2a4 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -185,7 +185,7 @@ static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf, { if (ipl_block) { diag308(DIAG308_SET, ipl_block); - diag308(DIAG308_IPL, NULL); + diag308(DIAG308_LOAD_CLEAR, NULL); } return count; } diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index 50597f952..46be25c74 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c @@ -37,8 +37,7 @@ enum cfg_task_t { /* Map for pending configure tasks. */ static enum cfg_task_t chp_cfg_task[__MAX_CSSID + 1][__MAX_CHPID + 1]; -static DEFINE_MUTEX(cfg_lock); -static int cfg_busy; +static DEFINE_SPINLOCK(cfg_lock); /* Map for channel-path status. */ static struct sclp_chp_info chp_info; @@ -47,8 +46,6 @@ static DEFINE_MUTEX(info_lock); /* Time after which channel-path status may be outdated. */ static unsigned long chp_info_expires; -/* Workqueue to perform pending configure tasks. */ -static struct workqueue_struct *chp_wq; static struct work_struct cfg_work; /* Wait queue for configure completion events. */ @@ -428,11 +425,14 @@ int chp_update_desc(struct channel_path *chp) if (rc) return rc; - rc = chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1); - if (rc) - return rc; + /* + * Fetching the following data is optional. Not all machines or + * hypervisors implement the required chsc commands. + */ + chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1); + chsc_get_channel_measurement_chars(chp); - return chsc_get_channel_measurement_chars(chp); + return 0; } /** @@ -665,6 +665,20 @@ static void cfg_set_task(struct chp_id chpid, enum cfg_task_t cfg) chp_cfg_task[chpid.cssid][chpid.id] = cfg; } +/* Fetch the first configure task. Set chpid accordingly. */ +static enum cfg_task_t chp_cfg_fetch_task(struct chp_id *chpid) +{ + enum cfg_task_t t = cfg_none; + + chp_id_for_each(chpid) { + t = cfg_get_task(*chpid); + if (t != cfg_none) + break; + } + + return t; +} + /* Perform one configure/deconfigure request. Reschedule work function until * last request. */ static void cfg_func(struct work_struct *work) @@ -673,16 +687,9 @@ static void cfg_func(struct work_struct *work) enum cfg_task_t t; int rc; - mutex_lock(&cfg_lock); - t = cfg_none; - chp_id_for_each(&chpid) { - t = cfg_get_task(chpid); - if (t != cfg_none) { - cfg_set_task(chpid, cfg_none); - break; - } - } - mutex_unlock(&cfg_lock); + spin_lock(&cfg_lock); + t = chp_cfg_fetch_task(&chpid); + spin_unlock(&cfg_lock); switch (t) { case cfg_configure: @@ -708,13 +715,14 @@ static void cfg_func(struct work_struct *work) case cfg_none: /* Get updated information after last change. 
*/ info_update(); - mutex_lock(&cfg_lock); - cfg_busy = 0; - mutex_unlock(&cfg_lock); wake_up_interruptible(&cfg_wait_queue); return; } - queue_work(chp_wq, &cfg_work); + spin_lock(&cfg_lock); + if (t == cfg_get_task(chpid)) + cfg_set_task(chpid, cfg_none); + spin_unlock(&cfg_lock); + schedule_work(&cfg_work); } /** @@ -728,11 +736,10 @@ void chp_cfg_schedule(struct chp_id chpid, int configure) { CIO_MSG_EVENT(2, "chp_cfg_sched%x.%02x=%d\n", chpid.cssid, chpid.id, configure); - mutex_lock(&cfg_lock); + spin_lock(&cfg_lock); cfg_set_task(chpid, configure ? cfg_configure : cfg_deconfigure); - cfg_busy = 1; - mutex_unlock(&cfg_lock); - queue_work(chp_wq, &cfg_work); + spin_unlock(&cfg_lock); + schedule_work(&cfg_work); } /** @@ -745,15 +752,27 @@ void chp_cfg_schedule(struct chp_id chpid, int configure) void chp_cfg_cancel_deconfigure(struct chp_id chpid) { CIO_MSG_EVENT(2, "chp_cfg_cancel:%x.%02x\n", chpid.cssid, chpid.id); - mutex_lock(&cfg_lock); + spin_lock(&cfg_lock); if (cfg_get_task(chpid) == cfg_deconfigure) cfg_set_task(chpid, cfg_none); - mutex_unlock(&cfg_lock); + spin_unlock(&cfg_lock); +} + +static bool cfg_idle(void) +{ + struct chp_id chpid; + enum cfg_task_t t; + + spin_lock(&cfg_lock); + t = chp_cfg_fetch_task(&chpid); + spin_unlock(&cfg_lock); + + return t == cfg_none; } static int cfg_wait_idle(void) { - if (wait_event_interruptible(cfg_wait_queue, !cfg_busy)) + if (wait_event_interruptible(cfg_wait_queue, cfg_idle())) return -ERESTARTSYS; return 0; } @@ -766,11 +785,6 @@ static int __init chp_init(void) ret = crw_register_handler(CRW_RSC_CPATH, chp_process_crw); if (ret) return ret; - chp_wq = create_singlethread_workqueue("cio_chp"); - if (!chp_wq) { - crw_unregister_handler(CRW_RSC_CPATH); - return -ENOMEM; - } INIT_WORK(&cfg_work, cfg_func); init_waitqueue_head(&cfg_wait_queue); if (info_update()) diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h index af0232290..bb5a68226 100644 --- a/drivers/s390/cio/chp.h +++ b/drivers/s390/cio/chp.h @@ -4,7 +4,7 @@ */ #ifndef S390_CHP_H -#define S390_CHP_H S390_CHP_H +#define S390_CHP_H #include <linux/types.h> #include <linux/device.h> diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index c424c0c73..940e725bd 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -907,7 +907,8 @@ int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt, struct chsc_scpd *scpd_area; int ccode, ret; - if ((rfmt == 1) && !css_general_characteristics.fcs) + if ((rfmt == 1 || rfmt == 0) && c == 1 && + !css_general_characteristics.fcs) return -EINVAL; if ((rfmt == 2) && !css_general_characteristics.cib) return -EINVAL; @@ -939,7 +940,6 @@ EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc); int chsc_determine_base_channel_path_desc(struct chp_id chpid, struct channel_path_desc *desc) { - struct chsc_response_struct *chsc_resp; struct chsc_scpd *scpd_area; unsigned long flags; int ret; @@ -949,8 +949,8 @@ int chsc_determine_base_channel_path_desc(struct chp_id chpid, ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area); if (ret) goto out; - chsc_resp = (void *)&scpd_area->response; - memcpy(desc, &chsc_resp->data, sizeof(*desc)); + + memcpy(desc, scpd_area->data, sizeof(*desc)); out: spin_unlock_irqrestore(&chsc_page_lock, flags); return ret; @@ -959,18 +959,17 @@ out: int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid, struct channel_path_desc_fmt1 *desc) { - struct chsc_response_struct *chsc_resp; struct chsc_scpd *scpd_area; unsigned long flags; int ret; 
spin_lock_irqsave(&chsc_page_lock, flags); scpd_area = chsc_page; - ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area); + ret = chsc_determine_channel_path_desc(chpid, 0, 1, 1, 0, scpd_area); if (ret) goto out; - chsc_resp = (void *)&scpd_area->response; - memcpy(desc, &chsc_resp->data, sizeof(*desc)); + + memcpy(desc, scpd_area->data, sizeof(*desc)); out: spin_unlock_irqrestore(&chsc_page_lock, flags); return ret; @@ -1020,7 +1019,7 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp) chp->cmg = -1; if (!css_chsc_characteristics.scmc || !css_chsc_characteristics.secm) - return 0; + return -EINVAL; spin_lock_irq(&chsc_page_lock); memset(chsc_page, 0, PAGE_SIZE); @@ -1176,7 +1175,7 @@ exit: EXPORT_SYMBOL_GPL(css_general_characteristics); EXPORT_SYMBOL_GPL(css_chsc_characteristics); -int chsc_sstpc(void *page, unsigned int op, u16 ctrl) +int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta) { struct { struct chsc_header request; @@ -1186,7 +1185,9 @@ int chsc_sstpc(void *page, unsigned int op, u16 ctrl) unsigned int ctrl : 16; unsigned int rsvd2[5]; struct chsc_header response; - unsigned int rsvd3[7]; + unsigned int rsvd3[3]; + u64 clock_delta; + unsigned int rsvd4[2]; } __attribute__ ((packed)) *rr; int rc; @@ -1200,6 +1201,8 @@ int chsc_sstpc(void *page, unsigned int op, u16 ctrl) if (rc) return -EIO; rc = (rr->response.code == 0x0001) ? 0 : -EIO; + if (clock_delta) + *clock_delta = rr->clock_delta; return rc; } diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index 0de134c3a..67c87b6e6 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h @@ -112,8 +112,9 @@ struct chsc_scpd { u32 last_chpid:8; u32 zeroes1; struct chsc_header response; - u8 data[PAGE_SIZE - 20]; -} __attribute__ ((packed)); + u32:32; + u8 data[0]; +} __packed; struct chsc_sda_area { struct chsc_header request; diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c index b6f12c2bb..735052ecd 100644 --- a/drivers/s390/cio/chsc_sch.c +++ b/drivers/s390/cio/chsc_sch.c @@ -552,7 +552,7 @@ static int chsc_ioctl_info_cu(void __user *user_cd) goto out_free; } scucd_area->request.length = 0x0010; - scucd_area->request.code = 0x0028; + scucd_area->request.code = 0x0026; scucd_area->m = cd->m; scucd_area->fmt1 = cd->fmt; scucd_area->cssid = cd->cssid; diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index 2a34eb5f6..268aa23af 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c @@ -164,6 +164,9 @@ static inline u64 time_to_avg_nsec(u32 value, u32 count) return ret; } +#define CMF_OFF 0 +#define CMF_ON 2 + /* * Activate or deactivate the channel monitor. When area is NULL, * the monitor is deactivated. The channel monitor needs to @@ -176,7 +179,7 @@ static inline void cmf_activate(void *area, unsigned int onoff) register long __gpr1 asm("1"); __gpr2 = area; - __gpr1 = onoff ? 
2 : 0; + __gpr1 = onoff; /* activate channel measurement */ asm("schm" : : "d" (__gpr2), "d" (__gpr1) ); } @@ -587,7 +590,7 @@ static int alloc_cmb(struct ccw_device *cdev) /* everything ok */ memset(mem, 0, size); cmb_area.mem = mem; - cmf_activate(cmb_area.mem, 1); + cmf_activate(cmb_area.mem, CMF_ON); } } @@ -621,7 +624,7 @@ static void free_cmb(struct ccw_device *cdev) if (list_empty(&cmb_area.list)) { ssize_t size; size = sizeof(struct cmb) * cmb_area.num_channels; - cmf_activate(NULL, 0); + cmf_activate(NULL, CMF_OFF); free_pages((unsigned long)cmb_area.mem, get_order(size)); cmb_area.mem = NULL; } @@ -841,7 +844,7 @@ static int alloc_cmbe(struct ccw_device *cdev) /* activate global measurement if this is the first channel */ if (list_empty(&cmb_area.list)) - cmf_activate(NULL, 1); + cmf_activate(NULL, CMF_ON); list_add_tail(&cdev->private->cmb_list, &cmb_area.list); spin_unlock_irq(cdev->ccwlock); @@ -878,7 +881,7 @@ static void free_cmbe(struct ccw_device *cdev) /* deactivate global measurement if this is the last channel */ list_del_init(&cdev->private->cmb_list); if (list_empty(&cmb_area.list)) - cmf_activate(NULL, 0); + cmf_activate(NULL, CMF_OFF); spin_unlock_irq(cdev->ccwlock); spin_unlock(&cmb_area.lock); } @@ -1332,7 +1335,7 @@ void cmf_reactivate(void) { spin_lock(&cmb_area.lock); if (!list_empty(&cmb_area.list)) - cmf_activate(cmb_area.mem, 1); + cmf_activate(cmb_area.mem, CMF_ON); spin_unlock(&cmb_area.lock); } diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 7ada078ff..6a58bc8f4 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -762,7 +762,6 @@ static int io_subchannel_initialize_dev(struct subchannel *sch, priv->state = DEV_STATE_NOT_OPER; priv->dev_id.devno = sch->schib.pmcw.dev; priv->dev_id.ssid = sch->schid.ssid; - priv->schid = sch->schid; INIT_WORK(&priv->todo_work, ccw_device_todo); INIT_LIST_HEAD(&priv->cmb_list); @@ -1000,7 +999,6 @@ static int ccw_device_move_to_sch(struct ccw_device *cdev, put_device(&old_sch->dev); /* Initialize new subchannel. */ spin_lock_irq(sch->lock); - cdev->private->schid = sch->schid; cdev->ccwlock = sch->lock; if (!sch_is_pseudo_sch(sch)) sch_set_cdev(sch, cdev); diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index a69f702a2..877d9f601 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -97,7 +97,7 @@ void ccw_device_clear_options(struct ccw_device *cdev, unsigned long flags) } /** - * ccw_device_is_pathgroup - determine if paths to this device are grouped + * ccw_device_is_pathgroup() - determine if paths to this device are grouped * @cdev: ccw device * * Return non-zero if there is a path group, zero otherwise. @@ -109,7 +109,7 @@ int ccw_device_is_pathgroup(struct ccw_device *cdev) EXPORT_SYMBOL(ccw_device_is_pathgroup); /** - * ccw_device_is_multipath - determine if device is operating in multipath mode + * ccw_device_is_multipath() - determine if device is operating in multipath mode * @cdev: ccw device * * Return non-zero if device is operating in multipath mode, zero otherwise. 
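/*
 * Editorial note, not part of the patch above: the device_ops.c hunks only
 * adjust kerneldoc formatting - the summary line gains "()" after the
 * function name, which is what scripts/kernel-doc expects.  Minimal
 * template with invented names:
 */
#include <linux/errno.h>

/**
 * example_helper() - one-line summary of the helper
 * @arg: description of the parameter
 *
 * Return: zero on success, or a negative errno otherwise.
 */
static int example_helper(int arg)
{
	return arg ? 0 : -EINVAL;
}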
@@ -457,7 +457,7 @@ __u8 ccw_device_get_path_mask(struct ccw_device *cdev) } /** - * chp_get_chp_desc - return newly allocated channel-path descriptor + * ccw_device_get_chp_desc() - return newly allocated channel-path descriptor * @cdev: device to obtain the descriptor for * @chp_idx: index of the channel path * @@ -477,7 +477,7 @@ struct channel_path_desc *ccw_device_get_chp_desc(struct ccw_device *cdev, } /** - * ccw_device_get_id - obtain a ccw device id + * ccw_device_get_id() - obtain a ccw device id * @cdev: device to obtain the id for * @dev_id: where to fill in the values */ @@ -488,7 +488,7 @@ void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id) EXPORT_SYMBOL(ccw_device_get_id); /** - * ccw_device_tm_start_key - perform start function + * ccw_device_tm_start_key() - perform start function * @cdev: ccw device on which to perform the start function * @tcw: transport-command word to be started * @intparm: user defined parameter to be passed to the interrupt handler @@ -533,7 +533,7 @@ int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw, EXPORT_SYMBOL(ccw_device_tm_start_key); /** - * ccw_device_tm_start_timeout_key - perform start function + * ccw_device_tm_start_timeout_key() - perform start function * @cdev: ccw device on which to perform the start function * @tcw: transport-command word to be started * @intparm: user defined parameter to be passed to the interrupt handler @@ -559,7 +559,7 @@ int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw, EXPORT_SYMBOL(ccw_device_tm_start_timeout_key); /** - * ccw_device_tm_start - perform start function + * ccw_device_tm_start() - perform start function * @cdev: ccw device on which to perform the start function * @tcw: transport-command word to be started * @intparm: user defined parameter to be passed to the interrupt handler @@ -577,7 +577,7 @@ int ccw_device_tm_start(struct ccw_device *cdev, struct tcw *tcw, EXPORT_SYMBOL(ccw_device_tm_start); /** - * ccw_device_tm_start_timeout - perform start function + * ccw_device_tm_start_timeout() - perform start function * @cdev: ccw device on which to perform the start function * @tcw: transport-command word to be started * @intparm: user defined parameter to be passed to the interrupt handler @@ -596,7 +596,7 @@ int ccw_device_tm_start_timeout(struct ccw_device *cdev, struct tcw *tcw, EXPORT_SYMBOL(ccw_device_tm_start_timeout); /** - * ccw_device_get_mdc - accumulate max data count + * ccw_device_get_mdc() - accumulate max data count * @cdev: ccw device for which the max data count is accumulated * @mask: mask of paths to use * @@ -642,7 +642,7 @@ int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask) EXPORT_SYMBOL(ccw_device_get_mdc); /** - * ccw_device_tm_intrg - perform interrogate function + * ccw_device_tm_intrg() - perform interrogate function * @cdev: ccw device on which to perform the interrogate function * * Perform an interrogate function on the given ccw device. 
Return zero on @@ -664,7 +664,7 @@ int ccw_device_tm_intrg(struct ccw_device *cdev) EXPORT_SYMBOL(ccw_device_tm_intrg); /** - * ccw_device_get_schid - obtain a subchannel id + * ccw_device_get_schid() - obtain a subchannel id * @cdev: device to obtain the id for * @schid: where to fill in the values */ diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c index 15b56a15d..9bc351237 100644 --- a/drivers/s390/cio/device_status.c +++ b/drivers/s390/cio/device_status.c @@ -26,6 +26,7 @@ static void ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) { + struct subchannel *sch = to_subchannel(cdev->dev.parent); char dbf_text[15]; if (!scsw_is_valid_cstat(&irb->scsw) || @@ -36,10 +37,10 @@ ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) "received" " ... device %04x on subchannel 0.%x.%04x, dev_stat " ": %02X sch_stat : %02X\n", - cdev->private->dev_id.devno, cdev->private->schid.ssid, - cdev->private->schid.sch_no, + cdev->private->dev_id.devno, sch->schid.ssid, + sch->schid.sch_no, scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw)); - sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no); + sprintf(dbf_text, "chk%x", sch->schid.sch_no); CIO_TRACE_EVENT(0, dbf_text); CIO_HEX_EVENT(0, irb, sizeof(struct irb)); } diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h index 22b581046..89a787790 100644 --- a/drivers/s390/cio/idset.h +++ b/drivers/s390/cio/idset.h @@ -4,7 +4,7 @@ */ #ifndef S390_IDSET_H -#define S390_IDSET_H S390_IDSET_H +#define S390_IDSET_H #include <asm/schid.h> diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h index 8975060af..220f49145 100644 --- a/drivers/s390/cio/io_sch.h +++ b/drivers/s390/cio/io_sch.h @@ -120,7 +120,6 @@ struct ccw_device_private { int state; /* device state */ atomic_t onoff; struct ccw_dev_id dev_id; /* device id */ - struct subchannel_id schid; /* subchannel number */ struct ccw_request req; /* internal I/O request */ int iretry; u8 pgid_valid_mask; /* mask of valid PGIDs */ diff --git a/drivers/s390/cio/ioasm.c b/drivers/s390/cio/ioasm.c index 989848186..8225da619 100644 --- a/drivers/s390/cio/ioasm.c +++ b/drivers/s390/cio/ioasm.c @@ -12,7 +12,7 @@ #include "orb.h" #include "cio.h" -int stsch(struct subchannel_id schid, struct schib *addr) +static inline int __stsch(struct subchannel_id schid, struct schib *addr) { register struct subchannel_id reg1 asm ("1") = schid; int ccode = -EIO; @@ -26,13 +26,21 @@ int stsch(struct subchannel_id schid, struct schib *addr) : "+d" (ccode), "=m" (*addr) : "d" (reg1), "a" (addr) : "cc"); + return ccode; +} + +int stsch(struct subchannel_id schid, struct schib *addr) +{ + int ccode; + + ccode = __stsch(schid, addr); trace_s390_cio_stsch(schid, addr, ccode); return ccode; } EXPORT_SYMBOL(stsch); -int msch(struct subchannel_id schid, struct schib *addr) +static inline int __msch(struct subchannel_id schid, struct schib *addr) { register struct subchannel_id reg1 asm ("1") = schid; int ccode = -EIO; @@ -46,12 +54,20 @@ int msch(struct subchannel_id schid, struct schib *addr) : "+d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc"); + return ccode; +} + +int msch(struct subchannel_id schid, struct schib *addr) +{ + int ccode; + + ccode = __msch(schid, addr); trace_s390_cio_msch(schid, addr, ccode); return ccode; } -int tsch(struct subchannel_id schid, struct irb *addr) +static inline int __tsch(struct subchannel_id schid, struct irb *addr) { register struct subchannel_id reg1 asm ("1") = schid; int ccode; @@ -63,12 +79,20 @@ 
int tsch(struct subchannel_id schid, struct irb *addr) : "=d" (ccode), "=m" (*addr) : "d" (reg1), "a" (addr) : "cc"); + return ccode; +} + +int tsch(struct subchannel_id schid, struct irb *addr) +{ + int ccode; + + ccode = __tsch(schid, addr); trace_s390_cio_tsch(schid, addr, ccode); return ccode; } -int ssch(struct subchannel_id schid, union orb *addr) +static inline int __ssch(struct subchannel_id schid, union orb *addr) { register struct subchannel_id reg1 asm("1") = schid; int ccode = -EIO; @@ -82,13 +106,21 @@ int ssch(struct subchannel_id schid, union orb *addr) : "+d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc", "memory"); + return ccode; +} + +int ssch(struct subchannel_id schid, union orb *addr) +{ + int ccode; + + ccode = __ssch(schid, addr); trace_s390_cio_ssch(schid, addr, ccode); return ccode; } EXPORT_SYMBOL(ssch); -int csch(struct subchannel_id schid) +static inline int __csch(struct subchannel_id schid) { register struct subchannel_id reg1 asm("1") = schid; int ccode; @@ -100,6 +132,14 @@ int csch(struct subchannel_id schid) : "=d" (ccode) : "d" (reg1) : "cc"); + return ccode; +} + +int csch(struct subchannel_id schid) +{ + int ccode; + + ccode = __csch(schid); trace_s390_cio_csch(schid, ccode); return ccode; @@ -140,7 +180,7 @@ int chsc(void *chsc_area) } EXPORT_SYMBOL(chsc); -int rchp(struct chp_id chpid) +static inline int __rchp(struct chp_id chpid) { register struct chp_id reg1 asm ("1") = chpid; int ccode; @@ -151,12 +191,20 @@ int rchp(struct chp_id chpid) " ipm %0\n" " srl %0,28" : "=d" (ccode) : "d" (reg1) : "cc"); + return ccode; +} + +int rchp(struct chp_id chpid) +{ + int ccode; + + ccode = __rchp(chpid); trace_s390_cio_rchp(chpid, ccode); return ccode; } -int rsch(struct subchannel_id schid) +static inline int __rsch(struct subchannel_id schid) { register struct subchannel_id reg1 asm("1") = schid; int ccode; @@ -168,12 +216,21 @@ int rsch(struct subchannel_id schid) : "=d" (ccode) : "d" (reg1) : "cc", "memory"); + + return ccode; +} + +int rsch(struct subchannel_id schid) +{ + int ccode; + + ccode = __rsch(schid); trace_s390_cio_rsch(schid, ccode); return ccode; } -int hsch(struct subchannel_id schid) +static inline int __hsch(struct subchannel_id schid) { register struct subchannel_id reg1 asm("1") = schid; int ccode; @@ -185,12 +242,20 @@ int hsch(struct subchannel_id schid) : "=d" (ccode) : "d" (reg1) : "cc"); + return ccode; +} + +int hsch(struct subchannel_id schid) +{ + int ccode; + + ccode = __hsch(schid); trace_s390_cio_hsch(schid, ccode); return ccode; } -int xsch(struct subchannel_id schid) +static inline int __xsch(struct subchannel_id schid) { register struct subchannel_id reg1 asm("1") = schid; int ccode; @@ -202,6 +267,14 @@ int xsch(struct subchannel_id schid) : "=d" (ccode) : "d" (reg1) : "cc"); + return ccode; +} + +int xsch(struct subchannel_id schid) +{ + int ccode; + + ccode = __xsch(schid); trace_s390_cio_xsch(schid, ccode); return ccode; diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 4bb5262f7..71bf9bded 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -686,6 +686,15 @@ static void qdio_kick_handler(struct qdio_q *q) q->qdio_error = 0; } +static inline int qdio_tasklet_schedule(struct qdio_q *q) +{ + if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) { + tasklet_schedule(&q->tasklet); + return 0; + } + return -EPERM; +} + static void __qdio_inbound_processing(struct qdio_q *q) { qperf_inc(q, tasklet_inbound); @@ -698,10 +707,8 @@ static void 
__qdio_inbound_processing(struct qdio_q *q) if (!qdio_inbound_q_done(q)) { /* means poll time is not yet over */ qperf_inc(q, tasklet_inbound_resched); - if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) { - tasklet_schedule(&q->tasklet); + if (!qdio_tasklet_schedule(q)) return; - } } qdio_stop_polling(q); @@ -711,8 +718,7 @@ static void __qdio_inbound_processing(struct qdio_q *q) */ if (!qdio_inbound_q_done(q)) { qperf_inc(q, tasklet_inbound_resched2); - if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) - tasklet_schedule(&q->tasklet); + qdio_tasklet_schedule(q); } } @@ -869,16 +875,15 @@ static void __qdio_outbound_processing(struct qdio_q *q) * is noticed and outbound_handler is called after some time. */ if (qdio_outbound_q_done(q)) - del_timer(&q->u.out.timer); + del_timer_sync(&q->u.out.timer); else - if (!timer_pending(&q->u.out.timer)) + if (!timer_pending(&q->u.out.timer) && + likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) mod_timer(&q->u.out.timer, jiffies + 10 * HZ); return; sched: - if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) - return; - tasklet_schedule(&q->tasklet); + qdio_tasklet_schedule(q); } /* outbound tasklet */ @@ -892,9 +897,7 @@ void qdio_outbound_timer(unsigned long data) { struct qdio_q *q = (struct qdio_q *)data; - if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) - return; - tasklet_schedule(&q->tasklet); + qdio_tasklet_schedule(q); } static inline void qdio_check_outbound_after_thinint(struct qdio_q *q) @@ -907,7 +910,7 @@ static inline void qdio_check_outbound_after_thinint(struct qdio_q *q) for_each_output_queue(q->irq_ptr, out, i) if (!qdio_outbound_q_done(out)) - tasklet_schedule(&out->tasklet); + qdio_tasklet_schedule(out); } static void __tiqdio_inbound_processing(struct qdio_q *q) @@ -929,10 +932,8 @@ static void __tiqdio_inbound_processing(struct qdio_q *q) if (!qdio_inbound_q_done(q)) { qperf_inc(q, tasklet_inbound_resched); - if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) { - tasklet_schedule(&q->tasklet); + if (!qdio_tasklet_schedule(q)) return; - } } qdio_stop_polling(q); @@ -942,8 +943,7 @@ static void __tiqdio_inbound_processing(struct qdio_q *q) */ if (!qdio_inbound_q_done(q)) { qperf_inc(q, tasklet_inbound_resched2); - if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) - tasklet_schedule(&q->tasklet); + qdio_tasklet_schedule(q); } } @@ -977,7 +977,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr) int i; struct qdio_q *q; - if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) + if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) return; for_each_input_queue(irq_ptr, q, i) { @@ -1003,7 +1003,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr) continue; if (need_siga_sync(q) && need_siga_sync_out_after_pci(q)) qdio_siga_sync_q(q); - tasklet_schedule(&q->tasklet); + qdio_tasklet_schedule(q); } } @@ -1066,10 +1066,12 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) { struct qdio_irq *irq_ptr = cdev->private->qdio_data; + struct subchannel_id schid; int cstat, dstat; if (!intparm || !irq_ptr) { - DBF_ERROR("qint:%4x", cdev->private->schid.sch_no); + ccw_device_get_schid(cdev, &schid); + DBF_ERROR("qint:%4x", schid.sch_no); return; } @@ -1122,12 +1124,14 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, int qdio_get_ssqd_desc(struct ccw_device *cdev, struct qdio_ssqd_desc *data) { + struct subchannel_id schid; if (!cdev || !cdev->private) return -EINVAL; - DBF_EVENT("get ssqd:%4x", 
cdev->private->schid.sch_no); - return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data); + ccw_device_get_schid(cdev, &schid); + DBF_EVENT("get ssqd:%4x", schid.sch_no); + return qdio_setup_get_ssqd(NULL, &schid, data); } EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc); @@ -1141,7 +1145,7 @@ static void qdio_shutdown_queues(struct ccw_device *cdev) tasklet_kill(&q->tasklet); for_each_output_queue(irq_ptr, q, i) { - del_timer(&q->u.out.timer); + del_timer_sync(&q->u.out.timer); tasklet_kill(&q->tasklet); } } @@ -1154,14 +1158,15 @@ static void qdio_shutdown_queues(struct ccw_device *cdev) int qdio_shutdown(struct ccw_device *cdev, int how) { struct qdio_irq *irq_ptr = cdev->private->qdio_data; + struct subchannel_id schid; int rc; - unsigned long flags; if (!irq_ptr) return -ENODEV; WARN_ON_ONCE(irqs_disabled()); - DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no); + ccw_device_get_schid(cdev, &schid); + DBF_EVENT("qshutdown:%4x", schid.sch_no); mutex_lock(&irq_ptr->setup_mutex); /* @@ -1184,7 +1189,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how) qdio_shutdown_debug_entries(irq_ptr); /* cleanup subchannel */ - spin_lock_irqsave(get_ccwdev_lock(cdev), flags); + spin_lock_irq(get_ccwdev_lock(cdev)); if (how & QDIO_FLAG_CLEANUP_USING_CLEAR) rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP); @@ -1198,12 +1203,12 @@ int qdio_shutdown(struct ccw_device *cdev, int how) } qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP); - spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); + spin_unlock_irq(get_ccwdev_lock(cdev)); wait_event_interruptible_timeout(cdev->private->wait_q, irq_ptr->state == QDIO_IRQ_STATE_INACTIVE || irq_ptr->state == QDIO_IRQ_STATE_ERR, 10 * HZ); - spin_lock_irqsave(get_ccwdev_lock(cdev), flags); + spin_lock_irq(get_ccwdev_lock(cdev)); no_cleanup: qdio_shutdown_thinint(irq_ptr); @@ -1211,7 +1216,7 @@ no_cleanup: /* restore interrupt handler */ if ((void *)cdev->handler == (void *)qdio_int_handler) cdev->handler = irq_ptr->orig_handler; - spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); + spin_unlock_irq(get_ccwdev_lock(cdev)); qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); mutex_unlock(&irq_ptr->setup_mutex); @@ -1228,11 +1233,13 @@ EXPORT_SYMBOL_GPL(qdio_shutdown); int qdio_free(struct ccw_device *cdev) { struct qdio_irq *irq_ptr = cdev->private->qdio_data; + struct subchannel_id schid; if (!irq_ptr) return -ENODEV; - DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no); + ccw_device_get_schid(cdev, &schid); + DBF_EVENT("qfree:%4x", schid.sch_no); DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned"); mutex_lock(&irq_ptr->setup_mutex); @@ -1251,9 +1258,11 @@ EXPORT_SYMBOL_GPL(qdio_free); */ int qdio_allocate(struct qdio_initialize *init_data) { + struct subchannel_id schid; struct qdio_irq *irq_ptr; - DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no); + ccw_device_get_schid(init_data->cdev, &schid); + DBF_EVENT("qallocate:%4x", schid.sch_no); if ((init_data->no_input_qs && !init_data->input_handler) || (init_data->no_output_qs && !init_data->output_handler)) @@ -1331,20 +1340,18 @@ static void qdio_detect_hsicq(struct qdio_irq *irq_ptr) */ int qdio_establish(struct qdio_initialize *init_data) { - struct qdio_irq *irq_ptr; struct ccw_device *cdev = init_data->cdev; - unsigned long saveflags; + struct subchannel_id schid; + struct qdio_irq *irq_ptr; int rc; - DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no); + ccw_device_get_schid(cdev, &schid); + DBF_EVENT("qestablish:%4x", schid.sch_no); irq_ptr = cdev->private->qdio_data; if (!irq_ptr) return 
-ENODEV; - if (cdev->private->state != DEV_STATE_ONLINE) - return -EINVAL; - mutex_lock(&irq_ptr->setup_mutex); qdio_setup_irq(init_data); @@ -1361,17 +1368,14 @@ int qdio_establish(struct qdio_initialize *init_data) irq_ptr->ccw.count = irq_ptr->equeue.count; irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr); - spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags); + spin_lock_irq(get_ccwdev_lock(cdev)); ccw_device_set_options_mask(cdev, 0); rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0); + spin_unlock_irq(get_ccwdev_lock(cdev)); if (rc) { DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no); DBF_ERROR("rc:%4x", rc); - } - spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); - - if (rc) { mutex_unlock(&irq_ptr->setup_mutex); qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); return rc; @@ -1407,19 +1411,17 @@ EXPORT_SYMBOL_GPL(qdio_establish); */ int qdio_activate(struct ccw_device *cdev) { + struct subchannel_id schid; struct qdio_irq *irq_ptr; int rc; - unsigned long saveflags; - DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no); + ccw_device_get_schid(cdev, &schid); + DBF_EVENT("qactivate:%4x", schid.sch_no); irq_ptr = cdev->private->qdio_data; if (!irq_ptr) return -ENODEV; - if (cdev->private->state != DEV_STATE_ONLINE) - return -EINVAL; - mutex_lock(&irq_ptr->setup_mutex); if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) { rc = -EBUSY; @@ -1431,19 +1433,17 @@ int qdio_activate(struct ccw_device *cdev) irq_ptr->ccw.count = irq_ptr->aqueue.count; irq_ptr->ccw.cda = 0; - spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags); + spin_lock_irq(get_ccwdev_lock(cdev)); ccw_device_set_options(cdev, CCWDEV_REPORT_ALL); rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE, 0, DOIO_DENY_PREFETCH); + spin_unlock_irq(get_ccwdev_lock(cdev)); if (rc) { DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no); DBF_ERROR("rc:%4x", rc); - } - spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); - - if (rc) goto out; + } if (is_thinint_irq(irq_ptr)) tiqdio_add_input_queues(irq_ptr); @@ -1585,10 +1585,11 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags, /* in case of SIGA errors we must process the error immediately */ if (used >= q->u.out.scan_threshold || rc) - tasklet_schedule(&q->tasklet); + qdio_tasklet_schedule(q); else /* free the SBALs in case of no further traffic */ - if (!timer_pending(&q->u.out.timer)) + if (!timer_pending(&q->u.out.timer) && + likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) mod_timer(&q->u.out.timer, jiffies + HZ); return rc; } diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 327255da1..ed92fb09f 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -169,6 +169,19 @@ static int ap_configuration_available(void) return test_facility(12); } +static inline struct ap_queue_status +__pqap_tapq(ap_qid_t qid, unsigned long *info) +{ + register unsigned long reg0 asm ("0") = qid; + register struct ap_queue_status reg1 asm ("1"); + register unsigned long reg2 asm ("2") = 0UL; + + asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */ + : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc"); + *info = reg2; + return reg1; +} + /** * ap_test_queue(): Test adjunct processor queue. 
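/*
 * Editorial sketch, not part of the patch above: the ioasm.c hunks and the
 * __pqap_tapq()/ap_test_queue() split follow one pattern - the raw
 * instruction wrapper becomes a static inline __helper(), while the public
 * function keeps the policy around it (facility checks, tracing such as
 * trace_s390_cio_stsch()).  The names below are invented and the raw
 * operation is stubbed out.
 */
static void trace_example_op(int arg, int cc)
{
	/* stand-in for a real tracepoint */
}

static inline int __example_op(int arg)
{
	/* stand-in for the inline-asm instruction wrapper */
	return arg ? 0 : -1;
}

int example_op(int arg)
{
	int cc = __example_op(arg);

	trace_example_op(arg, cc);	/* tracing stays in the public wrapper */
	return cc;
}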
* @qid: The AP queue number @@ -179,17 +192,15 @@ static int ap_configuration_available(void) static inline struct ap_queue_status ap_test_queue(ap_qid_t qid, unsigned long *info) { - register unsigned long reg0 asm ("0") = qid; - register struct ap_queue_status reg1 asm ("1"); - register unsigned long reg2 asm ("2") = 0UL; + struct ap_queue_status aqs; + unsigned long _info; if (test_facility(15)) - reg0 |= 1UL << 23; /* set APFT T bit*/ - asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */ - : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc"); + qid |= 1UL << 23; /* set APFT T bit*/ + aqs = __pqap_tapq(qid, &_info); if (info) - *info = reg2; - return reg1; + *info = _info; + return aqs; } /** @@ -237,14 +248,12 @@ ap_queue_interruption_control(ap_qid_t qid, void *ind) * * Returns 0 on success, or -EOPNOTSUPP. */ -static inline int ap_query_configuration(void) +static inline int __ap_query_configuration(void) { register unsigned long reg0 asm ("0") = 0x04000000UL; register unsigned long reg1 asm ("1") = -EINVAL; register void *reg2 asm ("2") = (void *) ap_configuration; - if (!ap_configuration) - return -EOPNOTSUPP; asm volatile( ".long 0xb2af0000\n" /* PQAP(QCI) */ "0: la %1,0\n" @@ -257,6 +266,13 @@ static inline int ap_query_configuration(void) return reg1; } +static inline int ap_query_configuration(void) +{ + if (!ap_configuration) + return -EOPNOTSUPP; + return __ap_query_configuration(); +} + /** * ap_init_configuration(): Allocate and query configuration array. */ @@ -346,6 +362,26 @@ static int ap_queue_enable_interruption(struct ap_device *ap_dev, void *ind) } } +static inline struct ap_queue_status +__nqap(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length) +{ + typedef struct { char _[length]; } msgblock; + register unsigned long reg0 asm ("0") = qid | 0x40000000UL; + register struct ap_queue_status reg1 asm ("1"); + register unsigned long reg2 asm ("2") = (unsigned long) msg; + register unsigned long reg3 asm ("3") = (unsigned long) length; + register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32); + register unsigned long reg5 asm ("5") = psmid & 0xffffffff; + + asm volatile ( + "0: .long 0xb2ad0042\n" /* NQAP */ + " brc 2,0b" + : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3) + : "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg) + : "cc"); + return reg1; +} + /** * __ap_send(): Send message to adjunct processor queue. 
* @qid: The AP queue number @@ -363,24 +399,9 @@ static inline struct ap_queue_status __ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length, unsigned int special) { - typedef struct { char _[length]; } msgblock; - register unsigned long reg0 asm ("0") = qid | 0x40000000UL; - register struct ap_queue_status reg1 asm ("1"); - register unsigned long reg2 asm ("2") = (unsigned long) msg; - register unsigned long reg3 asm ("3") = (unsigned long) length; - register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32); - register unsigned long reg5 asm ("5") = psmid & 0xffffffff; - if (special == 1) - reg0 |= 0x400000UL; - - asm volatile ( - "0: .long 0xb2ad0042\n" /* NQAP */ - " brc 2,0b" - : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3) - : "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg) - : "cc" ); - return reg1; + qid |= 0x400000UL; + return __nqap(qid, psmid, msg, length); } int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length) @@ -447,6 +468,8 @@ int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length) { struct ap_queue_status status; + if (msg == NULL) + return -EINVAL; status = __ap_recv(qid, psmid, msg, length); switch (status.response_code) { case AP_RESPONSE_NORMAL: @@ -596,6 +619,8 @@ static enum ap_wait ap_sm_read(struct ap_device *ap_dev) { struct ap_queue_status status; + if (!ap_dev->reply) + return AP_WAIT_NONE; status = ap_sm_recv(ap_dev); switch (status.response_code) { case AP_RESPONSE_NORMAL: @@ -617,6 +642,31 @@ static enum ap_wait ap_sm_read(struct ap_device *ap_dev) } /** + * ap_sm_suspend_read(): Receive pending reply messages from an AP device + * without changing the device state in between. In suspend mode we don't + * allow sending new requests, therefore just fetch pending replies. + * @ap_dev: pointer to the AP device + * + * Returns AP_WAIT_NONE or AP_WAIT_AGAIN + */ +static enum ap_wait ap_sm_suspend_read(struct ap_device *ap_dev) +{ + struct ap_queue_status status; + + if (!ap_dev->reply) + return AP_WAIT_NONE; + status = ap_sm_recv(ap_dev); + switch (status.response_code) { + case AP_RESPONSE_NORMAL: + if (ap_dev->queue_count > 0) + return AP_WAIT_AGAIN; + /* fall through */ + default: + return AP_WAIT_NONE; + } +} + +/** * ap_sm_write(): Send messages from the request queue to an AP device. 
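/*
 * Editorial sketch, not part of the patch above: ap_jumptable[] dispatches
 * on (state, event) pairs, and the new AP_STATE_SUSPEND_WAIT/AP_EVENT_POLL
 * entry points at ap_sm_suspend_read() so that no new requests are issued
 * while suspended.  Self-contained illustration of the dispatch pattern,
 * all names invented:
 */
#include <stdio.h>

enum state { ST_WORKING, ST_SUSPEND, NR_STATES };
enum event { EV_POLL, EV_TIMEOUT, NR_EVENTS };

typedef int (*handler_fn)(void);

static int sm_read(void)         { puts("read");         return 0; }
static int sm_suspend_read(void) { puts("suspend read"); return 0; }
static int sm_nop(void)          { return 0; }

static handler_fn jumptable[NR_STATES][NR_EVENTS] = {
	[ST_WORKING] = { [EV_POLL] = sm_read,         [EV_TIMEOUT] = sm_nop },
	[ST_SUSPEND] = { [EV_POLL] = sm_suspend_read, [EV_TIMEOUT] = sm_nop },
};

int main(void)
{
	return jumptable[ST_SUSPEND][EV_POLL]();	/* dispatch one event */
}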
* @ap_dev: pointer to the AP device * @@ -717,7 +767,7 @@ static enum ap_wait ap_sm_reset_wait(struct ap_device *ap_dev) struct ap_queue_status status; unsigned long info; - if (ap_dev->queue_count > 0) + if (ap_dev->queue_count > 0 && ap_dev->reply) /* Try to read a completed message and get the status */ status = ap_sm_recv(ap_dev); else @@ -757,7 +807,7 @@ static enum ap_wait ap_sm_setirq_wait(struct ap_device *ap_dev) struct ap_queue_status status; unsigned long info; - if (ap_dev->queue_count > 0) + if (ap_dev->queue_count > 0 && ap_dev->reply) /* Try to read a completed message and get the status */ status = ap_sm_recv(ap_dev); else @@ -813,7 +863,7 @@ static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = { [AP_EVENT_TIMEOUT] = ap_sm_reset, }, [AP_STATE_SUSPEND_WAIT] = { - [AP_EVENT_POLL] = ap_sm_read, + [AP_EVENT_POLL] = ap_sm_suspend_read, [AP_EVENT_TIMEOUT] = ap_sm_nop, }, [AP_STATE_BORKED] = { @@ -1314,6 +1364,17 @@ static struct bus_type ap_bus_type = { .resume = ap_dev_resume, }; +void ap_device_init_reply(struct ap_device *ap_dev, + struct ap_message *reply) +{ + ap_dev->reply = reply; + + spin_lock_bh(&ap_dev->lock); + ap_sm_wait(ap_sm_event(ap_dev, AP_EVENT_POLL)); + spin_unlock_bh(&ap_dev->lock); +} +EXPORT_SYMBOL(ap_device_init_reply); + static int ap_device_probe(struct device *dev) { struct ap_device *ap_dev = to_ap_dev(dev); @@ -1758,7 +1819,8 @@ int __init ap_module_init(void) if (ap_domain_index < -1 || ap_domain_index > max_domain_id) { pr_warn("%d is not a valid cryptographic domain\n", ap_domain_index); - return -EINVAL; + rc = -EINVAL; + goto out_free; } /* In resume callback we need to know if the user had set the domain. * If so, we can not just reset it. @@ -1831,6 +1893,7 @@ out: unregister_reset_call(&ap_reset_call); if (ap_using_interrupts()) unregister_adapter_interrupt(&ap_airq); +out_free: kfree(ap_configuration); return rc; } diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index 6adcbdf22..d7fdf5c02 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h @@ -262,6 +262,7 @@ void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg); void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg); void ap_flush_queue(struct ap_device *ap_dev); void ap_bus_force_rescan(void); +void ap_device_init_reply(struct ap_device *ap_dev, struct ap_message *ap_msg); int ap_module_init(void); void ap_module_exit(void); diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c index 1e849d6e1..15104aaa0 100644 --- a/drivers/s390/crypto/zcrypt_cex2a.c +++ b/drivers/s390/crypto/zcrypt_cex2a.c @@ -126,7 +126,7 @@ static int zcrypt_cex2a_probe(struct ap_device *ap_dev) MSGTYPE50_VARIANT_DEFAULT); zdev->ap_dev = ap_dev; zdev->online = 1; - ap_dev->reply = &zdev->reply; + ap_device_init_reply(ap_dev, &zdev->reply); ap_dev->private = zdev; rc = zcrypt_device_register(zdev); if (rc) { diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c index bb3908818..ccb2e78eb 100644 --- a/drivers/s390/crypto/zcrypt_cex4.c +++ b/drivers/s390/crypto/zcrypt_cex4.c @@ -147,7 +147,7 @@ static int zcrypt_cex4_probe(struct ap_device *ap_dev) return -ENODEV; zdev->ap_dev = ap_dev; zdev->online = 1; - ap_dev->reply = &zdev->reply; + ap_device_init_reply(ap_dev, &zdev->reply); ap_dev->private = zdev; rc = zcrypt_device_register(zdev); if (rc) { diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c index f41852768..df8f0c4da 100644 
--- a/drivers/s390/crypto/zcrypt_pcixcc.c +++ b/drivers/s390/crypto/zcrypt_pcixcc.c @@ -327,7 +327,7 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev) else zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME, MSGTYPE06_VARIANT_NORNG); - ap_dev->reply = &zdev->reply; + ap_device_init_reply(ap_dev, &zdev->reply); ap_dev->private = zdev; rc = zcrypt_device_register(zdev); if (rc) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index ec2e014e8..6d4b68c48 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -19,6 +19,7 @@ #include <linux/seq_file.h> #include <linux/ethtool.h> #include <linux/hashtable.h> +#include <linux/ip.h> #include <net/ipv6.h> #include <net/if_inet6.h> @@ -144,6 +145,7 @@ struct qeth_perf_stats { unsigned int sg_alloc_page_rx; unsigned int tx_csum; unsigned int tx_lin; + unsigned int tx_linfail; }; /* Routing stuff */ @@ -559,7 +561,6 @@ enum qeth_ip_types { QETH_IP_TYPE_NORMAL, QETH_IP_TYPE_VIPA, QETH_IP_TYPE_RXIP, - QETH_IP_TYPE_DEL_ALL_MC, }; enum qeth_cmd_buffer_state { @@ -740,17 +741,10 @@ struct qeth_vlan_vid { unsigned short vid; }; -enum qeth_mac_disposition { - QETH_DISP_MAC_DELETE = 0, - QETH_DISP_MAC_DO_NOTHING = 1, - QETH_DISP_MAC_ADD = 2, -}; - -struct qeth_mac { - u8 mac_addr[OSA_ADDR_LEN]; - u8 is_uc:1; - u8 disp_flag:2; - struct hlist_node hnode; +enum qeth_addr_disposition { + QETH_DISP_ADDR_DELETE = 0, + QETH_DISP_ADDR_DO_NOTHING = 1, + QETH_DISP_ADDR_ADD = 2, }; struct qeth_rx { @@ -798,6 +792,8 @@ struct qeth_card { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; struct list_head vid_list; DECLARE_HASHTABLE(mac_htable, 4); + DECLARE_HASHTABLE(ip_htable, 4); + DECLARE_HASHTABLE(ip_mc_htable, 4); struct work_struct kernel_thread_starter; spinlock_t thread_mask_lock; unsigned long thread_start_mask; @@ -805,8 +801,6 @@ struct qeth_card { unsigned long thread_running_mask; struct task_struct *recovery_task; spinlock_t ip_lock; - struct list_head ip_list; - struct list_head *ip_tbd_list; struct qeth_ipato ipato; struct list_head cmd_waiter_list; /* QDIO buffer handling */ @@ -844,6 +838,19 @@ struct qeth_trap_id { /*some helper functions*/ #define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "") +/** + * qeth_get_elements_for_range() - find number of SBALEs to cover range. + * @start: Start of the address range. + * @end: Address after the end of the range. + * + * Returns the number of pages, and thus QDIO buffer elements, needed to cover + * the specified address range. 
+ */ +static inline int qeth_get_elements_for_range(addr_t start, addr_t end) +{ + return PFN_UP(end - 1) - PFN_DOWN(start); +} + static inline int qeth_get_micros(void) { return (int) (get_tod_clock() >> 12); @@ -865,6 +872,11 @@ static inline int qeth_get_ip_version(struct sk_buff *skb) } } +static inline int qeth_get_ip_protocol(struct sk_buff *skb) +{ + return ip_hdr(skb)->protocol; +} + static inline void qeth_put_buffer_pool_entry(struct qeth_card *card, struct qeth_buffer_pool_entry *entry) { @@ -981,12 +993,14 @@ int qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *, __u16, int (*reply_cb)(struct qeth_card *, struct qeth_reply *, unsigned long), void *); +int qeth_setassparms_cb(struct qeth_card *, struct qeth_reply *, unsigned long); struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *, enum qeth_ipa_funcs, __u16, __u16, enum qeth_prot_versions); -int qeth_start_ipa_tx_checksum(struct qeth_card *); -int qeth_set_rx_csum(struct qeth_card *, int); +int qeth_set_features(struct net_device *, netdev_features_t); +int qeth_recover_features(struct net_device *); +netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t); /* exports for OSN */ int qeth_osn_assist(struct net_device *, void *, int); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index b7b74776e..20cf29613 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -1464,8 +1464,6 @@ static int qeth_setup_card(struct qeth_card *card) card->thread_allowed_mask = 0; card->thread_running_mask = 0; INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread); - INIT_LIST_HEAD(&card->ip_list); - INIT_LIST_HEAD(card->ip_tbd_list); INIT_LIST_HEAD(&card->cmd_waiter_list); init_waitqueue_head(&card->wait_q); /* initial options */ @@ -1500,11 +1498,6 @@ static struct qeth_card *qeth_alloc_card(void) if (!card) goto out; QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); - card->ip_tbd_list = kzalloc(sizeof(struct list_head), GFP_KERNEL); - if (!card->ip_tbd_list) { - QETH_DBF_TEXT(SETUP, 0, "iptbdnom"); - goto out_card; - } if (qeth_setup_channel(&card->read)) goto out_ip; if (qeth_setup_channel(&card->write)) @@ -1517,8 +1510,6 @@ static struct qeth_card *qeth_alloc_card(void) out_channel: qeth_clean_channel(&card->read); out_ip: - kfree(card->ip_tbd_list); -out_card: kfree(card); out: return NULL; @@ -3628,7 +3619,8 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, int e; e = 0; - while (buffer->element[e].addr) { + while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) && + buffer->element[e].addr) { unsigned long phys_aob_addr; phys_aob_addr = (unsigned long) buffer->element[e].addr; @@ -3757,6 +3749,14 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev, } EXPORT_SYMBOL_GPL(qeth_qdio_output_handler); +/* We cannot use outbound queue 3 for unicast packets on HiperSockets */ +static inline int qeth_cut_iqd_prio(struct qeth_card *card, int queue_num) +{ + if ((card->info.type == QETH_CARD_TYPE_IQD) && (queue_num == 3)) + return 2; + return queue_num; +} + /** * Note: Function assumes that we have 4 outbound queues. 
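/*
 * Editorial sketch, not part of the patch above: worked example of the
 * page-counting arithmetic in qeth_get_elements_for_range().  Assumes
 * 4 KiB pages (PAGE_SHIFT == 12); PFN_DOWN/PFN_UP are open-coded so the
 * snippet is self-contained and can be run in user space.
 */
#include <assert.h>

#define SKETCH_PAGE_SHIFT 12
#define SKETCH_PFN_DOWN(x) ((x) >> SKETCH_PAGE_SHIFT)
#define SKETCH_PFN_UP(x)   (((x) + (1UL << SKETCH_PAGE_SHIFT) - 1) >> SKETCH_PAGE_SHIFT)

static unsigned long elements_for_range(unsigned long start, unsigned long end)
{
	/* pages (and thus QDIO buffer elements) touched by [start, end) */
	return SKETCH_PFN_UP(end - 1) - SKETCH_PFN_DOWN(start);
}

int main(void)
{
	assert(elements_for_range(0x0ff0, 0x1010) == 2);	/* crosses a page boundary */
	assert(elements_for_range(0x2100, 0x2200) == 1);	/* fits in one page */
	return 0;
}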
*/ @@ -3784,9 +3784,9 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, return card->qdio.default_out_queue; } if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC) - return ~tos >> 6 & 3; + return qeth_cut_iqd_prio(card, ~tos >> 6 & 3); if (tos & IPTOS_MINCOST) - return 3; + return qeth_cut_iqd_prio(card, 3); if (tos & IPTOS_RELIABILITY) return 2; if (tos & IPTOS_THROUGHPUT) @@ -3797,11 +3797,12 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, case QETH_PRIO_Q_ING_SKB: if (skb->priority > 5) return 0; - return ~skb->priority >> 1 & 3; + return qeth_cut_iqd_prio(card, ~skb->priority >> 1 & 3); case QETH_PRIO_Q_ING_VLAN: tci = &((struct ethhdr *)skb->data)->h_proto; if (*tci == ETH_P_8021Q) - return ~*(tci + 1) >> (VLAN_PRIO_SHIFT + 1) & 3; + return qeth_cut_iqd_prio(card, ~*(tci + 1) >> + (VLAN_PRIO_SHIFT + 1) & 3); break; default: break; @@ -3810,41 +3811,54 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, } EXPORT_SYMBOL_GPL(qeth_get_priority_queue); +/** + * qeth_get_elements_for_frags() - find number of SBALEs for skb frags. + * @skb: SKB address + * + * Returns the number of pages, and thus QDIO buffer elements, needed to cover + * fragmented part of the SKB. Returns zero for linear SKB. + */ int qeth_get_elements_for_frags(struct sk_buff *skb) { - int cnt, length, e, elements = 0; - struct skb_frag_struct *frag; - char *data; + int cnt, elements = 0; for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { - frag = &skb_shinfo(skb)->frags[cnt]; - data = (char *)page_to_phys(skb_frag_page(frag)) + - frag->page_offset; - length = frag->size; - e = PFN_UP((unsigned long)data + length - 1) - - PFN_DOWN((unsigned long)data); - elements += e; + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[cnt]; + + elements += qeth_get_elements_for_range( + (addr_t)skb_frag_address(frag), + (addr_t)skb_frag_address(frag) + skb_frag_size(frag)); } return elements; } EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags); +/** + * qeth_get_elements_no() - find number of SBALEs for skb data, inc. frags. + * @card: qeth card structure, to check max. elems. + * @skb: SKB address + * @extra_elems: extra elems needed, to check against max. + * + * Returns the number of pages, and thus QDIO buffer elements, needed to cover + * skb data, including linear part and fragments. Checks if the result plus + * extra_elems fits under the limit for the card. Returns 0 if it does not. + * Note: extra_elems is not included in the returned result. + */ int qeth_get_elements_no(struct qeth_card *card, - struct sk_buff *skb, int elems) + struct sk_buff *skb, int extra_elems) { - int dlen = skb->len - skb->data_len; - int elements_needed = PFN_UP((unsigned long)skb->data + dlen - 1) - - PFN_DOWN((unsigned long)skb->data); - - elements_needed += qeth_get_elements_for_frags(skb); + int elements = qeth_get_elements_for_range( + (addr_t)skb->data, + (addr_t)skb->data + skb_headlen(skb)) + + qeth_get_elements_for_frags(skb); - if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { + if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { QETH_DBF_MESSAGE(2, "Invalid size of IP packet " "(Number=%d / Length=%d). 
Discarded.\n", - (elements_needed+elems), skb->len); + elements + extra_elems, skb->len); return 0; } - return elements_needed; + return elements; } EXPORT_SYMBOL_GPL(qeth_get_elements_no); @@ -3859,7 +3873,7 @@ int qeth_hdr_chk_and_bounce(struct sk_buff *skb, struct qeth_hdr **hdr, int len) rest = len - inpage; if (rest > hroom) return 1; - memmove(skb->data - rest, skb->data, skb->len - skb->data_len); + memmove(skb->data - rest, skb->data, skb_headlen(skb)); skb->data -= rest; skb->tail -= rest; *hdr = (struct qeth_hdr *)skb->data; @@ -3873,7 +3887,7 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill, int offset) { - int length = skb->len - skb->data_len; + int length = skb_headlen(skb); int length_here; int element; char *data; @@ -4967,7 +4981,6 @@ static void qeth_core_free_card(struct qeth_card *card) qeth_clean_channel(&card->write); if (card->dev) free_netdev(card->dev); - kfree(card->ip_tbd_list); qeth_free_qdio_buffers(card); unregister_service_level(&card->qeth_service_level); kfree(card); @@ -5265,8 +5278,8 @@ no_mem: } EXPORT_SYMBOL_GPL(qeth_core_get_next_skb); -static int qeth_setassparms_cb(struct qeth_card *card, - struct qeth_reply *reply, unsigned long data) +int qeth_setassparms_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) { struct qeth_ipa_cmd *cmd; @@ -5294,6 +5307,7 @@ static int qeth_setassparms_cb(struct qeth_card *card, return 0; } +EXPORT_SYMBOL_GPL(qeth_setassparms_cb); struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card, enum qeth_ipa_funcs ipa_func, @@ -5788,6 +5802,7 @@ static struct { {"tx do_QDIO count"}, {"tx csum"}, {"tx lin"}, + {"tx linfail"}, {"cq handler count"}, {"cq handler time"} }; @@ -5848,8 +5863,9 @@ void qeth_core_get_ethtool_stats(struct net_device *dev, data[32] = card->perf_stats.outbound_do_qdio_cnt; data[33] = card->perf_stats.tx_csum; data[34] = card->perf_stats.tx_lin; - data[35] = card->perf_stats.cq_cnt; - data[36] = card->perf_stats.cq_time; + data[35] = card->perf_stats.tx_linfail; + data[36] = card->perf_stats.cq_cnt; + data[37] = card->perf_stats.cq_time; } EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats); @@ -6048,74 +6064,165 @@ int qeth_core_ethtool_get_settings(struct net_device *netdev, } EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_settings); -static int qeth_send_checksum_command(struct qeth_card *card) +static int qeth_send_checksum_on(struct qeth_card *card, int cstype) { + long rxtx_arg; int rc; - rc = qeth_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM, - IPA_CMD_ASS_START, 0); + rc = qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_START, 0); if (rc) { - dev_warn(&card->gdev->dev, "Starting HW checksumming for %s " - "failed, using SW checksumming\n", - QETH_CARD_IFNAME(card)); + dev_warn(&card->gdev->dev, + "Starting HW checksumming for %s failed, using SW checksumming\n", + QETH_CARD_IFNAME(card)); return rc; } - rc = qeth_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM, - IPA_CMD_ASS_ENABLE, - card->info.csum_mask); + rxtx_arg = (cstype == IPA_OUTBOUND_CHECKSUM) ? 
card->info.tx_csum_mask + : card->info.csum_mask; + rc = qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_ENABLE, + rxtx_arg); if (rc) { - dev_warn(&card->gdev->dev, "Enabling HW checksumming for %s " - "failed, using SW checksumming\n", - QETH_CARD_IFNAME(card)); + dev_warn(&card->gdev->dev, + "Enabling HW checksumming for %s failed, using SW checksumming\n", + QETH_CARD_IFNAME(card)); return rc; } + + dev_info(&card->gdev->dev, "HW Checksumming (%sbound) enabled\n", + cstype == IPA_INBOUND_CHECKSUM ? "in" : "out"); return 0; } -int qeth_set_rx_csum(struct qeth_card *card, int on) +static int qeth_set_ipa_csum(struct qeth_card *card, int on, int cstype) { int rc; if (on) { - rc = qeth_send_checksum_command(card); + rc = qeth_send_checksum_on(card, cstype); if (rc) return -EIO; - dev_info(&card->gdev->dev, - "HW Checksumming (inbound) enabled\n"); } else { - rc = qeth_send_simple_setassparms(card, - IPA_INBOUND_CHECKSUM, IPA_CMD_ASS_STOP, 0); + rc = qeth_send_simple_setassparms(card, cstype, + IPA_CMD_ASS_STOP, 0); if (rc) return -EIO; } return 0; } -EXPORT_SYMBOL_GPL(qeth_set_rx_csum); -int qeth_start_ipa_tx_checksum(struct qeth_card *card) +static int qeth_set_ipa_tso(struct qeth_card *card, int on) { - int rc = 0; + int rc; - if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) - return rc; - rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_CHECKSUM, - IPA_CMD_ASS_START, 0); - if (rc) - goto err_out; - rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_CHECKSUM, - IPA_CMD_ASS_ENABLE, - card->info.tx_csum_mask); - if (rc) - goto err_out; + QETH_CARD_TEXT(card, 3, "sttso"); - dev_info(&card->gdev->dev, "HW TX Checksumming enabled\n"); - return rc; -err_out: - dev_warn(&card->gdev->dev, "Enabling HW TX checksumming for %s " - "failed, using SW TX checksumming\n", QETH_CARD_IFNAME(card)); + if (on) { + rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO, + IPA_CMD_ASS_START, 0); + if (rc) { + dev_warn(&card->gdev->dev, + "Starting outbound TCP segmentation offload for %s failed\n", + QETH_CARD_IFNAME(card)); + return -EIO; + } + dev_info(&card->gdev->dev, "Outbound TSO enabled\n"); + } else { + rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO, + IPA_CMD_ASS_STOP, 0); + } return rc; } -EXPORT_SYMBOL_GPL(qeth_start_ipa_tx_checksum); + +/* try to restore device features on a device after recovery */ +int qeth_recover_features(struct net_device *dev) +{ + struct qeth_card *card = dev->ml_priv; + netdev_features_t recover = dev->features; + + if (recover & NETIF_F_IP_CSUM) { + if (qeth_set_ipa_csum(card, 1, IPA_OUTBOUND_CHECKSUM)) + recover ^= NETIF_F_IP_CSUM; + } + if (recover & NETIF_F_RXCSUM) { + if (qeth_set_ipa_csum(card, 1, IPA_INBOUND_CHECKSUM)) + recover ^= NETIF_F_RXCSUM; + } + if (recover & NETIF_F_TSO) { + if (qeth_set_ipa_tso(card, 1)) + recover ^= NETIF_F_TSO; + } + + if (recover == dev->features) + return 0; + + dev_warn(&card->gdev->dev, + "Device recovery failed to restore all offload features\n"); + dev->features = recover; + return -EIO; +} +EXPORT_SYMBOL_GPL(qeth_recover_features); + +int qeth_set_features(struct net_device *dev, netdev_features_t features) +{ + struct qeth_card *card = dev->ml_priv; + netdev_features_t changed = dev->features ^ features; + int rc = 0; + + QETH_DBF_TEXT(SETUP, 2, "setfeat"); + QETH_DBF_HEX(SETUP, 2, &features, sizeof(features)); + + if ((changed & NETIF_F_IP_CSUM)) { + rc = qeth_set_ipa_csum(card, + features & NETIF_F_IP_CSUM ? 
1 : 0, + IPA_OUTBOUND_CHECKSUM); + if (rc) + changed ^= NETIF_F_IP_CSUM; + } + if ((changed & NETIF_F_RXCSUM)) { + rc = qeth_set_ipa_csum(card, + features & NETIF_F_RXCSUM ? 1 : 0, + IPA_INBOUND_CHECKSUM); + if (rc) + changed ^= NETIF_F_RXCSUM; + } + if ((changed & NETIF_F_TSO)) { + rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO ? 1 : 0); + if (rc) + changed ^= NETIF_F_TSO; + } + + /* everything changed successfully? */ + if ((dev->features ^ features) == changed) + return 0; + /* something went wrong. save changed features and return error */ + dev->features ^= changed; + return -EIO; +} +EXPORT_SYMBOL_GPL(qeth_set_features); + +netdev_features_t qeth_fix_features(struct net_device *dev, + netdev_features_t features) +{ + struct qeth_card *card = dev->ml_priv; + + QETH_DBF_TEXT(SETUP, 2, "fixfeat"); + if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) + features &= ~NETIF_F_IP_CSUM; + if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) + features &= ~NETIF_F_RXCSUM; + if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) { + features &= ~NETIF_F_TSO; + dev_info(&card->gdev->dev, "Outbound TSO not supported on %s\n", + QETH_CARD_IFNAME(card)); + } + /* if the card isn't up, remove features that require hw changes */ + if (card->state == CARD_STATE_DOWN || + card->state == CARD_STATE_RECOVER) + features = features & ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM | + NETIF_F_TSO); + QETH_DBF_HEX(SETUP, 2, &features, sizeof(features)); + return features; +} +EXPORT_SYMBOL_GPL(qeth_fix_features); static int __init qeth_core_init(void) { diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c index e6e5b9671..75b29fd2f 100644 --- a/drivers/s390/net/qeth_core_sys.c +++ b/drivers/s390/net/qeth_core_sys.c @@ -243,6 +243,10 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev, card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING; card->qdio.default_out_queue = 2; } else if (sysfs_streq(buf, "no_prio_queueing:3")) { + if (card->info.type == QETH_CARD_TYPE_IQD) { + rc = -EPERM; + goto out; + } card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING; card->qdio.default_out_queue = 3; } else if (sysfs_streq(buf, "no_prio_queueing")) { diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h index 076755640..29d9fb389 100644 --- a/drivers/s390/net/qeth_l2.h +++ b/drivers/s390/net/qeth_l2.h @@ -12,4 +12,11 @@ int qeth_l2_create_device_attributes(struct device *); void qeth_l2_remove_device_attributes(struct device *); void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card); +struct qeth_mac { + u8 mac_addr[OSA_ADDR_LEN]; + u8 is_uc:1; + u8 disp_flag:2; + struct hlist_node hnode; +}; + #endif /* __QETH_L2_H__ */ diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index df036b872..bb27058fa 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -404,38 +404,6 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, return rc; } -static netdev_features_t qeth_l2_fix_features(struct net_device *dev, - netdev_features_t features) -{ - struct qeth_card *card = dev->ml_priv; - - QETH_DBF_TEXT(SETUP, 2, "fixfeat"); - if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) - features &= ~NETIF_F_IP_CSUM; - if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) - features &= ~NETIF_F_RXCSUM; - QETH_DBF_HEX(SETUP, 2, &features, sizeof(features)); - return features; -} - -static int qeth_l2_set_features(struct net_device *dev, - netdev_features_t features) -{ - struct qeth_card *card = dev->ml_priv; - netdev_features_t changed 
= dev->features ^ features; - - QETH_DBF_TEXT(SETUP, 2, "setfeat"); - QETH_DBF_HEX(SETUP, 2, &features, sizeof(features)); - - if (card->state == CARD_STATE_DOWN || - card->state == CARD_STATE_RECOVER) - return 0; - - if (!(changed & NETIF_F_RXCSUM)) - return 0; - return qeth_set_rx_csum(card, features & NETIF_F_RXCSUM ? 1 : 0); -} - static void qeth_l2_stop_card(struct qeth_card *card, int recovery_mode) { QETH_DBF_TEXT(SETUP , 2, "stopcard"); @@ -780,7 +748,7 @@ qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha, u8 is_uc) qeth_l2_mac_hash(ha->addr)) { if (is_uc == mac->is_uc && !memcmp(ha->addr, mac->mac_addr, OSA_ADDR_LEN)) { - mac->disp_flag = QETH_DISP_MAC_DO_NOTHING; + mac->disp_flag = QETH_DISP_ADDR_DO_NOTHING; return; } } @@ -792,7 +760,7 @@ qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha, u8 is_uc) memcpy(mac->mac_addr, ha->addr, OSA_ADDR_LEN); mac->is_uc = is_uc; - mac->disp_flag = QETH_DISP_MAC_ADD; + mac->disp_flag = QETH_DISP_ADDR_ADD; hash_add(card->mac_htable, &mac->hnode, qeth_l2_mac_hash(mac->mac_addr)); @@ -825,7 +793,7 @@ static void qeth_l2_set_rx_mode(struct net_device *dev) qeth_l2_add_mac(card, ha, 1); hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) { - if (mac->disp_flag == QETH_DISP_MAC_DELETE) { + if (mac->disp_flag == QETH_DISP_ADDR_DELETE) { if (!mac->is_uc) rc = qeth_l2_send_delgroupmac(card, mac->mac_addr); @@ -837,15 +805,15 @@ static void qeth_l2_set_rx_mode(struct net_device *dev) hash_del(&mac->hnode); kfree(mac); - } else if (mac->disp_flag == QETH_DISP_MAC_ADD) { + } else if (mac->disp_flag == QETH_DISP_ADDR_ADD) { rc = qeth_l2_write_mac(card, mac); if (rc) { hash_del(&mac->hnode); kfree(mac); } else - mac->disp_flag = QETH_DISP_MAC_DELETE; + mac->disp_flag = QETH_DISP_ADDR_DELETE; } else - mac->disp_flag = QETH_DISP_MAC_DELETE; + mac->disp_flag = QETH_DISP_ADDR_DELETE; } spin_unlock_bh(&card->mclock); @@ -869,6 +837,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) int data_offset = -1; int elements_needed = 0; int hd_len = 0; + int nr_frags; if (card->qdio.do_prio_queueing || (cast_type && card->info.is_multicast_different)) @@ -892,6 +861,23 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) } netif_stop_queue(dev); + /* fix hardware limitation: as long as we do not have sbal + * chaining we can not send long frag lists + */ + if ((card->info.type != QETH_CARD_TYPE_IQD) && + !qeth_get_elements_no(card, new_skb, 0)) { + int lin_rc = skb_linearize(new_skb); + + if (card->options.performance_stats) { + if (lin_rc) + card->perf_stats.tx_linfail++; + else + card->perf_stats.tx_lin++; + } + if (lin_rc) + goto tx_drop; + } + if (card->info.type == QETH_CARD_TYPE_OSN) hdr = (struct qeth_hdr *)skb->data; else { @@ -943,6 +929,14 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) if (!rc) { card->stats.tx_packets++; card->stats.tx_bytes += tx_bytes; + if (card->options.performance_stats) { + nr_frags = skb_shinfo(new_skb)->nr_frags; + if (nr_frags) { + card->perf_stats.sg_skbs_sent++; + /* nr_frags + skb->data */ + card->perf_stats.sg_frags_sent += nr_frags + 1; + } + } if (new_skb != skb) dev_kfree_skb_any(skb); rc = NETDEV_TX_OK; @@ -1087,8 +1081,8 @@ static const struct net_device_ops qeth_l2_netdev_ops = { .ndo_vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid, .ndo_tx_timeout = qeth_tx_timeout, - .ndo_fix_features = qeth_l2_fix_features, - .ndo_set_features = 
qeth_l2_set_features + .ndo_fix_features = qeth_fix_features, + .ndo_set_features = qeth_set_features }; static int qeth_l2_setup_netdev(struct qeth_card *card) @@ -1119,12 +1113,22 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) &qeth_l2_ethtool_ops : &qeth_l2_osn_ops; card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) { - card->dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM; - /* Turn on RX offloading per default */ - card->dev->features |= NETIF_F_RXCSUM; + card->dev->hw_features = NETIF_F_SG; + card->dev->vlan_features = NETIF_F_SG; + /* OSA 3S and earlier has no RX/TX support */ + if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) { + card->dev->hw_features |= NETIF_F_IP_CSUM; + card->dev->vlan_features |= NETIF_F_IP_CSUM; + } + if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) { + card->dev->hw_features |= NETIF_F_RXCSUM; + card->dev->vlan_features |= NETIF_F_RXCSUM; + } } card->info.broadcast_capable = 1; qeth_l2_request_initial_mac(card); + card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) * + PAGE_SIZE; SET_NETDEV_DEV(card->dev, &card->gdev->dev); netif_napi_add(card->dev, &card->napi, qeth_l2_poll, QETH_NAPI_WEIGHT); netif_carrier_off(card->dev); @@ -1136,9 +1140,6 @@ static int qeth_l2_start_ipassists(struct qeth_card *card) /* configure isolation level */ if (qeth_set_access_ctrl_online(card, 0)) return -ENODEV; - if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) - qeth_set_rx_csum(card, 1); - qeth_start_ipa_tx_checksum(card); return 0; } @@ -1207,7 +1208,8 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) contin: if ((card->info.type == QETH_CARD_TYPE_OSD) || (card->info.type == QETH_CARD_TYPE_OSX)) { - if (qeth_l2_start_ipassists(card)) + rc = qeth_l2_start_ipassists(card); + if (rc) goto out_remove; } @@ -1241,6 +1243,9 @@ contin: } /* this also sets saved unicast addresses */ qeth_l2_set_rx_mode(card->dev); + rtnl_lock(); + qeth_recover_features(card->dev); + rtnl_unlock(); } /* let user_space know that device is online */ kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); @@ -1801,6 +1806,12 @@ static int qeth_bridgeport_makerc(struct qeth_card *card, dev_err(&card->gdev->dev, "The device is not configured as a Bridge Port\n"); break; + case 0x2B10: + case 0x0010: /* OS mismatch */ + rc = -EPERM; + dev_err(&card->gdev->dev, + "A Bridge Port is already configured by a different operating system\n"); + break; case 0x2B14: case 0x0014: /* Another device is Primary */ switch (setcmd) { diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h index 551a4b4c0..26f79533e 100644 --- a/drivers/s390/net/qeth_l3.h +++ b/drivers/s390/net/qeth_l3.h @@ -10,16 +10,23 @@ #define __QETH_L3_H__ #include "qeth_core.h" +#include <linux/hashtable.h> #define QETH_SNIFF_AVAIL 0x0008 struct qeth_ipaddr { - struct list_head entry; + struct hlist_node hnode; enum qeth_ip_types type; enum qeth_ipa_setdelip_flags set_flags; enum qeth_ipa_setdelip_flags del_flags; - int is_multicast; - int users; + u8 is_multicast:1; + u8 in_progress:1; + u8 disp_flag:2; + + /* is changed only for normal ip addresses + * for non-normal addresses it always is 1 + */ + int ref_counter; enum qeth_prot_versions proto; unsigned char mac[OSA_ADDR_LEN]; union { @@ -32,7 +39,24 @@ struct qeth_ipaddr { unsigned int pfxlen; } a6; } u; + }; +static inline u64 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr) +{ + u64 ret = 0; + u8 *point; + + if (addr->proto == QETH_PROT_IPV6) { + point = (u8 
*) &addr->u.a6.addr; + ret = get_unaligned((u64 *)point) ^ + get_unaligned((u64 *) (point + 8)); + } + if (addr->proto == QETH_PROT_IPV4) { + point = (u8 *) &addr->u.a4.addr; + ret = get_unaligned((u32 *) point); + } + return ret; +} struct qeth_ipato_entry { struct list_head entry; @@ -60,6 +84,5 @@ int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *); struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions); int qeth_l3_add_ip(struct qeth_card *, struct qeth_ipaddr *); int qeth_l3_delete_ip(struct qeth_card *, struct qeth_ipaddr *); -void qeth_l3_set_ip_addr_list(struct qeth_card *); #endif /* __QETH_L3_H__ */ diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 709b52339..272d9e741 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -30,6 +30,7 @@ #include <net/ip6_fib.h> #include <net/ip6_checksum.h> #include <net/iucv/af_iucv.h> +#include <linux/hashtable.h> #include "qeth_l3.h" @@ -57,7 +58,7 @@ static int qeth_l3_isxdigit(char *buf) static void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf) { - sprintf(buf, "%i.%i.%i.%i", addr[0], addr[1], addr[2], addr[3]); + sprintf(buf, "%pI4", addr); } static int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr) @@ -204,104 +205,139 @@ int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card, return rc; } -/* - * Add IP to be added to todo list. If there is already an "add todo" - * in this list we just incremenent the reference count. - * Returns 0 if we just incremented reference count. - */ -static int __qeth_l3_insert_ip_todo(struct qeth_card *card, - struct qeth_ipaddr *addr, int add) +inline int +qeth_l3_ipaddrs_is_equal(struct qeth_ipaddr *addr1, struct qeth_ipaddr *addr2) { - struct qeth_ipaddr *tmp, *t; - int found = 0; + return addr1->proto == addr2->proto && + !memcmp(&addr1->u, &addr2->u, sizeof(addr1->u)) && + !memcmp(&addr1->mac, &addr2->mac, sizeof(addr1->mac)); +} - if (card->options.sniffer) - return 0; - list_for_each_entry_safe(tmp, t, card->ip_tbd_list, entry) { - if ((addr->type == QETH_IP_TYPE_DEL_ALL_MC) && - (tmp->type == QETH_IP_TYPE_DEL_ALL_MC)) - return 0; - if ((tmp->proto == QETH_PROT_IPV4) && - (addr->proto == QETH_PROT_IPV4) && - (tmp->type == addr->type) && - (tmp->is_multicast == addr->is_multicast) && - (tmp->u.a4.addr == addr->u.a4.addr) && - (tmp->u.a4.mask == addr->u.a4.mask)) { - found = 1; - break; - } - if ((tmp->proto == QETH_PROT_IPV6) && - (addr->proto == QETH_PROT_IPV6) && - (tmp->type == addr->type) && - (tmp->is_multicast == addr->is_multicast) && - (tmp->u.a6.pfxlen == addr->u.a6.pfxlen) && - (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr, - sizeof(struct in6_addr)) == 0)) { - found = 1; - break; - } - } - if (found) { - if (addr->users != 0) - tmp->users += addr->users; - else - tmp->users += add ? 1 : -1; - if (tmp->users == 0) { - list_del(&tmp->entry); - kfree(tmp); - } - return 0; +static struct qeth_ipaddr * +qeth_l3_ip_from_hash(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) +{ + struct qeth_ipaddr *addr; + + if (tmp_addr->is_multicast) { + hash_for_each_possible(card->ip_mc_htable, addr, + hnode, qeth_l3_ipaddr_hash(tmp_addr)) + if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr)) + return addr; } else { - if (addr->type == QETH_IP_TYPE_DEL_ALL_MC) - list_add(&addr->entry, card->ip_tbd_list); - else { - if (addr->users == 0) - addr->users += add ? 
1 : -1; - if (add && (addr->type == QETH_IP_TYPE_NORMAL) && - qeth_l3_is_addr_covered_by_ipato(card, addr)) { - QETH_CARD_TEXT(card, 2, "tkovaddr"); - addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG; - } - list_add_tail(&addr->entry, card->ip_tbd_list); - } - return 1; + hash_for_each_possible(card->ip_htable, addr, + hnode, qeth_l3_ipaddr_hash(tmp_addr)) + if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr)) + return addr; } + + return NULL; } -int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr) +int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) { - unsigned long flags; int rc = 0; + struct qeth_ipaddr *addr; QETH_CARD_TEXT(card, 4, "delip"); - if (addr->proto == QETH_PROT_IPV4) - QETH_CARD_HEX(card, 4, &addr->u.a4.addr, 4); + if (tmp_addr->proto == QETH_PROT_IPV4) + QETH_CARD_HEX(card, 4, &tmp_addr->u.a4.addr, 4); else { - QETH_CARD_HEX(card, 4, &addr->u.a6.addr, 8); - QETH_CARD_HEX(card, 4, ((char *)&addr->u.a6.addr) + 8, 8); + QETH_CARD_HEX(card, 4, &tmp_addr->u.a6.addr, 8); + QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8); + } + + addr = qeth_l3_ip_from_hash(card, tmp_addr); + if (!addr) + return -ENOENT; + + addr->ref_counter--; + if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0) + return rc; + if (addr->in_progress) + return -EINPROGRESS; + + if (!qeth_card_hw_is_reachable(card)) { + addr->disp_flag = QETH_DISP_ADDR_DELETE; + return 0; } - spin_lock_irqsave(&card->ip_lock, flags); - rc = __qeth_l3_insert_ip_todo(card, addr, 0); - spin_unlock_irqrestore(&card->ip_lock, flags); + + rc = qeth_l3_deregister_addr_entry(card, addr); + + hash_del(&addr->hnode); + kfree(addr); + return rc; } -int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr) +int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) { - unsigned long flags; int rc = 0; + struct qeth_ipaddr *addr; QETH_CARD_TEXT(card, 4, "addip"); - if (addr->proto == QETH_PROT_IPV4) - QETH_CARD_HEX(card, 4, &addr->u.a4.addr, 4); + + if (tmp_addr->proto == QETH_PROT_IPV4) + QETH_CARD_HEX(card, 4, &tmp_addr->u.a4.addr, 4); else { - QETH_CARD_HEX(card, 4, &addr->u.a6.addr, 8); - QETH_CARD_HEX(card, 4, ((char *)&addr->u.a6.addr) + 8, 8); + QETH_CARD_HEX(card, 4, &tmp_addr->u.a6.addr, 8); + QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8); + } + + addr = qeth_l3_ip_from_hash(card, tmp_addr); + if (!addr) { + addr = qeth_l3_get_addr_buffer(tmp_addr->proto); + if (!addr) + return -ENOMEM; + + memcpy(addr, tmp_addr, sizeof(struct qeth_ipaddr)); + addr->ref_counter = 1; + + if (addr->type == QETH_IP_TYPE_NORMAL && + qeth_l3_is_addr_covered_by_ipato(card, addr)) { + QETH_CARD_TEXT(card, 2, "tkovaddr"); + addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG; + } + hash_add(card->ip_htable, &addr->hnode, + qeth_l3_ipaddr_hash(addr)); + + if (!qeth_card_hw_is_reachable(card)) { + addr->disp_flag = QETH_DISP_ADDR_ADD; + return 0; + } + + /* qeth_l3_register_addr_entry can go to sleep + * if we add a IPV4 addr. It is caused by the reason + * that SETIP ipa cmd starts ARP staff for IPV4 addr. 
+ * Thus we should unlock spinlock, and make a protection + * using in_progress variable to indicate that there is + * an hardware operation with this IPV4 address + */ + if (addr->proto == QETH_PROT_IPV4) { + addr->in_progress = 1; + spin_unlock_bh(&card->ip_lock); + rc = qeth_l3_register_addr_entry(card, addr); + spin_lock_bh(&card->ip_lock); + addr->in_progress = 0; + } else + rc = qeth_l3_register_addr_entry(card, addr); + + if (!rc || (rc == IPA_RC_DUPLICATE_IP_ADDRESS) || + (rc == IPA_RC_LAN_OFFLINE)) { + addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING; + if (addr->ref_counter < 1) { + qeth_l3_delete_ip(card, addr); + kfree(addr); + } + } else { + hash_del(&addr->hnode); + kfree(addr); + } + } else { + if (addr->type == QETH_IP_TYPE_NORMAL) + addr->ref_counter++; } - spin_lock_irqsave(&card->ip_lock, flags); - rc = __qeth_l3_insert_ip_todo(card, addr, 1); - spin_unlock_irqrestore(&card->ip_lock, flags); + return rc; } @@ -312,229 +348,90 @@ struct qeth_ipaddr *qeth_l3_get_addr_buffer( struct qeth_ipaddr *addr; addr = kzalloc(sizeof(struct qeth_ipaddr), GFP_ATOMIC); - if (addr == NULL) { + if (!addr) return NULL; - } + addr->type = QETH_IP_TYPE_NORMAL; + addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING; addr->proto = prot; + return addr; } -static void qeth_l3_delete_mc_addresses(struct qeth_card *card) +static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover) { - struct qeth_ipaddr *iptodo; - unsigned long flags; + struct qeth_ipaddr *addr; + struct hlist_node *tmp; + int i; - QETH_CARD_TEXT(card, 4, "delmc"); - iptodo = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); - if (!iptodo) { - QETH_CARD_TEXT(card, 2, "dmcnomem"); + QETH_CARD_TEXT(card, 4, "clearip"); + + if (recover && card->options.sniffer) return; - } - iptodo->type = QETH_IP_TYPE_DEL_ALL_MC; - spin_lock_irqsave(&card->ip_lock, flags); - if (!__qeth_l3_insert_ip_todo(card, iptodo, 0)) - kfree(iptodo); - spin_unlock_irqrestore(&card->ip_lock, flags); -} -/* - * Add/remove address to/from card's ip list, i.e. try to add or remove - * reference to/from an IP address that is already registered on the card. 
- * Returns: - * 0 address was on card and its reference count has been adjusted, - * but is still > 0, so nothing has to be done - * also returns 0 if card was not on card and the todo was to delete - * the address -> there is also nothing to be done - * 1 address was not on card and the todo is to add it to the card's ip - * list - * -1 address was on card and its reference count has been decremented - * to <= 0 by the todo -> address must be removed from card - */ -static int __qeth_l3_ref_ip_on_card(struct qeth_card *card, - struct qeth_ipaddr *todo, struct qeth_ipaddr **__addr) -{ - struct qeth_ipaddr *addr; - int found = 0; - - list_for_each_entry(addr, &card->ip_list, entry) { - if ((addr->proto == QETH_PROT_IPV4) && - (todo->proto == QETH_PROT_IPV4) && - (addr->type == todo->type) && - (addr->u.a4.addr == todo->u.a4.addr) && - (addr->u.a4.mask == todo->u.a4.mask)) { - found = 1; - break; - } - if ((addr->proto == QETH_PROT_IPV6) && - (todo->proto == QETH_PROT_IPV6) && - (addr->type == todo->type) && - (addr->u.a6.pfxlen == todo->u.a6.pfxlen) && - (memcmp(&addr->u.a6.addr, &todo->u.a6.addr, - sizeof(struct in6_addr)) == 0)) { - found = 1; - break; - } - } - if (found) { - addr->users += todo->users; - if (addr->users <= 0) { - *__addr = addr; - return -1; - } else { - /* for VIPA and RXIP limit refcount to 1 */ - if (addr->type != QETH_IP_TYPE_NORMAL) - addr->users = 1; - return 0; + spin_lock_bh(&card->ip_lock); + + hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) { + if (!recover) { + hash_del(&addr->hnode); + kfree(addr); + continue; } + addr->disp_flag = QETH_DISP_ADDR_ADD; } - if (todo->users > 0) { - /* for VIPA and RXIP limit refcount to 1 */ - if (todo->type != QETH_IP_TYPE_NORMAL) - todo->users = 1; - return 1; - } else - return 0; -} -static void __qeth_l3_delete_all_mc(struct qeth_card *card, - unsigned long *flags) -{ - struct list_head fail_list; - struct qeth_ipaddr *addr, *tmp; - int rc; + spin_unlock_bh(&card->ip_lock); - INIT_LIST_HEAD(&fail_list); -again: - list_for_each_entry_safe(addr, tmp, &card->ip_list, entry) { - if (addr->is_multicast) { - list_del(&addr->entry); - spin_unlock_irqrestore(&card->ip_lock, *flags); - rc = qeth_l3_deregister_addr_entry(card, addr); - spin_lock_irqsave(&card->ip_lock, *flags); - if (!rc || (rc == IPA_RC_MC_ADDR_NOT_FOUND)) - kfree(addr); - else - list_add_tail(&addr->entry, &fail_list); - goto again; - } - } - list_splice(&fail_list, &card->ip_list); -} + spin_lock_bh(&card->mclock); -void qeth_l3_set_ip_addr_list(struct qeth_card *card) -{ - struct list_head *tbd_list; - struct qeth_ipaddr *todo, *addr; - unsigned long flags; - int rc; + hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) { + hash_del(&addr->hnode); + kfree(addr); + } - QETH_CARD_TEXT(card, 2, "sdiplist"); - QETH_CARD_HEX(card, 2, &card, sizeof(void *)); + spin_unlock_bh(&card->mclock); - if (!qeth_card_hw_is_reachable(card) || card->options.sniffer) - return; - spin_lock_irqsave(&card->ip_lock, flags); - tbd_list = card->ip_tbd_list; - card->ip_tbd_list = kzalloc(sizeof(struct list_head), GFP_ATOMIC); - if (!card->ip_tbd_list) { - QETH_CARD_TEXT(card, 0, "silnomem"); - card->ip_tbd_list = tbd_list; - spin_unlock_irqrestore(&card->ip_lock, flags); - return; - } else - INIT_LIST_HEAD(card->ip_tbd_list); - - while (!list_empty(tbd_list)) { - todo = list_entry(tbd_list->next, struct qeth_ipaddr, entry); - list_del(&todo->entry); - if (todo->type == QETH_IP_TYPE_DEL_ALL_MC) { - __qeth_l3_delete_all_mc(card, &flags); - kfree(todo); - continue; - } - 
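/*
 * The todo-list juggling removed in this hunk is replaced by a mark-and-sweep
 * over the new ip_htable (an outline based on the helpers above and below,
 * not code from the patch): on recovery, qeth_l3_clear_ip_htable(card, 1)
 * keeps the entries but tags them QETH_DISP_ADDR_ADD, and a later
 * qeth_l3_recover_ip() call walks the table, re-issuing SETIP for the tagged
 * entries and purging those marked QETH_DISP_ADDR_DELETE:
 *
 *	qeth_l3_clear_ip_htable(card, 1);	/* mark survivors for re-add */
 *	...					/* card is set up again */
 *	qeth_l3_recover_ip(card);		/* register/purge per disp_flag */
 */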
rc = __qeth_l3_ref_ip_on_card(card, todo, &addr); - if (rc == 0) { - /* nothing to be done; only adjusted refcount */ - kfree(todo); - } else if (rc == 1) { - /* new entry to be added to on-card list */ - spin_unlock_irqrestore(&card->ip_lock, flags); - rc = qeth_l3_register_addr_entry(card, todo); - spin_lock_irqsave(&card->ip_lock, flags); - if (!rc || (rc == IPA_RC_LAN_OFFLINE)) - list_add_tail(&todo->entry, &card->ip_list); - else - kfree(todo); - } else if (rc == -1) { - /* on-card entry to be removed */ - list_del_init(&addr->entry); - spin_unlock_irqrestore(&card->ip_lock, flags); - rc = qeth_l3_deregister_addr_entry(card, addr); - spin_lock_irqsave(&card->ip_lock, flags); - if (!rc || (rc == IPA_RC_IP_ADDRESS_NOT_DEFINED)) - kfree(addr); - else - list_add_tail(&addr->entry, &card->ip_list); - kfree(todo); - } - } - spin_unlock_irqrestore(&card->ip_lock, flags); - kfree(tbd_list); } - -static void qeth_l3_clear_ip_list(struct qeth_card *card, int recover) +static void qeth_l3_recover_ip(struct qeth_card *card) { - struct qeth_ipaddr *addr, *tmp; - unsigned long flags; + struct qeth_ipaddr *addr; + struct hlist_node *tmp; + int i; + int rc; - QETH_CARD_TEXT(card, 4, "clearip"); - if (recover && card->options.sniffer) - return; - spin_lock_irqsave(&card->ip_lock, flags); - /* clear todo list */ - list_for_each_entry_safe(addr, tmp, card->ip_tbd_list, entry) { - list_del(&addr->entry); - kfree(addr); - } + QETH_CARD_TEXT(card, 4, "recovrip"); + + spin_lock_bh(&card->ip_lock); - while (!list_empty(&card->ip_list)) { - addr = list_entry(card->ip_list.next, - struct qeth_ipaddr, entry); - list_del_init(&addr->entry); - if (!recover || addr->is_multicast) { + hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) { + if (addr->disp_flag == QETH_DISP_ADDR_DELETE) { + qeth_l3_deregister_addr_entry(card, addr); + hash_del(&addr->hnode); kfree(addr); - continue; + } else if (addr->disp_flag == QETH_DISP_ADDR_ADD) { + if (addr->proto == QETH_PROT_IPV4) { + addr->in_progress = 1; + spin_unlock_bh(&card->ip_lock); + rc = qeth_l3_register_addr_entry(card, addr); + spin_lock_bh(&card->ip_lock); + addr->in_progress = 0; + } else + rc = qeth_l3_register_addr_entry(card, addr); + + if (!rc) { + addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING; + if (addr->ref_counter < 1) + qeth_l3_delete_ip(card, addr); + } else { + hash_del(&addr->hnode); + kfree(addr); + } } - list_add_tail(&addr->entry, card->ip_tbd_list); } - spin_unlock_irqrestore(&card->ip_lock, flags); -} - -static int qeth_l3_address_exists_in_list(struct list_head *list, - struct qeth_ipaddr *addr, int same_type) -{ - struct qeth_ipaddr *tmp; - list_for_each_entry(tmp, list, entry) { - if ((tmp->proto == QETH_PROT_IPV4) && - (addr->proto == QETH_PROT_IPV4) && - ((same_type && (tmp->type == addr->type)) || - (!same_type && (tmp->type != addr->type))) && - (tmp->u.a4.addr == addr->u.a4.addr)) - return 1; + spin_unlock_bh(&card->ip_lock); - if ((tmp->proto == QETH_PROT_IPV6) && - (addr->proto == QETH_PROT_IPV6) && - ((same_type && (tmp->type == addr->type)) || - (!same_type && (tmp->type != addr->type))) && - (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr, - sizeof(struct in6_addr)) == 0)) - return 1; - - } - return 0; } static int qeth_l3_send_setdelmc(struct qeth_card *card, @@ -712,27 +609,28 @@ int qeth_l3_setrouting_v6(struct qeth_card *card) */ static void qeth_l3_clear_ipato_list(struct qeth_card *card) { - struct qeth_ipato_entry *ipatoe, *tmp; - unsigned long flags; - spin_lock_irqsave(&card->ip_lock, flags); + spin_lock_bh(&card->ip_lock); + 
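/*
 * Locking pattern after this conversion (an observation, not text from the
 * patch): ip_lock and mclock appear to be taken only from process and
 * softirq context now, so the irqsave/irqrestore pairs throughout this file
 * become plain _bh variants:
 *
 *	spin_lock_bh(&card->ip_lock);
 *	... modify card->ip_htable or card->ipato.entries ...
 *	spin_unlock_bh(&card->ip_lock);
 */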
list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) { list_del(&ipatoe->entry); kfree(ipatoe); } - spin_unlock_irqrestore(&card->ip_lock, flags); + + spin_unlock_bh(&card->ip_lock); } int qeth_l3_add_ipato_entry(struct qeth_card *card, struct qeth_ipato_entry *new) { struct qeth_ipato_entry *ipatoe; - unsigned long flags; int rc = 0; QETH_CARD_TEXT(card, 2, "addipato"); - spin_lock_irqsave(&card->ip_lock, flags); + + spin_lock_bh(&card->ip_lock); + list_for_each_entry(ipatoe, &card->ipato.entries, entry) { if (ipatoe->proto != new->proto) continue; @@ -743,10 +641,12 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card, break; } } + if (!rc) list_add_tail(&new->entry, &card->ipato.entries); - spin_unlock_irqrestore(&card->ip_lock, flags); + spin_unlock_bh(&card->ip_lock); + return rc; } @@ -754,10 +654,11 @@ void qeth_l3_del_ipato_entry(struct qeth_card *card, enum qeth_prot_versions proto, u8 *addr, int mask_bits) { struct qeth_ipato_entry *ipatoe, *tmp; - unsigned long flags; QETH_CARD_TEXT(card, 2, "delipato"); - spin_lock_irqsave(&card->ip_lock, flags); + + spin_lock_bh(&card->ip_lock); + list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) { if (ipatoe->proto != proto) continue; @@ -768,7 +669,8 @@ void qeth_l3_del_ipato_entry(struct qeth_card *card, kfree(ipatoe); } } - spin_unlock_irqrestore(&card->ip_lock, flags); + + spin_unlock_bh(&card->ip_lock); } /* @@ -778,7 +680,6 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto, const u8 *addr) { struct qeth_ipaddr *ipaddr; - unsigned long flags; int rc = 0; ipaddr = qeth_l3_get_addr_buffer(proto); @@ -797,18 +698,18 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto, ipaddr->del_flags = QETH_IPA_DELIP_VIPA_FLAG; } else return -ENOMEM; - spin_lock_irqsave(&card->ip_lock, flags); - if (qeth_l3_address_exists_in_list(&card->ip_list, ipaddr, 0) || - qeth_l3_address_exists_in_list(card->ip_tbd_list, ipaddr, 0)) + + spin_lock_bh(&card->ip_lock); + + if (qeth_l3_ip_from_hash(card, ipaddr)) rc = -EEXIST; - spin_unlock_irqrestore(&card->ip_lock, flags); - if (rc) { - kfree(ipaddr); - return rc; - } - if (!qeth_l3_add_ip(card, ipaddr)) - kfree(ipaddr); - qeth_l3_set_ip_addr_list(card); + else + qeth_l3_add_ip(card, ipaddr); + + spin_unlock_bh(&card->ip_lock); + + kfree(ipaddr); + return rc; } @@ -831,9 +732,12 @@ void qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto, ipaddr->type = QETH_IP_TYPE_VIPA; } else return; - if (!qeth_l3_delete_ip(card, ipaddr)) - kfree(ipaddr); - qeth_l3_set_ip_addr_list(card); + + spin_lock_bh(&card->ip_lock); + qeth_l3_delete_ip(card, ipaddr); + spin_unlock_bh(&card->ip_lock); + + kfree(ipaddr); } /* @@ -843,7 +747,6 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto, const u8 *addr) { struct qeth_ipaddr *ipaddr; - unsigned long flags; int rc = 0; ipaddr = qeth_l3_get_addr_buffer(proto); @@ -857,24 +760,25 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto, memcpy(&ipaddr->u.a6.addr, addr, 16); ipaddr->u.a6.pfxlen = 0; } + ipaddr->type = QETH_IP_TYPE_RXIP; ipaddr->set_flags = QETH_IPA_SETIP_TAKEOVER_FLAG; ipaddr->del_flags = 0; } else return -ENOMEM; - spin_lock_irqsave(&card->ip_lock, flags); - if (qeth_l3_address_exists_in_list(&card->ip_list, ipaddr, 0) || - qeth_l3_address_exists_in_list(card->ip_tbd_list, ipaddr, 0)) + + spin_lock_bh(&card->ip_lock); + + if (qeth_l3_ip_from_hash(card, ipaddr)) rc = -EEXIST; - spin_unlock_irqrestore(&card->ip_lock, flags); - 
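/*
 * Reworked VIPA/RXIP add path (condensed from the surrounding hunks): the
 * caller now builds a scratch qeth_ipaddr, lets qeth_l3_add_ip() copy it into
 * its own hash entry if needed, and unconditionally frees the scratch buffer
 * afterwards:
 *
 *	spin_lock_bh(&card->ip_lock);
 *	if (qeth_l3_ip_from_hash(card, ipaddr))
 *		rc = -EEXIST;
 *	else
 *		qeth_l3_add_ip(card, ipaddr);
 *	spin_unlock_bh(&card->ip_lock);
 *	kfree(ipaddr);
 */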
if (rc) { - kfree(ipaddr); - return rc; - } - if (!qeth_l3_add_ip(card, ipaddr)) - kfree(ipaddr); - qeth_l3_set_ip_addr_list(card); - return 0; + else + qeth_l3_add_ip(card, ipaddr); + + spin_unlock_bh(&card->ip_lock); + + kfree(ipaddr); + + return rc; } void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto, @@ -896,9 +800,12 @@ void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto, ipaddr->type = QETH_IP_TYPE_RXIP; } else return; - if (!qeth_l3_delete_ip(card, ipaddr)) - kfree(ipaddr); - qeth_l3_set_ip_addr_list(card); + + spin_lock_bh(&card->ip_lock); + qeth_l3_delete_ip(card, ipaddr); + spin_unlock_bh(&card->ip_lock); + + kfree(ipaddr); } static int qeth_l3_register_addr_entry(struct qeth_card *card, @@ -908,6 +815,7 @@ static int qeth_l3_register_addr_entry(struct qeth_card *card, int rc = 0; int cnt = 3; + if (addr->proto == QETH_PROT_IPV4) { QETH_CARD_TEXT(card, 2, "setaddr4"); QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int)); @@ -1013,36 +921,6 @@ static int qeth_l3_setadapter_parms(struct qeth_card *card) return rc; } -static int qeth_l3_default_setassparms_cb(struct qeth_card *card, - struct qeth_reply *reply, unsigned long data) -{ - struct qeth_ipa_cmd *cmd; - - QETH_CARD_TEXT(card, 4, "defadpcb"); - - cmd = (struct qeth_ipa_cmd *) data; - if (cmd->hdr.return_code == 0) { - cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; - if (cmd->hdr.prot_version == QETH_PROT_IPV4) - card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled; - if (cmd->hdr.prot_version == QETH_PROT_IPV6) - card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled; - } - if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM && - cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) { - card->info.csum_mask = cmd->data.setassparms.data.flags_32bit; - QETH_CARD_TEXT_(card, 3, "csum:%d", card->info.csum_mask); - } - if (cmd->data.setassparms.hdr.assist_no == IPA_OUTBOUND_CHECKSUM && - cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) { - card->info.tx_csum_mask = - cmd->data.setassparms.data.flags_32bit; - QETH_CARD_TEXT_(card, 3, "tcsu:%d", card->info.tx_csum_mask); - } - - return 0; -} - #ifdef CONFIG_QETH_IPV6 static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card, enum qeth_ipa_funcs ipa_func, __u16 cmd_code) @@ -1056,7 +934,7 @@ static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card, if (!iob) return -ENOMEM; rc = qeth_send_setassparms(card, iob, 0, 0, - qeth_l3_default_setassparms_cb, NULL); + qeth_setassparms_cb, NULL); return rc; } #endif @@ -1291,47 +1169,6 @@ out: return rc; } -static void qeth_l3_start_ipa_checksum(struct qeth_card *card) -{ - QETH_CARD_TEXT(card, 3, "strtcsum"); - if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM) - && (card->dev->features & NETIF_F_RXCSUM)) - qeth_set_rx_csum(card, 1); -} - -static void qeth_l3_start_ipa_tx_checksum(struct qeth_card *card) -{ - QETH_CARD_TEXT(card, 3, "strttxcs"); - qeth_start_ipa_tx_checksum(card); -} - -static int qeth_l3_start_ipa_tso(struct qeth_card *card) -{ - int rc; - - QETH_CARD_TEXT(card, 3, "sttso"); - - if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) { - dev_info(&card->gdev->dev, - "Outbound TSO not supported on %s\n", - QETH_CARD_IFNAME(card)); - rc = -EOPNOTSUPP; - } else { - rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO, - IPA_CMD_ASS_START, 0); - if (rc) - dev_warn(&card->gdev->dev, "Starting outbound TCP " - "segmentation offload for %s failed\n", - QETH_CARD_IFNAME(card)); - else - 
dev_info(&card->gdev->dev, - "Outbound TSO enabled\n"); - } - if (rc) - card->dev->features &= ~NETIF_F_TSO; - return rc; -} - static int qeth_l3_start_ipassists(struct qeth_card *card) { QETH_CARD_TEXT(card, 3, "strtipas"); @@ -1345,9 +1182,6 @@ static int qeth_l3_start_ipassists(struct qeth_card *card) qeth_l3_start_ipa_multicast(card); /* go on*/ qeth_l3_start_ipa_ipv6(card); /* go on*/ qeth_l3_start_ipa_broadcast(card); /* go on*/ - qeth_l3_start_ipa_checksum(card); /* go on*/ - qeth_l3_start_ipa_tx_checksum(card); - qeth_l3_start_ipa_tso(card); /* go on*/ return 0; } @@ -1507,31 +1341,99 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd) return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL); } -static void qeth_l3_get_mac_for_ipm(__u32 ipm, char *mac, - struct net_device *dev) +static void qeth_l3_get_mac_for_ipm(__u32 ipm, char *mac) { ip_eth_mc_map(ipm, mac); } -static void qeth_l3_add_mc(struct qeth_card *card, struct in_device *in4_dev) +static void qeth_l3_mark_all_mc_to_be_deleted(struct qeth_card *card) +{ + struct qeth_ipaddr *addr; + int i; + + hash_for_each(card->ip_mc_htable, i, addr, hnode) + addr->disp_flag = QETH_DISP_ADDR_DELETE; + +} + +static void qeth_l3_add_all_new_mc(struct qeth_card *card) +{ + struct qeth_ipaddr *addr; + struct hlist_node *tmp; + int i; + int rc; + + hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) { + if (addr->disp_flag == QETH_DISP_ADDR_ADD) { + rc = qeth_l3_register_addr_entry(card, addr); + if (!rc || (rc == IPA_RC_LAN_OFFLINE)) + addr->ref_counter = 1; + else { + hash_del(&addr->hnode); + kfree(addr); + } + } + } + +} + +static void qeth_l3_delete_nonused_mc(struct qeth_card *card) +{ + struct qeth_ipaddr *addr; + struct hlist_node *tmp; + int i; + int rc; + + hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) { + if (addr->disp_flag == QETH_DISP_ADDR_DELETE) { + rc = qeth_l3_deregister_addr_entry(card, addr); + if (!rc || (rc == IPA_RC_MC_ADDR_NOT_FOUND)) { + hash_del(&addr->hnode); + kfree(addr); + } + } + } + +} + + +static void +qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev) { - struct qeth_ipaddr *ipm; struct ip_mc_list *im4; + struct qeth_ipaddr *tmp, *ipm; char buf[MAX_ADDR_LEN]; QETH_CARD_TEXT(card, 4, "addmc"); + + tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); + if (!tmp) + return; + for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL; im4 = rcu_dereference(im4->next_rcu)) { - qeth_l3_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev); - ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); - if (!ipm) - continue; - ipm->u.a4.addr = im4->multiaddr; - memcpy(ipm->mac, buf, OSA_ADDR_LEN); - ipm->is_multicast = 1; - if (!qeth_l3_add_ip(card, ipm)) - kfree(ipm); + qeth_l3_get_mac_for_ipm(im4->multiaddr, buf); + + tmp->u.a4.addr = im4->multiaddr; + memcpy(tmp->mac, buf, sizeof(tmp->mac)); + + ipm = qeth_l3_ip_from_hash(card, tmp); + if (ipm) { + ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING; + } else { + ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); + if (!ipm) + continue; + memcpy(ipm->mac, buf, sizeof(tmp->mac)); + ipm->u.a4.addr = im4->multiaddr; + ipm->is_multicast = 1; + ipm->disp_flag = QETH_DISP_ADDR_ADD; + hash_add(card->ip_mc_htable, + &ipm->hnode, qeth_l3_ipaddr_hash(ipm)); + } } + + kfree(tmp); } /* called with rcu_read_lock */ @@ -1541,6 +1443,7 @@ static void qeth_l3_add_vlan_mc(struct qeth_card *card) u16 vid; QETH_CARD_TEXT(card, 4, "addmcvl"); + if (!qeth_is_supported(card, IPA_FULL_VLAN)) return; @@ -1555,7 +1458,7 @@ static void 
qeth_l3_add_vlan_mc(struct qeth_card *card) in_dev = __in_dev_get_rcu(netdev); if (!in_dev) continue; - qeth_l3_add_mc(card, in_dev); + qeth_l3_add_mc_to_hash(card, in_dev); } } @@ -1564,36 +1467,60 @@ static void qeth_l3_add_multicast_ipv4(struct qeth_card *card) struct in_device *in4_dev; QETH_CARD_TEXT(card, 4, "chkmcv4"); + rcu_read_lock(); in4_dev = __in_dev_get_rcu(card->dev); if (in4_dev == NULL) goto unlock; - qeth_l3_add_mc(card, in4_dev); + qeth_l3_add_mc_to_hash(card, in4_dev); qeth_l3_add_vlan_mc(card); unlock: rcu_read_unlock(); } #ifdef CONFIG_QETH_IPV6 -static void qeth_l3_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev) +static void +qeth_l3_add_mc6_to_hash(struct qeth_card *card, struct inet6_dev *in6_dev) { struct qeth_ipaddr *ipm; struct ifmcaddr6 *im6; + struct qeth_ipaddr *tmp; char buf[MAX_ADDR_LEN]; QETH_CARD_TEXT(card, 4, "addmc6"); + + tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); + if (!tmp) + return; + for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) { ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0); + + memcpy(tmp->mac, buf, sizeof(tmp->mac)); + memcpy(&tmp->u.a6.addr, &im6->mca_addr.s6_addr, + sizeof(struct in6_addr)); + tmp->is_multicast = 1; + + ipm = qeth_l3_ip_from_hash(card, tmp); + if (ipm) { + ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING; + continue; + } + ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); if (!ipm) continue; - ipm->is_multicast = 1; + memcpy(ipm->mac, buf, OSA_ADDR_LEN); memcpy(&ipm->u.a6.addr, &im6->mca_addr.s6_addr, sizeof(struct in6_addr)); - if (!qeth_l3_add_ip(card, ipm)) - kfree(ipm); + ipm->is_multicast = 1; + ipm->disp_flag = QETH_DISP_ADDR_ADD; + hash_add(card->ip_mc_htable, + &ipm->hnode, qeth_l3_ipaddr_hash(ipm)); + } + kfree(tmp); } /* called with rcu_read_lock */ @@ -1603,6 +1530,7 @@ static void qeth_l3_add_vlan_mc6(struct qeth_card *card) u16 vid; QETH_CARD_TEXT(card, 4, "admc6vl"); + if (!qeth_is_supported(card, IPA_FULL_VLAN)) return; @@ -1618,7 +1546,7 @@ static void qeth_l3_add_vlan_mc6(struct qeth_card *card) if (!in_dev) continue; read_lock_bh(&in_dev->lock); - qeth_l3_add_mc6(card, in_dev); + qeth_l3_add_mc6_to_hash(card, in_dev); read_unlock_bh(&in_dev->lock); in6_dev_put(in_dev); } @@ -1629,14 +1557,16 @@ static void qeth_l3_add_multicast_ipv6(struct qeth_card *card) struct inet6_dev *in6_dev; QETH_CARD_TEXT(card, 4, "chkmcv6"); + if (!qeth_is_supported(card, IPA_IPV6)) return ; in6_dev = in6_dev_get(card->dev); - if (in6_dev == NULL) + if (!in6_dev) return; + rcu_read_lock(); read_lock_bh(&in6_dev->lock); - qeth_l3_add_mc6(card, in6_dev); + qeth_l3_add_mc6_to_hash(card, in6_dev); qeth_l3_add_vlan_mc6(card); read_unlock_bh(&in6_dev->lock); rcu_read_unlock(); @@ -1660,16 +1590,23 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card, in_dev = in_dev_get(netdev); if (!in_dev) return; + + addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); + if (!addr) + return; + + spin_lock_bh(&card->ip_lock); + for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { - addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); - if (addr) { - addr->u.a4.addr = ifa->ifa_address; - addr->u.a4.mask = ifa->ifa_mask; - addr->type = QETH_IP_TYPE_NORMAL; - if (!qeth_l3_delete_ip(card, addr)) - kfree(addr); - } + addr->u.a4.addr = ifa->ifa_address; + addr->u.a4.mask = ifa->ifa_mask; + addr->type = QETH_IP_TYPE_NORMAL; + qeth_l3_delete_ip(card, addr); } + + spin_unlock_bh(&card->ip_lock); + + kfree(addr); in_dev_put(in_dev); } @@ -1687,20 +1624,28 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card, netdev = 
__vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q), vid); if (!netdev) return; + in6_dev = in6_dev_get(netdev); if (!in6_dev) return; + + addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); + if (!addr) + return; + + spin_lock_bh(&card->ip_lock); + list_for_each_entry(ifa, &in6_dev->addr_list, if_list) { - addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); - if (addr) { - memcpy(&addr->u.a6.addr, &ifa->addr, - sizeof(struct in6_addr)); - addr->u.a6.pfxlen = ifa->prefix_len; - addr->type = QETH_IP_TYPE_NORMAL; - if (!qeth_l3_delete_ip(card, addr)) - kfree(addr); - } + memcpy(&addr->u.a6.addr, &ifa->addr, + sizeof(struct in6_addr)); + addr->u.a6.pfxlen = ifa->prefix_len; + addr->type = QETH_IP_TYPE_NORMAL; + qeth_l3_delete_ip(card, addr); } + + spin_unlock_bh(&card->ip_lock); + + kfree(addr); in6_dev_put(in6_dev); #endif /* CONFIG_QETH_IPV6 */ } @@ -1727,18 +1672,16 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) { struct qeth_card *card = dev->ml_priv; - unsigned long flags; QETH_CARD_TEXT_(card, 4, "kid:%d", vid); + if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { QETH_CARD_TEXT(card, 3, "kidREC"); return 0; } - spin_lock_irqsave(&card->vlanlock, flags); /* unregister IP addresses of vlan device */ qeth_l3_free_vlan_addresses(card, vid); clear_bit(vid, card->active_vlans); - spin_unlock_irqrestore(&card->vlanlock, flags); qeth_l3_set_multicast_list(card->dev); return 0; } @@ -1994,8 +1937,8 @@ static int qeth_l3_verify_vlan_dev(struct net_device *dev, static int qeth_l3_verify_dev(struct net_device *dev) { struct qeth_card *card; - unsigned long flags; int rc = 0; + unsigned long flags; read_lock_irqsave(&qeth_core_card_list.rwlock, flags); list_for_each_entry(card, &qeth_core_card_list.list, list) { @@ -2051,7 +1994,7 @@ static void qeth_l3_stop_card(struct qeth_card *card, int recovery_mode) card->state = CARD_STATE_SOFTSETUP; } if (card->state == CARD_STATE_SOFTSETUP) { - qeth_l3_clear_ip_list(card, 1); + qeth_l3_clear_ip_htable(card, 1); qeth_clear_ipacmd_list(card); card->state = CARD_STATE_HARDSETUP; } @@ -2106,12 +2049,20 @@ static void qeth_l3_set_multicast_list(struct net_device *dev) (card->state != CARD_STATE_UP)) return; if (!card->options.sniffer) { - qeth_l3_delete_mc_addresses(card); + + spin_lock_bh(&card->mclock); + + qeth_l3_mark_all_mc_to_be_deleted(card); + qeth_l3_add_multicast_ipv4(card); #ifdef CONFIG_QETH_IPV6 qeth_l3_add_multicast_ipv6(card); #endif - qeth_l3_set_ip_addr_list(card); + qeth_l3_delete_nonused_mc(card); + qeth_l3_add_all_new_mc(card); + + spin_unlock_bh(&card->mclock); + if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) return; } @@ -2375,22 +2326,21 @@ static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata) if (rc) { if (copy_to_user(udata, qinfo.udata, 4)) rc = -EFAULT; - goto free_and_out; - } else { + goto free_and_out; + } #ifdef CONFIG_QETH_IPV6 - if (qinfo.mask_bits & QETH_QARP_WITH_IPV6) { - /* fails in case of GuestLAN QDIO mode */ - qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV6, - &qinfo); - } + if (qinfo.mask_bits & QETH_QARP_WITH_IPV6) { + /* fails in case of GuestLAN QDIO mode */ + qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV6, &qinfo); + } #endif - if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) { - QETH_CARD_TEXT(card, 4, "qactf"); - rc = -EFAULT; - goto free_and_out; - } - QETH_CARD_TEXT(card, 4, "qacts"); + if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) { + QETH_CARD_TEXT(card, 4, "qactf"); + rc = -EFAULT; + goto free_and_out; } + 
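/*
 * The multicast refresh in qeth_l3_set_multicast_list() above now follows the
 * same mark-and-sweep idea as the unicast table (an outline of the sequence
 * shown in that hunk):
 *
 *	spin_lock_bh(&card->mclock);
 *	qeth_l3_mark_all_mc_to_be_deleted(card);	/* mark every entry DELETE */
 *	qeth_l3_add_multicast_ipv4(card);	/* re-found -> DO_NOTHING, new -> ADD */
 *	qeth_l3_add_multicast_ipv6(card);	/* (if CONFIG_QETH_IPV6) */
 *	qeth_l3_delete_nonused_mc(card);	/* DELIP the still-marked entries */
 *	qeth_l3_add_all_new_mc(card);		/* SETIP the newly added entries */
 *	spin_unlock_bh(&card->mclock);
 */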
QETH_CARD_TEXT(card, 4, "qacts"); + free_and_out: kfree(qinfo.udata); out: @@ -2427,7 +2377,7 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card, rc = qeth_send_setassparms(card, iob, sizeof(struct qeth_arp_cache_entry), (unsigned long) entry, - qeth_l3_default_setassparms_cb, NULL); + qeth_setassparms_cb, NULL); if (rc) { tmp = rc; qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf); @@ -2467,7 +2417,7 @@ static int qeth_l3_arp_remove_entry(struct qeth_card *card, return -ENOMEM; rc = qeth_send_setassparms(card, iob, 12, (unsigned long)buf, - qeth_l3_default_setassparms_cb, NULL); + qeth_setassparms_cb, NULL); if (rc) { tmp = rc; memset(buf, 0, 16); @@ -2793,15 +2743,34 @@ static void qeth_tso_fill_header(struct qeth_card *card, } } -static inline int qeth_l3_tso_elements(struct sk_buff *skb) +/** + * qeth_l3_get_elements_no_tso() - find number of SBALEs for skb data for tso + * @card: qeth card structure, to check max. elems. + * @skb: SKB address + * @extra_elems: extra elems needed, to check against max. + * + * Returns the number of pages, and thus QDIO buffer elements, needed to cover + * skb data, including linear part and fragments, but excluding TCP header. + * (Exclusion of TCP header distinguishes it from qeth_get_elements_no().) + * Checks if the result plus extra_elems fits under the limit for the card. + * Returns 0 if it does not. + * Note: extra_elems is not included in the returned result. + */ +static int qeth_l3_get_elements_no_tso(struct qeth_card *card, + struct sk_buff *skb, int extra_elems) { - unsigned long tcpd = (unsigned long)tcp_hdr(skb) + - tcp_hdr(skb)->doff * 4; - int tcpd_len = skb_headlen(skb) - (tcpd - (unsigned long)skb->data); - int elements = PFN_UP(tcpd + tcpd_len - 1) - PFN_DOWN(tcpd); - - elements += qeth_get_elements_for_frags(skb); + addr_t tcpdptr = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb); + int elements = qeth_get_elements_for_range( + tcpdptr, + (addr_t)skb->data + skb_headlen(skb)) + + qeth_get_elements_for_frags(skb); + if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { + QETH_DBF_MESSAGE(2, + "Invalid size of TSO IP packet (Number=%d / Length=%d). Discarded.\n", + elements + extra_elems, skb->len); + return 0; + } return elements; } @@ -2810,8 +2779,8 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) int rc; u16 *tag; struct qeth_hdr *hdr = NULL; - int elements_needed = 0; - int elems; + int hdr_elements = 0; + int elements; struct qeth_card *card = dev->ml_priv; struct sk_buff *new_skb = NULL; int ipv = qeth_get_ip_version(skb); @@ -2822,7 +2791,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) qeth_get_priority_queue(card, skb, ipv, cast_type) : card->qdio.default_out_queue]; int tx_bytes = skb->len; - bool large_send; + bool use_tso; int data_offset = -1; int nr_frags; @@ -2847,10 +2816,12 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) card->perf_stats.outbound_start_time = qeth_get_micros(); } - large_send = skb_is_gso(skb); + /* Ignore segment size from skb_is_gso(), 1 page is always used. 
*/ + use_tso = skb_is_gso(skb) && + (qeth_get_ip_protocol(skb) == IPPROTO_TCP) && (ipv == 4); - if ((card->info.type == QETH_CARD_TYPE_IQD) && (!large_send) && - (skb_shinfo(skb)->nr_frags == 0)) { + if ((card->info.type == QETH_CARD_TYPE_IQD) && + !skb_is_nonlinear(skb)) { new_skb = skb; if (new_skb->protocol == ETH_P_AF_IUCV) data_offset = 0; @@ -2859,7 +2830,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); if (!hdr) goto tx_drop; - elements_needed++; + hdr_elements++; } else { /* create a clone with writeable headroom */ new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso) @@ -2894,22 +2865,28 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) /* fix hardware limitation: as long as we do not have sbal * chaining we can not send long frag lists */ - if (large_send) { - if (qeth_l3_tso_elements(new_skb) + 1 > 16) { - if (skb_linearize(new_skb)) - goto tx_drop; - if (card->options.performance_stats) + if ((card->info.type != QETH_CARD_TYPE_IQD) && + ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) || + (!use_tso && !qeth_get_elements_no(card, new_skb, 0)))) { + int lin_rc = skb_linearize(new_skb); + + if (card->options.performance_stats) { + if (lin_rc) + card->perf_stats.tx_linfail++; + else card->perf_stats.tx_lin++; } + if (lin_rc) + goto tx_drop; } - if (large_send && (cast_type == RTN_UNSPEC)) { + if (use_tso) { hdr = (struct qeth_hdr *)skb_push(new_skb, sizeof(struct qeth_hdr_tso)); memset(hdr, 0, sizeof(struct qeth_hdr_tso)); qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type); qeth_tso_fill_header(card, hdr, new_skb); - elements_needed++; + hdr_elements++; } else { if (data_offset < 0) { hdr = (struct qeth_hdr *)skb_push(new_skb, @@ -2930,31 +2907,31 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) qeth_l3_hdr_csum(card, hdr, new_skb); } - elems = qeth_get_elements_no(card, new_skb, elements_needed); - if (!elems) { + elements = use_tso ? 
+ qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) : + qeth_get_elements_no(card, new_skb, hdr_elements); + if (!elements) { if (data_offset >= 0) kmem_cache_free(qeth_core_header_cache, hdr); goto tx_drop; } - elements_needed += elems; - nr_frags = skb_shinfo(new_skb)->nr_frags; + elements += hdr_elements; if (card->info.type != QETH_CARD_TYPE_IQD) { int len; - if (large_send) + if (use_tso) len = ((unsigned long)tcp_hdr(new_skb) + - tcp_hdr(new_skb)->doff * 4) - + tcp_hdrlen(new_skb)) - (unsigned long)new_skb->data; else len = sizeof(struct qeth_hdr_layer3); if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len)) goto tx_drop; - rc = qeth_do_send_packet(card, queue, new_skb, hdr, - elements_needed); + rc = qeth_do_send_packet(card, queue, new_skb, hdr, elements); } else rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr, - elements_needed, data_offset, 0); + elements, data_offset, 0); if (!rc) { card->stats.tx_packets++; @@ -2962,7 +2939,8 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) if (new_skb != skb) dev_kfree_skb_any(skb); if (card->options.performance_stats) { - if (large_send) { + nr_frags = skb_shinfo(new_skb)->nr_frags; + if (use_tso) { card->perf_stats.large_send_bytes += tx_bytes; card->perf_stats.large_send_cnt++; } @@ -3048,36 +3026,6 @@ static int qeth_l3_stop(struct net_device *dev) return 0; } -static netdev_features_t qeth_l3_fix_features(struct net_device *dev, - netdev_features_t features) -{ - struct qeth_card *card = dev->ml_priv; - - if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) - features &= ~NETIF_F_IP_CSUM; - if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) - features &= ~NETIF_F_TSO; - if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) - features &= ~NETIF_F_RXCSUM; - return features; -} - -static int qeth_l3_set_features(struct net_device *dev, - netdev_features_t features) -{ - struct qeth_card *card = dev->ml_priv; - netdev_features_t changed = dev->features ^ features; - - if (!(changed & NETIF_F_RXCSUM)) - return 0; - - if (card->state == CARD_STATE_DOWN || - card->state == CARD_STATE_RECOVER) - return 0; - - return qeth_set_rx_csum(card, features & NETIF_F_RXCSUM ? 
1 : 0); -} - static const struct ethtool_ops qeth_l3_ethtool_ops = { .get_link = ethtool_op_get_link, .get_strings = qeth_core_get_strings, @@ -3120,8 +3068,8 @@ static const struct net_device_ops qeth_l3_netdev_ops = { .ndo_set_rx_mode = qeth_l3_set_multicast_list, .ndo_do_ioctl = qeth_l3_do_ioctl, .ndo_change_mtu = qeth_change_mtu, - .ndo_fix_features = qeth_l3_fix_features, - .ndo_set_features = qeth_l3_set_features, + .ndo_fix_features = qeth_fix_features, + .ndo_set_features = qeth_set_features, .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid, .ndo_tx_timeout = qeth_tx_timeout, @@ -3136,8 +3084,8 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = { .ndo_set_rx_mode = qeth_l3_set_multicast_list, .ndo_do_ioctl = qeth_l3_do_ioctl, .ndo_change_mtu = qeth_change_mtu, - .ndo_fix_features = qeth_l3_fix_features, - .ndo_set_features = qeth_l3_set_features, + .ndo_fix_features = qeth_fix_features, + .ndo_set_features = qeth_set_features, .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid, .ndo_tx_timeout = qeth_tx_timeout, @@ -3169,7 +3117,9 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) card->dev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO; - card->dev->features = NETIF_F_RXCSUM; + card->dev->vlan_features = NETIF_F_SG | + NETIF_F_RXCSUM | NETIF_F_IP_CSUM | + NETIF_F_TSO; } } } else if (card->info.type == QETH_CARD_TYPE_IQD) { @@ -3195,7 +3145,8 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; netif_keep_dst(card->dev); - card->dev->gso_max_size = 15 * PAGE_SIZE; + card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) * + PAGE_SIZE; SET_NETDEV_DEV(card->dev, &card->gdev->dev); netif_napi_add(card->dev, &card->napi, qeth_l3_poll, QETH_NAPI_WEIGHT); @@ -3231,7 +3182,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) card->dev = NULL; } - qeth_l3_clear_ip_list(card, 0); + qeth_l3_clear_ip_htable(card, 0); qeth_l3_clear_ipato_list(card); return; } @@ -3316,7 +3267,7 @@ contin: card->state = CARD_STATE_SOFTSETUP; qeth_set_allowed_threads(card, 0xffffffff, 0); - qeth_l3_set_ip_addr_list(card); + qeth_l3_recover_ip(card); if (card->lan_online) netif_carrier_on(card->dev); else @@ -3328,6 +3279,7 @@ contin: else dev_open(card->dev); qeth_l3_set_multicast_list(card->dev); + qeth_recover_features(card->dev); rtnl_unlock(); } qeth_trace_features(card); @@ -3517,6 +3469,7 @@ EXPORT_SYMBOL_GPL(qeth_l3_discipline); static int qeth_l3_ip_event(struct notifier_block *this, unsigned long event, void *ptr) { + struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; struct net_device *dev = (struct net_device *)ifa->ifa_dev->dev; struct qeth_ipaddr *addr; @@ -3531,27 +3484,27 @@ static int qeth_l3_ip_event(struct notifier_block *this, QETH_CARD_TEXT(card, 3, "ipevent"); addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); - if (addr != NULL) { + if (addr) { addr->u.a4.addr = ifa->ifa_address; addr->u.a4.mask = ifa->ifa_mask; addr->type = QETH_IP_TYPE_NORMAL; } else - goto out; + return NOTIFY_DONE; switch (event) { case NETDEV_UP: - if (!qeth_l3_add_ip(card, addr)) - kfree(addr); + spin_lock_bh(&card->ip_lock); + qeth_l3_add_ip(card, addr); + spin_unlock_bh(&card->ip_lock); break; case NETDEV_DOWN: - if (!qeth_l3_delete_ip(card, addr)) - kfree(addr); - break; - default: + spin_lock_bh(&card->ip_lock); + qeth_l3_delete_ip(card, addr); + spin_unlock_bh(&card->ip_lock); break; } - 
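The qeth_l3_get_elements_no_tso() helper introduced above for qeth_l3_hard_start_xmit() counts QDIO buffer elements as the number of pages the TCP payload range touches (plus the fragment pages), and the transmit path linearizes or drops the skb when that count plus the header elements exceeds QETH_MAX_BUFFER_ELEMENTS(card). The core of the calculation is plain page arithmetic; the sketch below is a generic, self-contained approximation assuming 4 KiB pages, not the driver's exact helper.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

/* Number of pages (and thus buffer elements) needed to cover the
 * half-open byte range [start, end). Approximates the range-based
 * element counting used by the TSO path above. */
static unsigned long elements_for_range(unsigned long start, unsigned long end)
{
	return end > start ? PFN_UP(end) - PFN_DOWN(start) : 0;
}

int main(void)
{
	/* A 100-byte payload that straddles a page boundary needs 2 elements. */
	printf("%lu\n", elements_for_range(0xFFF0UL, 0xFFF0UL + 100));
	return 0;
}

A payload that starts just below a page boundary therefore costs one extra element even when it is only a few bytes long, which is why the limit check includes the header elements separately.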
qeth_l3_set_ip_addr_list(card); -out: + + kfree(addr); return NOTIFY_DONE; } @@ -3580,27 +3533,27 @@ static int qeth_l3_ip6_event(struct notifier_block *this, return NOTIFY_DONE; addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); - if (addr != NULL) { + if (addr) { memcpy(&addr->u.a6.addr, &ifa->addr, sizeof(struct in6_addr)); addr->u.a6.pfxlen = ifa->prefix_len; addr->type = QETH_IP_TYPE_NORMAL; } else - goto out; + return NOTIFY_DONE; switch (event) { case NETDEV_UP: - if (!qeth_l3_add_ip(card, addr)) - kfree(addr); + spin_lock_bh(&card->ip_lock); + qeth_l3_add_ip(card, addr); + spin_unlock_bh(&card->ip_lock); break; case NETDEV_DOWN: - if (!qeth_l3_delete_ip(card, addr)) - kfree(addr); - break; - default: + spin_lock_bh(&card->ip_lock); + qeth_l3_delete_ip(card, addr); + spin_unlock_bh(&card->ip_lock); break; } - qeth_l3_set_ip_addr_list(card); -out: + + kfree(addr); return NOTIFY_DONE; } diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index 386eb7b89..0e00a5ce0 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c @@ -8,6 +8,7 @@ #include <linux/slab.h> #include <asm/ebcdic.h> +#include <linux/hashtable.h> #include "qeth_l3.h" #define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \ @@ -285,19 +286,21 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev, if (card->options.hsuid[0]) { /* delete old ip address */ addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); - if (addr != NULL) { - addr->u.a6.addr.s6_addr32[0] = 0xfe800000; - addr->u.a6.addr.s6_addr32[1] = 0x00000000; - for (i = 8; i < 16; i++) - addr->u.a6.addr.s6_addr[i] = - card->options.hsuid[i - 8]; - addr->u.a6.pfxlen = 0; - addr->type = QETH_IP_TYPE_NORMAL; - } else + if (!addr) return -ENOMEM; - if (!qeth_l3_delete_ip(card, addr)) - kfree(addr); - qeth_l3_set_ip_addr_list(card); + + addr->u.a6.addr.s6_addr32[0] = 0xfe800000; + addr->u.a6.addr.s6_addr32[1] = 0x00000000; + for (i = 8; i < 16; i++) + addr->u.a6.addr.s6_addr[i] = + card->options.hsuid[i - 8]; + addr->u.a6.pfxlen = 0; + addr->type = QETH_IP_TYPE_NORMAL; + + spin_lock_bh(&card->ip_lock); + qeth_l3_delete_ip(card, addr); + spin_unlock_bh(&card->ip_lock); + kfree(addr); } if (strlen(tmp) == 0) { @@ -328,9 +331,11 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev, addr->type = QETH_IP_TYPE_NORMAL; } else return -ENOMEM; - if (!qeth_l3_add_ip(card, addr)) - kfree(addr); - qeth_l3_set_ip_addr_list(card); + + spin_lock_bh(&card->ip_lock); + qeth_l3_add_ip(card, addr); + spin_unlock_bh(&card->ip_lock); + kfree(addr); return count; } @@ -367,8 +372,8 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); - struct qeth_ipaddr *tmpipa, *t; - int rc = 0; + struct qeth_ipaddr *addr; + int i, rc = 0; if (!card) return -EINVAL; @@ -384,21 +389,20 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev, card->ipato.enabled = (card->ipato.enabled)? 
0 : 1; } else if (sysfs_streq(buf, "1")) { card->ipato.enabled = 1; - list_for_each_entry_safe(tmpipa, t, card->ip_tbd_list, entry) { - if ((tmpipa->type == QETH_IP_TYPE_NORMAL) && - qeth_l3_is_addr_covered_by_ipato(card, tmpipa)) - tmpipa->set_flags |= + hash_for_each(card->ip_htable, i, addr, hnode) { + if ((addr->type == QETH_IP_TYPE_NORMAL) && + qeth_l3_is_addr_covered_by_ipato(card, addr)) + addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG; - } - + } } else if (sysfs_streq(buf, "0")) { card->ipato.enabled = 0; - list_for_each_entry_safe(tmpipa, t, card->ip_tbd_list, entry) { - if (tmpipa->set_flags & - QETH_IPA_SETIP_TAKEOVER_FLAG) - tmpipa->set_flags &= - ~QETH_IPA_SETIP_TAKEOVER_FLAG; - } + hash_for_each(card->ip_htable, i, addr, hnode) { + if (addr->set_flags & + QETH_IPA_SETIP_TAKEOVER_FLAG) + addr->set_flags &= + ~QETH_IPA_SETIP_TAKEOVER_FLAG; + } } else rc = -EINVAL; out: @@ -452,7 +456,6 @@ static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card, enum qeth_prot_versions proto) { struct qeth_ipato_entry *ipatoe; - unsigned long flags; char addr_str[40]; int entry_len; /* length of 1 entry string, differs between v4 and v6 */ int i = 0; @@ -460,7 +463,7 @@ static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card, entry_len = (proto == QETH_PROT_IPV4)? 12 : 40; /* add strlen for "/<mask>\n" */ entry_len += (proto == QETH_PROT_IPV4)? 5 : 6; - spin_lock_irqsave(&card->ip_lock, flags); + spin_lock_bh(&card->ip_lock); list_for_each_entry(ipatoe, &card->ipato.entries, entry) { if (ipatoe->proto != proto) continue; @@ -473,7 +476,7 @@ static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card, i += snprintf(buf + i, PAGE_SIZE - i, "%s/%i\n", addr_str, ipatoe->mask_bits); } - spin_unlock_irqrestore(&card->ip_lock, flags); + spin_unlock_bh(&card->ip_lock); i += snprintf(buf + i, PAGE_SIZE - i, "\n"); return i; @@ -689,15 +692,15 @@ static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card, enum qeth_prot_versions proto) { struct qeth_ipaddr *ipaddr; + struct hlist_node *tmp; char addr_str[40]; int entry_len; /* length of 1 entry string, differs between v4 and v6 */ - unsigned long flags; int i = 0; entry_len = (proto == QETH_PROT_IPV4)? 12 : 40; entry_len += 2; /* \n + terminator */ - spin_lock_irqsave(&card->ip_lock, flags); - list_for_each_entry(ipaddr, &card->ip_list, entry) { + spin_lock_bh(&card->ip_lock); + hash_for_each_safe(card->ip_htable, i, tmp, ipaddr, hnode) { if (ipaddr->proto != proto) continue; if (ipaddr->type != QETH_IP_TYPE_VIPA) @@ -711,7 +714,7 @@ static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card, addr_str); i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str); } - spin_unlock_irqrestore(&card->ip_lock, flags); + spin_unlock_bh(&card->ip_lock); i += snprintf(buf + i, PAGE_SIZE - i, "\n"); return i; @@ -851,15 +854,15 @@ static ssize_t qeth_l3_dev_rxip_add_show(char *buf, struct qeth_card *card, enum qeth_prot_versions proto) { struct qeth_ipaddr *ipaddr; + struct hlist_node *tmp; char addr_str[40]; int entry_len; /* length of 1 entry string, differs between v4 and v6 */ - unsigned long flags; int i = 0; entry_len = (proto == QETH_PROT_IPV4)? 
12 : 40; entry_len += 2; /* \n + terminator */ - spin_lock_irqsave(&card->ip_lock, flags); - list_for_each_entry(ipaddr, &card->ip_list, entry) { + spin_lock_bh(&card->ip_lock); + hash_for_each_safe(card->ip_htable, i, tmp, ipaddr, hnode) { if (ipaddr->proto != proto) continue; if (ipaddr->type != QETH_IP_TYPE_RXIP) @@ -873,7 +876,7 @@ static ssize_t qeth_l3_dev_rxip_add_show(char *buf, struct qeth_card *card, addr_str); i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str); } - spin_unlock_irqrestore(&card->ip_lock, flags); + spin_unlock_bh(&card->ip_lock); i += snprintf(buf + i, PAGE_SIZE - i, "\n"); return i; diff --git a/drivers/s390/virtio/Makefile b/drivers/s390/virtio/Makefile index 241891a57..df40692a9 100644 --- a/drivers/s390/virtio/Makefile +++ b/drivers/s390/virtio/Makefile @@ -6,4 +6,8 @@ # it under the terms of the GNU General Public License (version 2 only) # as published by the Free Software Foundation. -obj-$(CONFIG_S390_GUEST) += kvm_virtio.o virtio_ccw.o +s390-virtio-objs := virtio_ccw.o +ifdef CONFIG_S390_GUEST_OLD_TRANSPORT +s390-virtio-objs += kvm_virtio.o +endif +obj-$(CONFIG_S390_GUEST) += $(s390-virtio-objs) diff --git a/drivers/s390/virtio/kvm_virtio.c b/drivers/s390/virtio/kvm_virtio.c index 1d060fd29..5e5c11f37 100644 --- a/drivers/s390/virtio/kvm_virtio.c +++ b/drivers/s390/virtio/kvm_virtio.c @@ -458,6 +458,8 @@ static int __init kvm_devices_init(void) if (test_devices_support(total_memory_size) < 0) return -ENODEV; + pr_warn("The s390-virtio transport is deprecated. Please switch to a modern host providing virtio-ccw.\n"); + rc = vmem_add_mapping(total_memory_size, PAGE_SIZE); if (rc) return rc; @@ -482,7 +484,7 @@ static int __init kvm_devices_init(void) } /* code for early console output with virtio_console */ -static __init int early_put_chars(u32 vtermno, const char *buf, int count) +static int early_put_chars(u32 vtermno, const char *buf, int count) { char scratch[17]; unsigned int len = count; |
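The qeth_l3_sys.c hunks above replace list walks over card->ip_list under spin_lock_irqsave() with walks over the new card->ip_htable using hash_for_each()/hash_for_each_safe() under spin_lock_bh(); the extra loop variable is the bucket index, because a hashtable walk is a nested walk over every bucket's chain. A small userspace analog of that two-level iteration is sketched below, with hypothetical types (ip_entry, hash_addr) standing in for <linux/hashtable.h>.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define HASH_BITS    4
#define HASH_BUCKETS (1u << HASH_BITS)

struct ip_entry {
	char addr[40];
	struct ip_entry *next;
};

static struct ip_entry *ip_htable[HASH_BUCKETS];

/* Trivial string hash, standing in for the key the driver derives
 * from the IP address. */
static unsigned int hash_addr(const char *s)
{
	unsigned int h = 5381;

	while (*s)
		h = h * 33 + (unsigned char)*s++;
	return h & (HASH_BUCKETS - 1);
}

static void ip_add(const char *addr)
{
	unsigned int bkt = hash_addr(addr);
	struct ip_entry *e = calloc(1, sizeof(*e));

	if (!e)
		return;
	snprintf(e->addr, sizeof(e->addr), "%s", addr);
	e->next = ip_htable[bkt];
	ip_htable[bkt] = e;
}

int main(void)
{
	unsigned int bkt;
	struct ip_entry *e;

	ip_add("10.0.0.1");
	ip_add("fe80::1");
	ip_add("192.168.0.7");

	/* Analog of hash_for_each(ip_htable, bkt, e, hnode): visit every
	 * bucket, then every entry chained in that bucket. */
	for (bkt = 0; bkt < HASH_BUCKETS; bkt++)
		for (e = ip_htable[bkt]; e; e = e->next)
			printf("bucket %2u: %s\n", bkt, e->addr);

	return 0;
}

hash_for_each_safe(), used in the show functions above, additionally caches the next node before visiting the current one, so an entry may be unlinked while the walk is in progress.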