Diffstat (limited to 'drivers/s390/cio')
 drivers/s390/cio/chsc.c         |  81
 drivers/s390/cio/chsc.h         |   2
 drivers/s390/cio/chsc_sch.c     |   2
 drivers/s390/cio/cio.c          |  14
 drivers/s390/cio/cmf.c          |   6
 drivers/s390/cio/css.c          |   2
 drivers/s390/cio/device.c       |  22
 drivers/s390/cio/device.h       |   5
 drivers/s390/cio/device_fsm.c   |   2
 drivers/s390/cio/device_pgid.c  | 123
 drivers/s390/cio/eadm_sch.c     |   2
 drivers/s390/cio/io_sch.h       |   5
 drivers/s390/cio/qdio_main.c    |  12
 drivers/s390/cio/qdio_thinint.c |   2
14 files changed, 185 insertions(+), 95 deletions(-)
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 68e80e2734a4..31ceef1beb8b 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -283,7 +283,7 @@ struct chsc_sei_nt2_area {
 	u8  ccdf[PAGE_SIZE - 24 - 56];	/* content-code dependent field */
 } __packed;
 
-#define CHSC_SEI_NT0	0ULL
+#define CHSC_SEI_NT0	(1ULL << 63)
 #define CHSC_SEI_NT2	(1ULL << 61)
 
 struct chsc_sei {
@@ -291,7 +291,8 @@ struct chsc_sei {
 	u32 reserved1;
 	u64 ntsm;			/* notification type mask */
 	struct chsc_header response;
-	u32 reserved2;
+	u32 :24;
+	u8 nt;
 	union {
 		struct chsc_sei_nt0_area nt0_area;
 		struct chsc_sei_nt2_area nt2_area;
@@ -434,7 +435,6 @@ static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
 
 static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
 {
-#ifdef CONFIG_PCI
 	switch (sei_area->cc) {
 	case 1:
 		zpci_event_error(sei_area->ccdf);
@@ -443,11 +443,10 @@ static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
 		zpci_event_availability(sei_area->ccdf);
 		break;
 	default:
-		CIO_CRW_EVENT(2, "chsc: unhandled sei content code %d\n",
+		CIO_CRW_EVENT(2, "chsc: sei nt2 unhandled cc=%d\n",
 			      sei_area->cc);
 		break;
 	}
-#endif
 }
 
 static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
@@ -470,13 +469,19 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
 		chsc_process_sei_scm_change(sei_area);
 		break;
 	default: /* other stuff */
-		CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
+		CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n",
 			      sei_area->cc);
 		break;
 	}
+
+	/* Check if we might have lost some information. */
+	if (sei_area->flags & 0x40) {
+		CIO_CRW_EVENT(2, "chsc: event overflow\n");
+		css_schedule_eval_all();
+	}
 }
 
-static int __chsc_process_crw(struct chsc_sei *sei, u64 ntsm)
+static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
 {
 	do {
 		memset(sei, 0, sizeof(*sei));
@@ -487,40 +492,37 @@ static int __chsc_process_crw(struct chsc_sei *sei, u64 ntsm)
 		if (chsc(sei))
 			break;
 
-		if (sei->response.code == 0x0001) {
-			CIO_CRW_EVENT(2, "chsc: sei successful\n");
-
-			/* Check if we might have lost some information. */
-			if (sei->u.nt0_area.flags & 0x40) {
-				CIO_CRW_EVENT(2, "chsc: event overflow\n");
-				css_schedule_eval_all();
-			}
-
-			switch (sei->ntsm) {
-			case CHSC_SEI_NT0:
-				chsc_process_sei_nt0(&sei->u.nt0_area);
-				return 1;
-			case CHSC_SEI_NT2:
-				chsc_process_sei_nt2(&sei->u.nt2_area);
-				return 1;
-			default:
-				CIO_CRW_EVENT(2, "chsc: unhandled nt (nt=%08Lx)\n",
-					      sei->ntsm);
-				return 0;
-			}
-		} else {
+		if (sei->response.code != 0x0001) {
 			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
 				      sei->response.code);
 			break;
 		}
-	} while (sei->u.nt0_area.flags & 0x80);
 
-	return 0;
+		CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt);
+		switch (sei->nt) {
+		case 0:
+			chsc_process_sei_nt0(&sei->u.nt0_area);
+			break;
+		case 2:
+			chsc_process_sei_nt2(&sei->u.nt2_area);
+			break;
+		default:
+			CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
+			break;
+		}
+	} while (sei->u.nt0_area.flags & 0x80);
 }
 
+/*
+ * Handle channel subsystem related CRWs.
+ * Use store event information to find out what's going on.
+ *
+ * Note: Access to sei_page is serialized through machine check handler
+ * thread, so no need for locking.
+ */
 static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
 {
-	struct chsc_sei *sei;
+	struct chsc_sei *sei = sei_page;
 
 	if (overflow) {
 		css_schedule_eval_all();
@@ -530,22 +532,9 @@ static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
 		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
 		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc,
 		      crw0->anc, crw0->erc, crw0->rsid);
-	if (!sei_page)
-		return;
-	/* Access to sei_page is serialized through machine check handler
-	 * thread, so no need for locking. */
-	sei = sei_page;
 
 	CIO_TRACE_EVENT(2, "prcss");
-
-	/*
-	 * The ntsm does not allow to select NT0 and NT2 together. We need to
-	 * first check for NT2, than additionally for NT0...
-	 */
-#ifdef CONFIG_PCI
-	if (!__chsc_process_crw(sei, CHSC_SEI_NT2))
-#endif
-		__chsc_process_crw(sei, CHSC_SEI_NT0);
+	chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2);
 }
 
 void chsc_chp_online(struct chp_id chpid)
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 662dab4b93e6..227e05f674b3 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -157,7 +157,7 @@ int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token);
 #ifdef CONFIG_SCM_BUS
 int scm_update_information(void);
 #else /* CONFIG_SCM_BUS */
-#define scm_update_information() 0
+static inline int scm_update_information(void) { return 0; }
 #endif /* CONFIG_SCM_BUS */
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index 8f9a1a384496..facdf809113f 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -58,7 +58,7 @@ static void chsc_subchannel_irq(struct subchannel *sch)
 
 	CHSC_LOG(4, "irb");
 	CHSC_LOG_HEX(4, irb, sizeof(*irb));
-	kstat_cpu(smp_processor_id()).irqs[IOINT_CSC]++;
+	inc_irq_stat(IRQIO_CSC);
 
 	/* Copy irb to provided request and set done. */
 	if (!request) {
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 8e927b9f285f..986ef6a92a41 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -611,7 +611,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
 	tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
 	irb = (struct irb *)&S390_lowcore.irb;
 	do {
-		kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
+		kstat_incr_irqs_this_cpu(IO_INTERRUPT, NULL);
 		if (tpi_info->adapter_IO) {
 			do_adapter_IO(tpi_info->isc);
 			continue;
@@ -619,7 +619,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
 		sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
 		if (!sch) {
 			/* Clear pending interrupt condition. */
-			kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+			inc_irq_stat(IRQIO_CIO);
 			tsch(tpi_info->schid, irb);
 			continue;
 		}
@@ -633,9 +633,9 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
 			if (sch->driver && sch->driver->irq)
 				sch->driver->irq(sch);
 			else
-				kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+				inc_irq_stat(IRQIO_CIO);
 		} else
-			kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+			inc_irq_stat(IRQIO_CIO);
 		spin_unlock(sch->lock);
 		/*
 		 * Are more interrupts pending?
@@ -678,7 +678,7 @@ static void cio_tsch(struct subchannel *sch)
 	if (sch->driver && sch->driver->irq)
 		sch->driver->irq(sch);
 	else
-		kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+		inc_irq_stat(IRQIO_CIO);
 	if (!irq_context) {
 		irq_exit();
 		_local_bh_enable();
@@ -962,9 +962,9 @@ static void css_reset(void)
 			atomic_inc(&chpid_reset_count);
 	}
 	/* Wait for machine check for all channel paths. */
-	timeout = get_clock() + (RCHP_TIMEOUT << 12);
+	timeout = get_tod_clock() + (RCHP_TIMEOUT << 12);
 	while (atomic_read(&chpid_reset_count) != 0) {
-		if (get_clock() > timeout)
+		if (get_tod_clock() > timeout)
 			break;
 		cpu_relax();
 	}
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index c9fc61c0a866..4495e0627a40 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -33,7 +33,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/slab.h>
-#include <linux/timex.h>	/* get_clock() */
+#include <linux/timex.h>	/* get_tod_clock() */
 
 #include <asm/ccwdev.h>
 #include <asm/cio.h>
@@ -326,7 +326,7 @@ static int cmf_copy_block(struct ccw_device *cdev)
 		memcpy(cmb_data->last_block, hw_block, cmb_data->size);
 		memcpy(reference_buf, hw_block, cmb_data->size);
 	} while (memcmp(cmb_data->last_block, reference_buf, cmb_data->size));
-	cmb_data->last_update = get_clock();
+	cmb_data->last_update = get_tod_clock();
 	kfree(reference_buf);
 	return 0;
 }
@@ -428,7 +428,7 @@ static void cmf_generic_reset(struct ccw_device *cdev)
 		memset(cmbops->align(cmb_data->hw_block), 0, cmb_data->size);
 		cmb_data->last_update = 0;
 	}
-	cdev->private->cmb_start_time = get_clock();
+	cdev->private->cmb_start_time = get_tod_clock();
 	spin_unlock_irq(cdev->ccwlock);
 }
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index fd00afd8b850..a239237d43f3 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -780,7 +780,7 @@ static int __init setup_css(int nr)
 	css->cssid = nr;
 	dev_set_name(&css->device, "css%x", nr);
 	css->device.release = channel_subsystem_release;
-	tod_high = (u32) (get_clock() >> 32);
+	tod_high = (u32) (get_tod_clock() >> 32);
 	css_generate_pgid(css, tod_high);
 	return 0;
 }
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 6995cff44636..c6767f5a58b2 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -632,6 +632,14 @@ initiate_logging(struct device *dev, struct device_attribute *attr,
 	return count;
 }
 
+static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct subchannel *sch = to_subchannel(dev);
+
+	return sprintf(buf, "%02x\n", sch->vpm);
+}
+
 static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
 static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
 static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
@@ -640,11 +648,13 @@ static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
 static DEVICE_ATTR(online, 0644, online_show, online_store);
 static DEVICE_ATTR(availability, 0444, available_show, NULL);
 static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
+static DEVICE_ATTR(vpm, 0444, vpm_show, NULL);
 
 static struct attribute *io_subchannel_attrs[] = {
 	&dev_attr_chpids.attr,
 	&dev_attr_pimpampom.attr,
 	&dev_attr_logging.attr,
+	&dev_attr_vpm.attr,
 	NULL,
 };
@@ -758,7 +768,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
 					struct ccw_device *cdev)
 {
 	cdev->private->cdev = cdev;
-	cdev->private->int_class = IOINT_CIO;
+	cdev->private->int_class = IRQIO_CIO;
 	atomic_set(&cdev->private->onoff, 0);
 	cdev->dev.parent = &sch->dev;
 	cdev->dev.release = ccw_device_release;
@@ -1023,7 +1033,7 @@ static void io_subchannel_irq(struct subchannel *sch)
 	if (cdev)
 		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
 	else
-		kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+		inc_irq_stat(IRQIO_CIO);
 }
 
 void io_subchannel_init_config(struct subchannel *sch)
@@ -1634,7 +1644,7 @@ ccw_device_probe_console(void)
 	memset(&console_private, 0, sizeof(struct ccw_device_private));
 	console_cdev.private = &console_private;
 	console_private.cdev = &console_cdev;
-	console_private.int_class = IOINT_CIO;
+	console_private.int_class = IRQIO_CIO;
 	ret = ccw_device_console_enable(&console_cdev, sch);
 	if (ret) {
 		cio_release_console();
@@ -1715,13 +1725,13 @@ ccw_device_probe (struct device *dev)
 	if (cdrv->int_class != 0)
 		cdev->private->int_class = cdrv->int_class;
 	else
-		cdev->private->int_class = IOINT_CIO;
+		cdev->private->int_class = IRQIO_CIO;
 
 	ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
 
 	if (ret) {
 		cdev->drv = NULL;
-		cdev->private->int_class = IOINT_CIO;
+		cdev->private->int_class = IRQIO_CIO;
 		return ret;
 	}
@@ -1755,7 +1765,7 @@ ccw_device_remove (struct device *dev)
 	}
 	ccw_device_set_timeout(cdev, 0);
 	cdev->drv = NULL;
-	cdev->private->int_class = IOINT_CIO;
+	cdev->private->int_class = IRQIO_CIO;
 	return 0;
 }
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 2e575cff9845..7d4ecb65db00 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -61,11 +61,10 @@ dev_fsm_event(struct ccw_device *cdev, enum dev_event dev_event)
 
 	if (dev_event == DEV_EVENT_INTERRUPT) {
 		if (state == DEV_STATE_ONLINE)
-			kstat_cpu(smp_processor_id()).
-				irqs[cdev->private->int_class]++;
+			inc_irq_stat(cdev->private->int_class);
 		else if (state != DEV_STATE_CMFCHANGE &&
 			 state != DEV_STATE_CMFUPDATE)
-			kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+			inc_irq_stat(IRQIO_CIO);
 	}
 	dev_jumptable[state][dev_event](cdev, dev_event);
 }
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 1bb1d00095af..c7638c543250 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -47,7 +47,7 @@ static void ccw_timeout_log(struct ccw_device *cdev)
 	cc = stsch_err(sch->schid, &schib);
 
 	printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
-	       "device information:\n", get_clock());
+	       "device information:\n", get_tod_clock());
 	printk(KERN_WARNING "cio: orb:\n");
 	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
 		       orb, sizeof(*orb), 0);
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 908d287f66c1..37ada05e82a5 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -23,6 +23,8 @@
 #define PGID_RETRIES	256
 #define PGID_TIMEOUT	(10 * HZ)
 
+static void verify_start(struct ccw_device *cdev);
+
 /*
  * Process path verification data and report result.
  */
@@ -70,8 +72,8 @@ static void nop_do(struct ccw_device *cdev)
 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
 	struct ccw_request *req = &cdev->private->req;
 
-	/* Adjust lpm. */
-	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm);
+	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm &
+			      ~cdev->private->path_noirq_mask);
 	if (!req->lpm)
 		goto out_nopath;
 	nop_build_cp(cdev);
@@ -102,10 +104,20 @@ static void nop_callback(struct ccw_device *cdev, void *data, int rc)
 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
 	struct ccw_request *req = &cdev->private->req;
 
-	if (rc == 0)
+	switch (rc) {
+	case 0:
 		sch->vpm |= req->lpm;
-	else if (rc != -EACCES)
+		break;
+	case -ETIME:
+		cdev->private->path_noirq_mask |= req->lpm;
+		break;
+	case -EACCES:
+		cdev->private->path_notoper_mask |= req->lpm;
+		break;
+	default:
 		goto err;
+	}
+	/* Continue on the next path. */
 	req->lpm >>= 1;
 	nop_do(cdev);
 	return;
@@ -132,6 +144,48 @@ static void spid_build_cp(struct ccw_device *cdev, u8 fn)
 	req->cp = cp;
 }
 
+static void pgid_wipeout_callback(struct ccw_device *cdev, void *data, int rc)
+{
+	if (rc) {
+		/* We don't know the path groups' state. Abort. */
+		verify_done(cdev, rc);
+		return;
+	}
+	/*
+	 * Path groups have been reset. Restart path verification but
+	 * leave paths in path_noirq_mask out.
+	 */
+	cdev->private->flags.pgid_unknown = 0;
+	verify_start(cdev);
+}
+
+/*
+ * Reset pathgroups and restart path verification, leave unusable paths out.
+ */
+static void pgid_wipeout_start(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_dev_id *id = &cdev->private->dev_id;
+	struct ccw_request *req = &cdev->private->req;
+	u8 fn;
+
+	CIO_MSG_EVENT(2, "wipe: device 0.%x.%04x: pvm=%02x nim=%02x\n",
+		      id->ssid, id->devno, cdev->private->pgid_valid_mask,
+		      cdev->private->path_noirq_mask);
+
+	/* Initialize request data. */
+	memset(req, 0, sizeof(*req));
+	req->timeout = PGID_TIMEOUT;
+	req->maxretries = PGID_RETRIES;
+	req->lpm = sch->schib.pmcw.pam;
+	req->callback = pgid_wipeout_callback;
+	fn = SPID_FUNC_DISBAND;
+	if (cdev->private->flags.mpath)
+		fn |= SPID_FUNC_MULTI_PATH;
+	spid_build_cp(cdev, fn);
+	ccw_request_start(cdev);
+}
+
 /*
  * Perform establish/resign SET PGID on a single path.
  */
@@ -157,11 +211,14 @@ static void spid_do(struct ccw_device *cdev)
 	return;
 
 out_nopath:
+	if (cdev->private->flags.pgid_unknown) {
+		/* At least one SPID could be partially done. */
+		pgid_wipeout_start(cdev);
+		return;
+	}
 	verify_done(cdev, sch->vpm ? 0 : -EACCES);
 }
 
-static void verify_start(struct ccw_device *cdev);
-
 /*
  * Process SET PGID request result for a single path.
  */
@@ -174,7 +231,12 @@ static void spid_callback(struct ccw_device *cdev, void *data, int rc)
 	case 0:
 		sch->vpm |= req->lpm & sch->opm;
 		break;
+	case -ETIME:
+		cdev->private->flags.pgid_unknown = 1;
+		cdev->private->path_noirq_mask |= req->lpm;
+		break;
 	case -EACCES:
+		cdev->private->path_notoper_mask |= req->lpm;
 		break;
 	case -EOPNOTSUPP:
 		if (cdev->private->flags.mpath) {
@@ -330,8 +392,9 @@ static void snid_done(struct ccw_device *cdev, int rc)
 	else {
 		donepm = pgid_to_donepm(cdev);
 		sch->vpm = donepm & sch->opm;
-		cdev->private->pgid_todo_mask &= ~donepm;
 		cdev->private->pgid_reset_mask |= reset;
+		cdev->private->pgid_todo_mask &=
+			~(donepm | cdev->private->path_noirq_mask);
 		pgid_fill(cdev, pgid);
 	}
 out:
@@ -341,6 +404,10 @@ out:
 		      cdev->private->pgid_todo_mask, mismatch, reserved, reset);
 	switch (rc) {
 	case 0:
+		if (cdev->private->flags.pgid_unknown) {
+			pgid_wipeout_start(cdev);
+			return;
+		}
 		/* Anything left to do? */
 		if (cdev->private->pgid_todo_mask == 0) {
 			verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
@@ -384,9 +451,10 @@ static void snid_do(struct ccw_device *cdev)
 {
 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
 	struct ccw_request *req = &cdev->private->req;
+	int ret;
 
-	/* Adjust lpm if paths are not set in pam. */
-	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam);
+	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam &
+			      ~cdev->private->path_noirq_mask);
 	if (!req->lpm)
 		goto out_nopath;
 	snid_build_cp(cdev);
@@ -394,7 +462,13 @@ static void snid_do(struct ccw_device *cdev)
 	return;
 
 out_nopath:
-	snid_done(cdev, cdev->private->pgid_valid_mask ? 0 : -EACCES);
+	if (cdev->private->pgid_valid_mask)
+		ret = 0;
+	else if (cdev->private->path_noirq_mask)
+		ret = -ETIME;
+	else
+		ret = -EACCES;
+	snid_done(cdev, ret);
 }
 
 /*
@@ -404,10 +478,21 @@ static void snid_callback(struct ccw_device *cdev, void *data, int rc)
 {
 	struct ccw_request *req = &cdev->private->req;
 
-	if (rc == 0)
+	switch (rc) {
+	case 0:
 		cdev->private->pgid_valid_mask |= req->lpm;
-	else if (rc != -EACCES)
+		break;
+	case -ETIME:
+		cdev->private->flags.pgid_unknown = 1;
+		cdev->private->path_noirq_mask |= req->lpm;
+		break;
+	case -EACCES:
+		cdev->private->path_notoper_mask |= req->lpm;
+		break;
+	default:
 		goto err;
+	}
+	/* Continue on the next path. */
 	req->lpm >>= 1;
 	snid_do(cdev);
 	return;
@@ -427,6 +512,13 @@ static void verify_start(struct ccw_device *cdev)
 
 	sch->vpm = 0;
 	sch->lpm = sch->schib.pmcw.pam;
+
+	/* Initialize PGID data. */
+	memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
+	cdev->private->pgid_valid_mask = 0;
+	cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
+	cdev->private->path_notoper_mask = 0;
+
 	/* Initialize request data. */
 	memset(req, 0, sizeof(*req));
 	req->timeout = PGID_TIMEOUT;
@@ -459,14 +551,8 @@ static void verify_start(struct ccw_device *cdev)
  */
 void ccw_device_verify_start(struct ccw_device *cdev)
 {
-	struct subchannel *sch = to_subchannel(cdev->dev.parent);
-
 	CIO_TRACE_EVENT(4, "vrfy");
 	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
-	/* Initialize PGID data. */
-	memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
-	cdev->private->pgid_valid_mask = 0;
-	cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
 	/*
 	 * Initialize pathgroup and multipath state with target values.
 	 * They may change in the course of path verification.
 	 */
@@ -474,6 +560,7 @@ void ccw_device_verify_start(struct ccw_device *cdev)
 	cdev->private->flags.pgroup = cdev->private->options.pgroup;
 	cdev->private->flags.mpath = cdev->private->options.mpath;
 	cdev->private->flags.doverify = 0;
+	cdev->private->path_noirq_mask = 0;
 	verify_start(cdev);
 }
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c
index 6c9673400464..d9eddcba7e88 100644
--- a/drivers/s390/cio/eadm_sch.c
+++ b/drivers/s390/cio/eadm_sch.c
@@ -139,7 +139,7 @@ static void eadm_subchannel_irq(struct subchannel *sch)
 
 	EADM_LOG(6, "irq");
 	EADM_LOG_HEX(6, irb, sizeof(*irb));
-	kstat_cpu(smp_processor_id()).irqs[IOINT_ADM]++;
+	inc_irq_stat(IRQIO_ADM);
 
 	if ((scsw->stctl & (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))
 	    && scsw->eswf == 1 && irb->esw.eadm.erw.r)
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 76253dfcc1be..b108f4a5c7dd 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -126,6 +126,10 @@ struct ccw_device_private {
 	u8 pgid_valid_mask;	/* mask of valid PGIDs */
 	u8 pgid_todo_mask;	/* mask of PGIDs to be adjusted */
 	u8 pgid_reset_mask;	/* mask of PGIDs which were reset */
+	u8 path_noirq_mask;	/* mask of paths for which no irq was
+				   received */
+	u8 path_notoper_mask;	/* mask of paths which were found
+				   not operable */
 	u8 path_gone_mask;	/* mask of paths, that became unavailable */
 	u8 path_new_mask;	/* mask of paths, that became available */
 	struct {
@@ -145,6 +149,7 @@ struct ccw_device_private {
 		unsigned int resuming:1;	/* recognition while resume */
 		unsigned int pgroup:1;		/* pathgroup is set up */
 		unsigned int mpath:1;		/* multipathing is set up */
+		unsigned int pgid_unknown:1;	/* unknown pgid state */
 		unsigned int initialized:1;	/* set if initial reference held */
 	} __attribute__((packed)) flags;
 	unsigned long intparm;	/* user interruption parameter */
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 1671d3461f29..abc550e5dd35 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -338,10 +338,10 @@ again:
 		retries++;
 
 		if (!start_time) {
-			start_time = get_clock();
+			start_time = get_tod_clock();
 			goto again;
 		}
-		if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
+		if ((get_tod_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
 			goto again;
 	}
 	if (retries) {
@@ -504,7 +504,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
 	int count, stop;
 	unsigned char state = 0;
 
-	q->timestamp = get_clock();
+	q->timestamp = get_tod_clock();
 
 	/*
 	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -563,7 +563,7 @@ static int qdio_inbound_q_moved(struct qdio_q *q)
 	if (bufnr != q->last_move) {
 		q->last_move = bufnr;
 		if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
-			q->u.in.timestamp = get_clock();
+			q->u.in.timestamp = get_tod_clock();
 		return 1;
 	} else
 		return 0;
@@ -595,7 +595,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
 	 * At this point we know, that inbound first_to_check
 	 * has (probably) not moved (see qdio_inbound_processing).
	 */
-	if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
+	if (get_tod_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
 			      q->first_to_check);
 		return 1;
@@ -772,7 +772,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
 	int count, stop;
 	unsigned char state = 0;
 
-	q->timestamp = get_clock();
+	q->timestamp = get_tod_clock();
 
 	if (need_siga_sync(q))
 		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index bdb394b066fc..bde5255200dc 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -182,7 +182,7 @@ static void tiqdio_thinint_handler(void *alsi, void *data)
 	struct qdio_q *q;
 
 	last_ai_time = S390_lowcore.int_clock;
-	kstat_cpu(smp_processor_id()).irqs[IOINT_QAI]++;
+	inc_irq_stat(IRQIO_QAI);
 
 	/* protect tiq_list entries, only changed in activate or shutdown */
 	rcu_read_lock();
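
A recurring change across these hunks is the switch from open-coded per-CPU interrupt accounting to the inc_irq_stat() helper with the renamed IRQIO_* interruption classes. The sketch below shows that pattern in isolation; it assumes the s390 definitions from <asm/irq.h> that this series relies on, and the handler name is illustrative only, not taken from the patch.

#include <asm/irq.h>	/* inc_irq_stat(), enum interruption_class (IRQIO_*) */

#include "cio.h"	/* struct subchannel (driver-local header) */

/* Illustrative subchannel interrupt handler, not part of this patch. */
static void example_subchannel_irq(struct subchannel *sch)
{
	/*
	 * Old form, removed throughout this series:
	 *	kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
	 * New form: account the interrupt on this CPU under its class.
	 */
	inc_irq_stat(IRQIO_CIO);
}

Counting by interruption class through one helper keeps the per-CPU statistics update in a single place instead of repeating the kstat_cpu() expression in every handler.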