author     Dave Jones <davej@redhat.com>  2006-12-12 22:41:41 (GMT)
committer  Dave Jones <davej@redhat.com>  2006-12-12 22:41:41 (GMT)
commit     c4366889dda8110247be59ca41fddb82951a8c26 (patch)
tree       705c1a996bed8fd48ce94ff33ec9fd00f9b94875 /drivers/s390/cio
parent     db2fb9db5735cc532fd4fc55e94b9a3c3750378e (diff)
parent     e1036502e5263851259d147771226161e5ccc85a (diff)
download   linux-c4366889dda8110247be59ca41fddb82951a8c26.tar.xz
Merge ../linus
Conflicts: drivers/cpufreq/cpufreq.c
Diffstat (limited to 'drivers/s390/cio')
-rw-r--r--  drivers/s390/cio/chsc.c            110
-rw-r--r--  drivers/s390/cio/cio.c             190
-rw-r--r--  drivers/s390/cio/cio.h               6
-rw-r--r--  drivers/s390/cio/css.c              85
-rw-r--r--  drivers/s390/cio/css.h              12
-rw-r--r--  drivers/s390/cio/device.c          476
-rw-r--r--  drivers/s390/cio/device.h            7
-rw-r--r--  drivers/s390/cio/device_fsm.c       89
-rw-r--r--  drivers/s390/cio/device_id.c        10
-rw-r--r--  drivers/s390/cio/device_ops.c       28
-rw-r--r--  drivers/s390/cio/device_pgid.c      30
-rw-r--r--  drivers/s390/cio/device_status.c     3
-rw-r--r--  drivers/s390/cio/qdio.c            244
-rw-r--r--  drivers/s390/cio/qdio.h             32
14 files changed, 899 insertions, 423 deletions
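Note on the hunks that follow: a recurring mechanical change in this merge is that struct subchannel's lock turns from an embedded spinlock_t into a pointer, so every spin_lock_irq(&sch->lock) becomes spin_lock_irq(sch->lock). The reason, visible in cio_validate_subchannel() below, is that the console subchannel must reuse a statically allocated lock while ordinary subchannels get a kmalloc'ed one that is freed together with the subchannel. A minimal sketch of that split, using names from the patch (cio_create_sch_lock, cio_get_console_lock); the init_sch_lock/free_sch_lock wrappers are illustrative only and not part of the patch:

	/* illustrative wrappers -- not part of the patch */
	static int init_sch_lock(struct subchannel *sch, int is_console)
	{
		if (is_console) {
			/* console: share the static ccw_console_lock */
			sch->lock = cio_get_console_lock();
			return 0;
		}
		/* ordinary subchannel: allocate a private lock */
		return cio_create_sch_lock(sch);	/* kmalloc + spin_lock_init */
	}

	static void free_sch_lock(struct subchannel *sch, int is_console)
	{
		/* only non-console subchannels own their lock */
		if (!is_console)
			kfree(sch->lock);
		sch->lock = NULL;
	}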
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 2d78f0f..cbab8d2 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -183,7 +183,7 @@ css_get_ssd_info(struct subchannel *sch)
page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!page)
return -ENOMEM;
- spin_lock_irq(&sch->lock);
+ spin_lock_irq(sch->lock);
ret = chsc_get_sch_desc_irq(sch, page);
if (ret) {
static int cio_chsc_err_msg;
@@ -197,7 +197,7 @@ css_get_ssd_info(struct subchannel *sch)
cio_chsc_err_msg = 1;
}
}
- spin_unlock_irq(&sch->lock);
+ spin_unlock_irq(sch->lock);
free_page((unsigned long)page);
if (!ret) {
int j, chpid, mask;
@@ -233,7 +233,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
if (j >= 8)
return 0;
- spin_lock_irq(&sch->lock);
+ spin_lock_irq(sch->lock);
stsch(sch->schid, &schib);
if (!schib.pmcw.dnv)
@@ -251,6 +251,8 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
cc = cio_clear(sch);
if (cc == -ENODEV)
goto out_unreg;
+ /* Request retry of internal operation. */
+ device_set_intretry(sch);
/* Call handler. */
if (sch->driver && sch->driver->termination)
sch->driver->termination(&sch->dev);
@@ -263,10 +265,10 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
else if (sch->lpm == mask)
goto out_unreg;
out_unlock:
- spin_unlock_irq(&sch->lock);
+ spin_unlock_irq(sch->lock);
return 0;
out_unreg:
- spin_unlock_irq(&sch->lock);
+ spin_unlock_irq(sch->lock);
sch->lpm = 0;
if (css_enqueue_subchannel_slow(sch->schid)) {
css_clear_subchannel_slow_list();
@@ -376,12 +378,12 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
/* Check if a subchannel is newly available. */
return s390_process_res_acc_new_sch(schid);
- spin_lock_irq(&sch->lock);
+ spin_lock_irq(sch->lock);
chp_mask = s390_process_res_acc_sch(res_data, sch);
if (chp_mask == 0) {
- spin_unlock_irq(&sch->lock);
+ spin_unlock_irq(sch->lock);
put_device(&sch->dev);
return 0;
}
@@ -395,7 +397,7 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
else if (sch->driver && sch->driver->verify)
sch->driver->verify(&sch->dev);
- spin_unlock_irq(&sch->lock);
+ spin_unlock_irq(sch->lock);
put_device(&sch->dev);
return 0;
}
@@ -633,21 +635,21 @@ __chp_add(struct subchannel_id schid, void *data)
if (!sch)
/* Check if the subchannel is now available. */
return __chp_add_new_sch(schid);
- spin_lock_irq(&sch->lock);
+ spin_lock_irq(sch->lock);
for (i=0; i<8; i++) {
mask = 0x80 >> i;
if ((sch->schib.pmcw.pim & mask) &&
(sch->schib.pmcw.chpid[i] == chp->id)) {
if (stsch(sch->schid, &sch->schib) != 0) {
/* Endgame. */
- spin_unlock_irq(&sch->lock);
+ spin_unlock_irq(sch->lock);
return -ENXIO;
}
break;
}
}
if (i==8) {
- spin_unlock_irq(&sch->lock);
+ spin_unlock_irq(sch->lock);
return 0;
}
sch->lpm = ((sch->schib.pmcw.pim &
@@ -658,7 +660,7 @@ __chp_add(struct subchannel_id schid, void *data)
if (sch->driver && sch->driver->verify)
sch->driver->verify(&sch->dev);
- spin_unlock_irq(&sch->lock);
+ spin_unlock_irq(sch->lock);
put_device(&sch->dev);
return 0;
}
@@ -711,9 +713,6 @@ static inline int check_for_io_on_path(struct subchannel *sch, int index)
{
int cc;
- if (!device_is_online(sch))
- /* cio could be doing I/O. */
- return 0;
cc = stsch(sch->schid, &sch->schib);
if (cc)
return 0;
@@ -722,6 +721,26 @@ static inline int check_for_io_on_path(struct subchannel *sch, int index)
return 0;
}
+static void terminate_internal_io(struct subchannel *sch)
+{
+ if (cio_clear(sch)) {
+ /* Recheck device in case clear failed. */
+ sch->lpm = 0;
+ if (device_trigger_verify(sch) != 0) {
+ if(css_enqueue_subchannel_slow(sch->schid)) {
+ css_clear_subchannel_slow_list();
+ need_rescan = 1;
+ }
+ }
+ return;
+ }
+ /* Request retry of internal operation. */
+ device_set_intretry(sch);
+ /* Call handler. */
+ if (sch->driver && sch->driver->termination)
+ sch->driver->termination(&sch->dev);
+}
+
static inline void
__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
{
@@ -731,7 +750,7 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
if (!sch->ssd_info.valid)
return;
- spin_lock_irqsave(&sch->lock, flags);
+ spin_lock_irqsave(sch->lock, flags);
old_lpm = sch->lpm;
for (chp = 0; chp < 8; chp++) {
if (sch->ssd_info.chpid[chp] != chpid)
@@ -744,23 +763,29 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
device_trigger_reprobe(sch);
else if (sch->driver && sch->driver->verify)
sch->driver->verify(&sch->dev);
- } else {
- sch->opm &= ~(0x80 >> chp);
- sch->lpm &= ~(0x80 >> chp);
- if (check_for_io_on_path(sch, chp))
+ break;
+ }
+ sch->opm &= ~(0x80 >> chp);
+ sch->lpm &= ~(0x80 >> chp);
+ if (check_for_io_on_path(sch, chp)) {
+ if (device_is_online(sch))
/* Path verification is done after killing. */
device_kill_io(sch);
- else if (!sch->lpm) {
+ else
+ /* Kill and retry internal I/O. */
+ terminate_internal_io(sch);
+ } else if (!sch->lpm) {
+ if (device_trigger_verify(sch) != 0) {
if (css_enqueue_subchannel_slow(sch->schid)) {
css_clear_subchannel_slow_list();
need_rescan = 1;
}
- } else if (sch->driver && sch->driver->verify)
- sch->driver->verify(&sch->dev);
- }
+ }
+ } else if (sch->driver && sch->driver->verify)
+ sch->driver->verify(&sch->dev);
break;
}
- spin_unlock_irqrestore(&sch->lock, flags);
+ spin_unlock_irqrestore(sch->lock, flags);
}
static int
@@ -1465,41 +1490,6 @@ chsc_get_chp_desc(struct subchannel *sch, int chp_no)
return desc;
}
-static int reset_channel_path(struct channel_path *chp)
-{
- int cc;
-
- cc = rchp(chp->id);
- switch (cc) {
- case 0:
- return 0;
- case 2:
- return -EBUSY;
- default:
- return -ENODEV;
- }
-}
-
-static void reset_channel_paths_css(struct channel_subsystem *css)
-{
- int i;
-
- for (i = 0; i <= __MAX_CHPID; i++) {
- if (css->chps[i])
- reset_channel_path(css->chps[i]);
- }
-}
-
-void cio_reset_channel_paths(void)
-{
- int i;
-
- for (i = 0; i <= __MAX_CSSID; i++) {
- if (css[i] && css[i]->valid)
- reset_channel_paths_css(css[i]);
- }
-}
-
static int __init
chsc_alloc_sei_area(void)
{
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 8936e46..7835a71 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -21,6 +21,7 @@
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/setup.h>
+#include <asm/reset.h>
#include "airq.h"
#include "cio.h"
#include "css.h"
@@ -28,6 +29,7 @@
#include "ioasm.h"
#include "blacklist.h"
#include "cio_debug.h"
+#include "../s390mach.h"
debug_info_t *cio_debug_msg_id;
debug_info_t *cio_debug_trace_id;
@@ -141,11 +143,11 @@ cio_tpi(void)
return 1;
local_bh_disable();
irq_enter ();
- spin_lock(&sch->lock);
+ spin_lock(sch->lock);
memcpy (&sch->schib.scsw, &irb->scsw, sizeof (struct scsw));
if (sch->driver && sch->driver->irq)
sch->driver->irq(&sch->dev);
- spin_unlock(&sch->lock);
+ spin_unlock(sch->lock);
irq_exit ();
_local_bh_enable();
return 1;
@@ -413,6 +415,8 @@ cio_enable_subchannel (struct subchannel *sch, unsigned int isc)
CIO_TRACE_EVENT (2, "ensch");
CIO_TRACE_EVENT (2, sch->dev.bus_id);
+ if (sch_is_pseudo_sch(sch))
+ return -EINVAL;
ccode = stsch (sch->schid, &sch->schib);
if (ccode)
return -ENODEV;
@@ -460,6 +464,8 @@ cio_disable_subchannel (struct subchannel *sch)
CIO_TRACE_EVENT (2, "dissch");
CIO_TRACE_EVENT (2, sch->dev.bus_id);
+ if (sch_is_pseudo_sch(sch))
+ return 0;
ccode = stsch (sch->schid, &sch->schib);
if (ccode == 3) /* Not operational. */
return -ENODEV;
@@ -494,6 +500,15 @@ cio_disable_subchannel (struct subchannel *sch)
return ret;
}
+int cio_create_sch_lock(struct subchannel *sch)
+{
+ sch->lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
+ if (!sch->lock)
+ return -ENOMEM;
+ spin_lock_init(sch->lock);
+ return 0;
+}
+
/*
* cio_validate_subchannel()
*
@@ -511,6 +526,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
{
char dbf_txt[15];
int ccode;
+ int err;
sprintf (dbf_txt, "valsch%x", schid.sch_no);
CIO_TRACE_EVENT (4, dbf_txt);
@@ -518,9 +534,15 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
/* Nuke all fields. */
memset(sch, 0, sizeof(struct subchannel));
- spin_lock_init(&sch->lock);
+ sch->schid = schid;
+ if (cio_is_console(schid)) {
+ sch->lock = cio_get_console_lock();
+ } else {
+ err = cio_create_sch_lock(sch);
+ if (err)
+ goto out;
+ }
mutex_init(&sch->reg_mutex);
-
/* Set a name for the subchannel */
snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", schid.ssid,
schid.sch_no);
@@ -532,10 +554,10 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
* is not valid.
*/
ccode = stsch_err (schid, &sch->schib);
- if (ccode)
- return (ccode == 3) ? -ENXIO : ccode;
-
- sch->schid = schid;
+ if (ccode) {
+ err = (ccode == 3) ? -ENXIO : ccode;
+ goto out;
+ }
/* Copy subchannel type from path management control word. */
sch->st = sch->schib.pmcw.st;
@@ -548,14 +570,16 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
"non-I/O subchannel type %04X\n",
sch->schid.ssid, sch->schid.sch_no, sch->st);
/* We stop here for non-io subchannels. */
- return sch->st;
+ err = sch->st;
+ goto out;
}
/* Initialization for io subchannels. */
- if (!sch->schib.pmcw.dnv)
+ if (!sch->schib.pmcw.dnv) {
/* io subchannel but device number is invalid. */
- return -ENODEV;
-
+ err = -ENODEV;
+ goto out;
+ }
/* Devno is valid. */
if (is_blacklisted (sch->schid.ssid, sch->schib.pmcw.dev)) {
/*
@@ -565,7 +589,8 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
CIO_MSG_EVENT(0, "Blacklisted device detected "
"at devno %04X, subchannel set %x\n",
sch->schib.pmcw.dev, sch->schid.ssid);
- return -ENODEV;
+ err = -ENODEV;
+ goto out;
}
sch->opm = 0xff;
if (!cio_is_console(sch->schid))
@@ -593,6 +618,11 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
if ((sch->lpm & (sch->lpm - 1)) != 0)
sch->schib.pmcw.mp = 1; /* multipath mode */
return 0;
+out:
+ if (!cio_is_console(schid))
+ kfree(sch->lock);
+ sch->lock = NULL;
+ return err;
}
/*
@@ -635,7 +665,7 @@ do_IRQ (struct pt_regs *regs)
}
sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
if (sch)
- spin_lock(&sch->lock);
+ spin_lock(sch->lock);
/* Store interrupt response block to lowcore. */
if (tsch (tpi_info->schid, irb) == 0 && sch) {
/* Keep subchannel information word up to date. */
@@ -646,7 +676,7 @@ do_IRQ (struct pt_regs *regs)
sch->driver->irq(&sch->dev);
}
if (sch)
- spin_unlock(&sch->lock);
+ spin_unlock(sch->lock);
/*
* Are more interrupts pending?
* If so, the tpi instruction will update the lowcore
@@ -685,10 +715,10 @@ wait_cons_dev (void)
__ctl_load (cr6, 6, 6);
do {
- spin_unlock(&console_subchannel.lock);
+ spin_unlock(console_subchannel.lock);
if (!cio_tpi())
cpu_relax();
- spin_lock(&console_subchannel.lock);
+ spin_lock(console_subchannel.lock);
} while (console_subchannel.schib.scsw.actl != 0);
/*
* restore previous isc value
@@ -841,26 +871,12 @@ __clear_subchannel_easy(struct subchannel_id schid)
return -EBUSY;
}
-struct sch_match_id {
- struct subchannel_id schid;
- struct ccw_dev_id devid;
- int rc;
-};
-
-static int __shutdown_subchannel_easy_and_match(struct subchannel_id schid,
- void *data)
+static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
{
struct schib schib;
- struct sch_match_id *match_id = data;
if (stsch_err(schid, &schib))
return -ENXIO;
- if (match_id && schib.pmcw.dnv &&
- (schib.pmcw.dev == match_id->devid.devno) &&
- (schid.ssid == match_id->devid.ssid)) {
- match_id->schid = schid;
- match_id->rc = 0;
- }
if (!schib.pmcw.ena)
return 0;
switch(__disable_subchannel_easy(schid, &schib)) {
@@ -876,27 +892,111 @@ static int __shutdown_subchannel_easy_and_match(struct subchannel_id schid,
return 0;
}
-static int clear_all_subchannels_and_match(struct ccw_dev_id *devid,
- struct subchannel_id *schid)
+static atomic_t chpid_reset_count;
+
+static void s390_reset_chpids_mcck_handler(void)
+{
+ struct crw crw;
+ struct mci *mci;
+
+ /* Check for pending channel report word. */
+ mci = (struct mci *)&S390_lowcore.mcck_interruption_code;
+ if (!mci->cp)
+ return;
+ /* Process channel report words. */
+ while (stcrw(&crw) == 0) {
+ /* Check for responses to RCHP. */
+ if (crw.slct && crw.rsc == CRW_RSC_CPATH)
+ atomic_dec(&chpid_reset_count);
+ }
+}
+
+#define RCHP_TIMEOUT (30 * USEC_PER_SEC)
+static void css_reset(void)
+{
+ int i, ret;
+ unsigned long long timeout;
+
+ /* Reset subchannels. */
+ for_each_subchannel(__shutdown_subchannel_easy, NULL);
+ /* Reset channel paths. */
+ s390_reset_mcck_handler = s390_reset_chpids_mcck_handler;
+ /* Enable channel report machine checks. */
+ __ctl_set_bit(14, 28);
+ /* Temporarily reenable machine checks. */
+ local_mcck_enable();
+ for (i = 0; i <= __MAX_CHPID; i++) {
+ ret = rchp(i);
+ if ((ret == 0) || (ret == 2))
+ /*
+ * rchp either succeeded, or another rchp is already
+ * in progress. In either case, we'll get a crw.
+ */
+ atomic_inc(&chpid_reset_count);
+ }
+ /* Wait for machine check for all channel paths. */
+ timeout = get_clock() + (RCHP_TIMEOUT << 12);
+ while (atomic_read(&chpid_reset_count) != 0) {
+ if (get_clock() > timeout)
+ break;
+ cpu_relax();
+ }
+ /* Disable machine checks again. */
+ local_mcck_disable();
+ /* Disable channel report machine checks. */
+ __ctl_clear_bit(14, 28);
+ s390_reset_mcck_handler = NULL;
+}
+
+static struct reset_call css_reset_call = {
+ .fn = css_reset,
+};
+
+static int __init init_css_reset_call(void)
+{
+ atomic_set(&chpid_reset_count, 0);
+ register_reset_call(&css_reset_call);
+ return 0;
+}
+
+arch_initcall(init_css_reset_call);
+
+struct sch_match_id {
+ struct subchannel_id schid;
+ struct ccw_dev_id devid;
+ int rc;
+};
+
+static int __reipl_subchannel_match(struct subchannel_id schid, void *data)
+{
+ struct schib schib;
+ struct sch_match_id *match_id = data;
+
+ if (stsch_err(schid, &schib))
+ return -ENXIO;
+ if (schib.pmcw.dnv &&
+ (schib.pmcw.dev == match_id->devid.devno) &&
+ (schid.ssid == match_id->devid.ssid)) {
+ match_id->schid = schid;
+ match_id->rc = 0;
+ return 1;
+ }
+ return 0;
+}
+
+static int reipl_find_schid(struct ccw_dev_id *devid,
+ struct subchannel_id *schid)
{
struct sch_match_id match_id;
match_id.devid = *devid;
match_id.rc = -ENODEV;
- local_irq_disable();
- for_each_subchannel(__shutdown_subchannel_easy_and_match, &match_id);
+ for_each_subchannel(__reipl_subchannel_match, &match_id);
if (match_id.rc == 0)
*schid = match_id.schid;
return match_id.rc;
}
-
-void clear_all_subchannels(void)
-{
- local_irq_disable();
- for_each_subchannel(__shutdown_subchannel_easy_and_match, NULL);
-}
-
extern void do_reipl_asm(__u32 schid);
/* Make sure all subchannels are quiet before we re-ipl an lpar. */
@@ -904,9 +1004,9 @@ void reipl_ccw_dev(struct ccw_dev_id *devid)
{
struct subchannel_id schid;
- if (clear_all_subchannels_and_match(devid, &schid))
+ s390_reset_system();
+ if (reipl_find_schid(devid, &schid) != 0)
panic("IPL Device not found\n");
- cio_reset_channel_paths();
do_reipl_asm(*((__u32*)&schid));
}
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 4541c1a..35154a2 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -87,7 +87,7 @@ struct orb {
/* subchannel data structure used by I/O subroutines */
struct subchannel {
struct subchannel_id schid;
- spinlock_t lock; /* subchannel lock */
+ spinlock_t *lock; /* subchannel lock */
struct mutex reg_mutex;
enum {
SUBCHANNEL_TYPE_IO = 0,
@@ -131,15 +131,19 @@ extern int cio_set_options (struct subchannel *, int);
extern int cio_get_options (struct subchannel *);
extern int cio_modify (struct subchannel *);
+int cio_create_sch_lock(struct subchannel *);
+
/* Use with care. */
#ifdef CONFIG_CCW_CONSOLE
extern struct subchannel *cio_probe_console(void);
extern void cio_release_console(void);
extern int cio_is_console(struct subchannel_id);
extern struct subchannel *cio_get_console_subchannel(void);
+extern spinlock_t * cio_get_console_lock(void);
#else
#define cio_is_console(schid) 0
#define cio_get_console_subchannel() NULL
+#define cio_get_console_lock() NULL;
#endif
extern int cio_show_msg;
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index a2dee5b..4c81d89 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -91,9 +91,9 @@ css_free_subchannel(struct subchannel *sch)
/* Reset intparm to zeroes. */
sch->schib.pmcw.intparm = 0;
cio_modify(sch);
+ kfree(sch->lock);
kfree(sch);
}
-
}
static void
@@ -102,8 +102,10 @@ css_subchannel_release(struct device *dev)
struct subchannel *sch;
sch = to_subchannel(dev);
- if (!cio_is_console(sch->schid))
+ if (!cio_is_console(sch->schid)) {
+ kfree(sch->lock);
kfree(sch);
+ }
}
extern int css_get_ssd_info(struct subchannel *sch);
@@ -135,14 +137,16 @@ css_register_subchannel(struct subchannel *sch)
sch->dev.parent = &css[0]->device;
sch->dev.bus = &css_bus_type;
sch->dev.release = &css_subchannel_release;
-
+ sch->dev.groups = subch_attr_groups;
+
/* make it known to the system */
ret = css_sch_device_register(sch);
- if (ret)
+ if (ret) {
printk (KERN_WARNING "%s: could not register %s\n",
__func__, sch->dev.bus_id);
- else
- css_get_ssd_info(sch);
+ return ret;
+ }
+ css_get_ssd_info(sch);
return ret;
}
@@ -201,18 +205,18 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
unsigned long flags;
enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;
- spin_lock_irqsave(&sch->lock, flags);
+ spin_lock_irqsave(sch->lock, flags);
disc = device_is_disconnected(sch);
if (disc && slow) {
/* Disconnected devices are evaluated directly only.*/
- spin_unlock_irqrestore(&sch->lock, flags);
+ spin_unlock_irqrestore(sch->lock, flags);
return 0;
}
/* No interrupt after machine check - kill pending timers. */
device_kill_pending_timer(sch);
if (!disc && !slow) {
/* Non-disconnected devices are evaluated on the slow path. */
- spin_unlock_irqrestore(&sch->lock, flags);
+ spin_unlock_irqrestore(sch->lock, flags);
return -EAGAIN;
}
event = css_get_subchannel_status(sch);
@@ -237,9 +241,9 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
/* Ask driver what to do with device. */
action = UNREGISTER;
if (sch->driver && sch->driver->notify) {
- spin_unlock_irqrestore(&sch->lock, flags);
+ spin_unlock_irqrestore(sch->lock, flags);
ret = sch->driver->notify(&sch->dev, event);
- spin_lock_irqsave(&sch->lock, flags);
+ spin_lock_irqsave(sch->lock, flags);
if (ret)
action = NONE;
}
@@ -264,17 +268,13 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
case UNREGISTER:
case UNREGISTER_PROBE:
/* Unregister device (will use subchannel lock). */
- spin_unlock_irqrestore(&sch->lock, flags);
+ spin_unlock_irqrestore(sch->lock, flags);
css_sch_device_unregister(sch);
- spin_lock_irqsave(&sch->lock, flags);
+ spin_lock_irqsave(sch->lock, flags);
/* Reset intparm to zeroes. */
sch->schib.pmcw.intparm = 0;
cio_modify(sch);
-
- /* Probe if necessary. */
- if (action == UNREGISTER_PROBE)
- ret = css_probe_device(sch->schid);
break;
case REPROBE:
device_trigger_reprobe(sch);
@@ -282,7 +282,10 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
default:
break;
}
- spin_unlock_irqrestore(&sch->lock, flags);
+ spin_unlock_irqrestore(sch->lock, flags);
+ /* Probe if necessary. */
+ if (action == UNREGISTER_PROBE)
+ ret = css_probe_device(sch->schid);
return ret;
}
@@ -335,7 +338,7 @@ static LIST_HEAD(slow_subchannels_head);
static DEFINE_SPINLOCK(slow_subchannel_lock);
static void
-css_trigger_slow_path(void)
+css_trigger_slow_path(struct work_struct *unused)
{
CIO_TRACE_EVENT(4, "slowpath");
@@ -360,8 +363,7 @@ css_trigger_slow_path(void)
spin_unlock_irq(&slow_subchannel_lock);
}
-typedef void (*workfunc)(void *);
-DECLARE_WORK(slow_path_work, (workfunc)css_trigger_slow_path, NULL);
+DECLARE_WORK(slow_path_work, css_trigger_slow_path);
struct workqueue_struct *slow_path_wq;
/* Reprobe subchannel if unregistered. */
@@ -398,7 +400,7 @@ static int reprobe_subchannel(struct subchannel_id schid, void *data)
}
/* Work function used to reprobe all unregistered subchannels. */
-static void reprobe_all(void *data)
+static void reprobe_all(struct work_struct *unused)
{
int ret;
@@ -414,7 +416,7 @@ static void reprobe_all(void *data)
need_reprobe);
}
-DECLARE_WORK(css_reprobe_work, reprobe_all, NULL);
+DECLARE_WORK(css_reprobe_work, reprobe_all);
/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
@@ -575,12 +577,24 @@ css_cm_enable_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);
-static inline void __init
-setup_css(int nr)
+static inline int __init setup_css(int nr)
{
u32 tod_high;
+ int ret;
memset(css[nr], 0, sizeof(struct channel_subsystem));
+ css[nr]->pseudo_subchannel =
+ kzalloc(sizeof(*css[nr]->pseudo_subchannel), GFP_KERNEL);
+ if (!css[nr]->pseudo_subchannel)
+ return -ENOMEM;
+ css[nr]->pseudo_subchannel->dev.parent = &css[nr]->device;
+ css[nr]->pseudo_subchannel->dev.release = css_subchannel_release;
+ sprintf(css[nr]->pseudo_subchannel->dev.bus_id, "defunct");
+ ret = cio_create_sch_lock(css[nr]->pseudo_subchannel);
+ if (ret) {
+ kfree(css[nr]->pseudo_subchannel);
+ return ret;
+ }
mutex_init(&css[nr]->mutex);
css[nr]->valid = 1;
css[nr]->cssid = nr;
@@ -588,6 +602,7 @@ setup_css(int nr)
css[nr]->device.release = channel_subsystem_release;
tod_high = (u32) (get_clock() >> 32);
css_generate_pgid(css[nr], tod_high);
+ return 0;
}
/*
@@ -624,10 +639,12 @@ init_channel_subsystem (void)
ret = -ENOMEM;
goto out_unregister;
}
- setup_css(i);
- ret = device_register(&css[i]->device);
+ ret = setup_css(i);
if (ret)
goto out_free;
+ ret = device_register(&css[i]->device);
+ if (ret)
+ goto out_free_all;
if (css_characteristics_avail &&
css_chsc_characteristics.secm) {
ret = device_create_file(&css[i]->device,
@@ -635,6 +652,9 @@ init_channel_subsystem (void)
if (ret)
goto out_device;
}
+ ret = device_register(&css[i]->pseudo_subchannel->dev);
+ if (ret)
+ goto out_file;
}
css_init_done = 1;
@@ -642,13 +662,19 @@ init_channel_subsystem (void)
for_each_subchannel(__init_channel_subsystem, NULL);
return 0;
+out_file:
+ device_remove_file(&css[i]->device, &dev_attr_cm_enable);
out_device:
device_unregister(&css[i]->device);
+out_free_all:
+ kfree(css[i]->pseudo_subchannel->lock);
+ kfree(css[i]->pseudo_subchannel);
out_free:
kfree(css[i]);
out_unregister:
while (i > 0) {
i--;
+ device_unregister(&css[i]->pseudo_subchannel->dev);
if (css_characteristics_avail && css_chsc_characteristics.secm)
device_remove_file(&css[i]->device,
&dev_attr_cm_enable);
@@ -660,6 +686,11 @@ out:
return ret;
}
+int sch_is_pseudo_sch(struct subchannel *sch)
+{
+ return sch == to_css(sch->dev.parent)->pseudo_subchannel;
+}
+
/*
* find a driver for a subchannel. They identify by the subchannel
* type with the exception that the console subchannel driver has its own
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 4c2ff83..3464c5b 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -73,6 +73,8 @@ struct senseid {
} __attribute__ ((packed,aligned(4)));
struct ccw_device_private {
+ struct ccw_device *cdev;
+ struct subchannel *sch;
int state; /* device state */
atomic_t onoff;
unsigned long registered;
@@ -94,6 +96,7 @@ struct ccw_device_private {
unsigned int donotify:1; /* call notify function */
unsigned int recog_done:1; /* dev. recog. complete */
unsigned int fake_irb:1; /* deliver faked irb */
+ unsigned int intretry:1; /* retry internal operation */
} __attribute__((packed)) flags;
unsigned long intparm; /* user interruption parameter */
struct qdio_irq *qdio_data;
@@ -157,6 +160,8 @@ struct channel_subsystem {
int cm_enabled;
void *cub_addr1;
void *cub_addr2;
+ /* for orphaned ccw devices */
+ struct subchannel *pseudo_subchannel;
};
#define to_css(dev) container_of(dev, struct channel_subsystem, device)
@@ -171,6 +176,8 @@ void device_trigger_reprobe(struct subchannel *);
/* Helper functions for vary on/off. */
int device_is_online(struct subchannel *);
void device_kill_io(struct subchannel *);
+void device_set_intretry(struct subchannel *sch);
+int device_trigger_verify(struct subchannel *sch);
/* Machine check helper function. */
void device_kill_pending_timer(struct subchannel *);
@@ -182,6 +189,11 @@ void css_clear_subchannel_slow_list(void);
int css_slow_subchannels_exist(void);
extern int need_rescan;
+int sch_is_pseudo_sch(struct subchannel *);
+
extern struct workqueue_struct *slow_path_wq;
extern struct work_struct slow_path_work;
+
+int subchannel_add_files (struct device *);
+extern struct attribute_group *subch_attr_groups[];
#endif
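The struct ccw_device_private additions above (the cdev and sch back-pointers) exist because this merge also picks up the tree-wide work_struct API change: work functions no longer receive a void *data argument but the struct work_struct itself, and recover their context with container_of(). That is the pattern behind every PREPARE_WORK()/DECLARE_WORK() hunk in device.c and device_fsm.c below. A generic sketch of the old versus new convention, with hypothetical names (my_ctx, old_handler, new_handler):

	struct my_ctx {
		struct work_struct work;
		int payload;
	};

	/* old style: handler received an opaque data pointer */
	static void old_handler(void *data)
	{
		struct my_ctx *ctx = data;
		/* ... use ctx->payload ... */
	}

	/* new style: handler receives the work_struct and uses container_of() */
	static void new_handler(struct work_struct *work)
	{
		struct my_ctx *ctx = container_of(work, struct my_ctx, work);
		/* ... use ctx->payload ... */
	}

	/* PREPARE_WORK(&ctx->work, new_handler);  -- no data argument any more */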
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 94bdd4d..8035790 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -23,6 +23,7 @@
#include <asm/param.h> /* HZ */
#include "cio.h"
+#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
@@ -234,9 +235,11 @@ chpids_show (struct device * dev, struct device_attribute *attr, char * buf)
ssize_t ret = 0;
int chp;
- for (chp = 0; chp < 8; chp++)
- ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]);
-
+ if (ssd)
+ for (chp = 0; chp < 8; chp++)
+ ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]);
+ else
+ ret += sprintf (buf, "n/a");
ret += sprintf (buf+ret, "\n");
return min((ssize_t)PAGE_SIZE, ret);
}
@@ -294,14 +297,44 @@ online_show (struct device *dev, struct device_attribute *attr, char *buf)
return sprintf(buf, cdev->online ? "1\n" : "0\n");
}
+int ccw_device_is_orphan(struct ccw_device *cdev)
+{
+ return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
+}
+
+static void ccw_device_unregister(struct work_struct *work)
+{
+ struct ccw_device_private *priv;
+ struct ccw_device *cdev;
+
+ priv = container_of(work, struct ccw_device_private, kick_work);
+ cdev = priv->cdev;
+ if (test_and_clear_bit(1, &cdev->private->registered))
+ device_unregister(&cdev->dev);
+ put_device(&cdev->dev);
+}
+
static void
ccw_device_remove_disconnected(struct ccw_device *cdev)
{
struct subchannel *sch;
+ unsigned long flags;
/*
* Forced offline in disconnected state means
* 'throw away device'.
*/
+ if (ccw_device_is_orphan(cdev)) {
+ /* Deregister ccw device. */
+ spin_lock_irqsave(cdev->ccwlock, flags);
+ cdev->private->state = DEV_STATE_NOT_OPER;
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
+ if (get_device(&cdev->dev)) {
+ PREPARE_WORK(&cdev->private->kick_work,
+ ccw_device_unregister);
+ queue_work(ccw_device_work, &cdev->private->kick_work);
+ }
+ return ;
+ }
sch = to_subchannel(cdev->dev.parent);
css_sch_device_unregister(sch);
/* Reset intparm to zeroes. */
@@ -462,6 +495,8 @@ available_show (struct device *dev, struct device_attribute *attr, char *buf)
struct ccw_device *cdev = to_ccwdev(dev);
struct subchannel *sch;
+ if (ccw_device_is_orphan(cdev))
+ return sprintf(buf, "no device\n");
switch (cdev->private->state) {
case DEV_STATE_BOXED:
return sprintf(buf, "boxed\n");
@@ -498,11 +533,10 @@ static struct attribute_group subch_attr_group = {
.attrs = subch_attrs,
};
-static inline int
-subchannel_add_files (struct device *dev)
-{
- return sysfs_create_group(&dev->kobj, &subch_attr_group);
-}
+struct attribute_group *subch_attr_groups[] = {
+ &subch_attr_group,
+ NULL,
+};
static struct attribute * ccwdev_attrs[] = {
&dev_attr_devtype.attr,
@@ -532,8 +566,7 @@ device_remove_files(struct device *dev)
/* this is a simple abstraction for device_register that sets the
* correct bus type and adds the bus specific files */
-int
-ccw_device_register(struct ccw_device *cdev)
+static int ccw_device_register(struct ccw_device *cdev)
{
struct device *dev = &cdev->dev;
int ret;
@@ -564,11 +597,10 @@ match_devno(struct device * dev, void * data)
cdev = to_ccwdev(dev);
if ((cdev->private->state == DEV_STATE_DISCONNECTED) &&
+ !ccw_device_is_orphan(cdev) &&
ccw_dev_id_is_equal(&cdev->private->dev_id, &d->dev_id) &&
- (cdev != d->sibling)) {
- cdev->private->state = DEV_STATE_NOT_OPER;
+ (cdev != d->sibling))
return 1;
- }
return 0;
}
@@ -585,13 +617,36 @@ static struct ccw_device * get_disc_ccwdev_by_dev_id(struct ccw_dev_id *dev_id,
return dev ? to_ccwdev(dev) : NULL;
}
-static void
-ccw_device_add_changed(void *data)
+static int match_orphan(struct device *dev, void *data)
+{
+ struct ccw_dev_id *dev_id;
+ struct ccw_device *cdev;
+
+ dev_id = data;
+ cdev = to_ccwdev(dev);
+ return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
+}
+
+static struct ccw_device *
+get_orphaned_ccwdev_by_dev_id(struct channel_subsystem *css,
+ struct ccw_dev_id *dev_id)
{
+ struct device *dev;
+
+ dev = device_find_child(&css->pseudo_subchannel->dev, dev_id,
+ match_orphan);
+
+ return dev ? to_ccwdev(dev) : NULL;
+}
+static void
+ccw_device_add_changed(struct work_struct *work)
+{
+ struct ccw_device_private *priv;
struct ccw_device *cdev;
- cdev = data;
+ priv = container_of(work, struct ccw_device_private, kick_work);
+ cdev = priv->cdev;
if (device_add(&cdev->dev)) {
put_device(&cdev->dev);
return;
@@ -603,64 +658,21 @@ ccw_device_add_changed(void *data)
}
}
-extern int css_get_ssd_info(struct subchannel *sch);
-
-void
-ccw_device_do_unreg_rereg(void *data)
+void ccw_device_do_unreg_rereg(struct work_struct *work)
{
+ struct ccw_device_private *priv;
struct ccw_device *cdev;
struct subchannel *sch;
- int need_rename;
- cdev = data;
+ priv = container_of(work, struct ccw_device_private, kick_work);
+ cdev = priv->cdev;
sch = to_subchannel(cdev->dev.parent);
- if (cdev->private->dev_id.devno != sch->schib.pmcw.dev) {
- /*
- * The device number has changed. This is usually only when
- * a device has been detached under VM and then re-appeared
- * on another subchannel because of a different attachment
- * order than before. Ideally, we should should just switch
- * subchannels, but unfortunately, this is not possible with
- * the current implementation.
- * Instead, we search for the old subchannel for this device
- * number and deregister so there are no collisions with the
- * newly registered ccw_device.
- * FIXME: Find another solution so the block layer doesn't
- * get possibly sick...
- */
- struct ccw_device *other_cdev;
- struct ccw_dev_id dev_id;
-
- need_rename = 1;
- dev_id.devno = sch->schib.pmcw.dev;
- dev_id.ssid = sch->schid.ssid;
- other_cdev = get_disc_ccwdev_by_dev_id(&dev_id, cdev);
- if (other_cdev) {
- struct subchannel *other_sch;
-
- other_sch = to_subchannel(other_cdev->dev.parent);
- if (get_device(&other_sch->dev)) {
- stsch(other_sch->schid, &other_sch->schib);
- if (other_sch->schib.pmcw.dnv) {
- other_sch->schib.pmcw.intparm = 0;
- cio_modify(other_sch);
- }
- css_sch_device_unregister(other_sch);
- }
- }
- /* Update ssd info here. */
- css_get_ssd_info(sch);
- cdev->private->dev_id.devno = sch->schib.pmcw.dev;
- } else
- need_rename = 0;
+
device_remove_files(&cdev->dev);
if (test_and_clear_bit(1, &cdev->private->registered))
device_del(&cdev->dev);
- if (need_rename)
- snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x",
- sch->schid.ssid, sch->schib.pmcw.dev);
PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_add_changed, cdev);
+ ccw_device_add_changed);
queue_work(ccw_device_work, &cdev->private->kick_work);
}
@@ -674,22 +686,210 @@ ccw_device_release(struct device *dev)
kfree(cdev);
}
+static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
+{
+ struct ccw_device *cdev;
+
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (cdev) {
+ cdev->private = kzalloc(sizeof(struct ccw_device_private),
+ GFP_KERNEL | GFP_DMA);
+ if (cdev->private)
+ return cdev;
+ }
+ kfree(cdev);
+ return ERR_PTR(-ENOMEM);
+}
+
+static int io_subchannel_initialize_dev(struct subchannel *sch,
+ struct ccw_device *cdev)
+{
+ cdev->private->cdev = cdev;
+ atomic_set(&cdev->private->onoff, 0);
+ cdev->dev.parent = &sch->dev;
+ cdev->dev.release = ccw_device_release;
+ INIT_LIST_HEAD(&cdev->private->kick_work.entry);
+ /* Do first half of device_register. */
+ device_initialize(&cdev->dev);
+ if (!get_device(&sch->dev)) {
+ if (cdev->dev.release)
+ cdev->dev.release(&cdev->dev);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
+{
+ struct ccw_device *cdev;
+ int ret;
+
+ cdev = io_subchannel_allocate_dev(sch);
+ if (!IS_ERR(cdev)) {
+ ret = io_subchannel_initialize_dev(sch, cdev);
+ if (ret) {
+ kfree(cdev);
+ cdev = ERR_PTR(ret);
+ }
+ }
+ return cdev;
+}
+
+static int io_subchannel_recog(struct ccw_device *, struct subchannel *);
+
+static void sch_attach_device(struct subchannel *sch,
+ struct ccw_device *cdev)
+{
+ spin_lock_irq(sch->lock);
+ sch->dev.driver_data = cdev;
+ cdev->private->schid = sch->schid;
+ cdev->ccwlock = sch->lock;
+ device_trigger_reprobe(sch);
+ spin_unlock_irq(sch->lock);
+}
+
+static void sch_attach_disconnected_device(struct subchannel *sch,
+ struct ccw_device *cdev)
+{
+ struct subchannel *other_sch;
+ int ret;
+
+ other_sch = to_subchannel(get_device(cdev->dev.parent));
+ ret = device_move(&cdev->dev, &sch->dev);
+ if (ret) {
+ CIO_MSG_EVENT(2, "Moving disconnected device 0.%x.%04x failed "
+ "(ret=%d)!\n", cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno, ret);
+ put_device(&other_sch->dev);
+ return;
+ }
+ other_sch->dev.driver_data = NULL;
+ /* No need to keep a subchannel without ccw device around. */
+ css_sch_device_unregister(other_sch);
+ put_device(&other_sch->dev);
+ sch_attach_device(sch, cdev);
+}
+
+static void sch_attach_orphaned_device(struct subchannel *sch,
+ struct ccw_device *cdev)
+{
+ int ret;
+
+ /* Try to move the ccw device to its new subchannel. */
+ ret = device_move(&cdev->dev, &sch->dev);
+ if (ret) {
+ CIO_MSG_EVENT(0, "Moving device 0.%x.%04x from orphanage "
+ "failed (ret=%d)!\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno, ret);
+ return;
+ }
+ sch_attach_device(sch, cdev);
+}
+
+static void sch_create_and_recog_new_device(struct subchannel *sch)
+{
+ struct ccw_device *cdev;
+
+ /* Need to allocate a new ccw device. */
+ cdev = io_subchannel_create_ccwdev(sch);
+ if (IS_ERR(cdev)) {
+ /* OK, we did everything we could... */
+ css_sch_device_unregister(sch);
+ return;
+ }
+ spin_lock_irq(sch->lock);
+ sch->dev.driver_data = cdev;
+ spin_unlock_irq(sch->lock);
+ /* Start recognition for the new ccw device. */
+ if (io_subchannel_recog(cdev, sch)) {
+ spin_lock_irq(sch->lock);
+ sch->dev.driver_data = NULL;
+ spin_unlock_irq(sch->lock);
+ if (cdev->dev.release)
+ cdev->dev.release(&cdev->dev);
+ css_sch_device_unregister(sch);
+ }
+}
+
+
+void ccw_device_move_to_orphanage(struct work_struct *work)
+{
+ struct ccw_device_private *priv;
+ struct ccw_device *cdev;
+ struct ccw_device *replacing_cdev;
+ struct subchannel *sch;
+ int ret;
+ struct channel_subsystem *css;
+ struct ccw_dev_id dev_id;
+
+ priv = container_of(work, struct ccw_device_private, kick_work);
+ cdev = priv->cdev;
+ sch = to_subchannel(cdev->dev.parent);
+ css = to_css(sch->dev.parent);
+ dev_id.devno = sch->schib.pmcw.dev;
+ dev_id.ssid = sch->schid.ssid;
+
+ /*
+ * Move the orphaned ccw device to the orphanage so the replacing
+ * ccw device can take its place on the subchannel.
+ */
+ ret = device_move(&cdev->dev, &css->pseudo_subchannel->dev);
+ if (ret) {
+ CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to orphanage failed "
+ "(ret=%d)!\n", cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno, ret);
+ return;
+ }
+ cdev->ccwlock = css->pseudo_subchannel->lock;
+ /*
+ * Search for the replacing ccw device
+ * - among the disconnected devices
+ * - in the orphanage
+ */
+ replacing_cdev = get_disc_ccwdev_by_dev_id(&dev_id, cdev);
+ if (replacing_cdev) {
+ sch_attach_disconnected_device(sch, replacing_cdev);
+ return;
+ }
+ replacing_cdev = get_orphaned_ccwdev_by_dev_id(css, &dev_id);
+ if (replacing_cdev) {
+ sch_attach_orphaned_device(sch, replacing_cdev);
+ return;
+ }
+ sch_create_and_recog_new_device(sch);
+}
+
/*
* Register recognized device.
*/
static void
-io_subchannel_register(void *data)
+io_subchannel_register(struct work_struct *work)
{
+ struct ccw_device_private *priv;
struct ccw_device *cdev;
struct subchannel *sch;
int ret;
unsigned long flags;
- cdev = data;
+ priv = container_of(work, struct ccw_device_private, kick_work);
+ cdev = priv->cdev;
sch = to_subchannel(cdev->dev.parent);
+ /*
+ * io_subchannel_register() will also be called after device
+ * recognition has been done for a boxed device (which will already
+ * be registered). We need to reprobe since we may now have sense id
+ * information.
+ */
if (klist_node_attached(&cdev->dev.knode_parent)) {
- bus_rescan_devices(&ccw_bus_type);
+ if (!cdev->drv) {
+ ret = device_reprobe(&cdev->dev);
+ if (ret)
+ /* We can't do much here. */
+ dev_info(&cdev->dev, "device_reprobe() returned"
+ " %d\n", ret);
+ }
goto out;
}
/* make it known to the system */
@@ -698,9 +898,9 @@ io_subchannel_register(void *data)
printk (KERN_WARNING "%s: could not register %s\n",
__func__, cdev->dev.bus_id);
put_device(&cdev->dev);
- spin_lock_irqsave(&sch->lock, flags);
+ spin_lock_irqsave(sch->lock, flags);
sch->dev.driver_data = NULL;
- spin_unlock_irqrestore(&sch->lock, flags);
+ spin_unlock_irqrestore(sch->lock, flags);
kfree (cdev->private);
kfree (cdev);
put_device(&sch->dev);
@@ -708,11 +908,6 @@ io_subchannel_register(void *data)
wake_up(&ccw_device_init_wq);
return;
}
-
- ret = subchannel_add_files(cdev->dev.parent);
- if (ret)
- printk(KERN_WARNING "%s: could not add attributes to %s\n",
- __func__, sch->dev.bus_id);
put_device(&cdev->dev);
out:
cdev->private->flags.recog_done = 1;
@@ -723,11 +918,14 @@ out:
}
void
-ccw_device_call_sch_unregister(void *data)
+ccw_device_call_sch_unregister(struct work_struct *work)
{
- struct ccw_device *cdev = data;
+ struct ccw_device_private *priv;
+ struct ccw_device *cdev;
struct subchannel *sch;
+ priv = container_of(work, struct ccw_device_private, kick_work);
+ cdev = priv->cdev;
sch = to_subchannel(cdev->dev.parent);
css_sch_device_unregister(sch);
/* Reset intparm to zeroes. */
@@ -757,7 +955,7 @@ io_subchannel_recog_done(struct ccw_device *cdev)
break;
sch = to_subchannel(cdev->dev.parent);
PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_call_sch_unregister, cdev);
+ ccw_device_call_sch_unregister);
queue_work(slow_path_wq, &cdev->private->kick_work);
if (atomic_dec_and_test(&ccw_device_init_count))
wake_up(&ccw_device_init_wq);
@@ -772,7 +970,7 @@ io_subchannel_recog_done(struct ccw_device *cdev)
if (!get_device(&cdev->dev))
break;
PREPARE_WORK(&cdev->private->kick_work,
- io_subchannel_register, cdev);
+ io_subchannel_register);
queue_work(slow_path_wq, &cdev->private->kick_work);
break;
}
@@ -786,7 +984,7 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
sch->dev.driver_data = cdev;
sch->driver = &io_subchannel_driver;
- cdev->ccwlock = &sch->lock;
+ cdev->ccwlock = sch->lock;
/* Init private data. */
priv = cdev->private;
@@ -806,9 +1004,9 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
atomic_inc(&ccw_device_init_count);
/* Start async. device sensing. */
- spin_lock_irq(&sch->lock);
+ spin_lock_irq(sch->lock);
rc = ccw_device_recognition(cdev);
- spin_unlock_irq(&sch->lock);
+ spin_unlock_irq(sch->lock);
if (rc) {
if (atomic_dec_and_test(&ccw_device_init_count))
wake_up(&ccw_device_init_wq);
@@ -816,12 +1014,55 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
return rc;
}
+static void ccw_device_move_to_sch(struct work_struct *work)
+{
+ struct ccw_device_private *priv;
+ int rc;
+ struct subchannel *sch;
+ struct ccw_device *cdev;
+ struct subchannel *former_parent;
+
+ priv = container_of(work, struct ccw_device_private, kick_work);
+ sch = priv->sch;
+ cdev = priv->cdev;
+ former_parent = ccw_device_is_orphan(cdev) ?
+ NULL : to_subchannel(get_device(cdev->dev.parent));
+ mutex_lock(&sch->reg_mutex);
+ /* Try to move the ccw device to its new subchannel. */
+ rc = device_move(&cdev->dev, &sch->dev);
+ mutex_unlock(&sch->reg_mutex);
+ if (rc) {
+ CIO_MSG_EVENT(2, "Moving device 0.%x.%04x to subchannel "
+ "0.%x.%04x failed (ret=%d)!\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno, sch->schid.ssid,
+ sch->schid.sch_no, rc);
+ css_sch_device_unregister(sch);
+ goto out;
+ }
+ if (former_parent) {
+ spin_lock_irq(former_parent->lock);
+ former_parent->dev.driver_data = NULL;
+ spin_unlock_irq(former_parent->lock);
+ css_sch_device_unregister(former_parent);
+ /* Reset intparm to zeroes. */
+ former_parent->schib.pmcw.intparm = 0;
+ cio_modify(former_parent);
+ }
+ sch_attach_device(sch, cdev);
+out:
+ if (former_parent)
+ put_device(&former_parent->dev);
+ put_device(&cdev->dev);
+}
+
static int
io_subchannel_probe (struct subchannel *sch)
{
struct ccw_device *cdev;
int rc;
unsigned long flags;
+ struct ccw_dev_id dev_id;
if (sch->dev.driver_data) {
/*
@@ -832,7 +1073,6 @@ io_subchannel_probe (struct subchannel *sch)
cdev = sch->dev.driver_data;
device_initialize(&cdev->dev);
ccw_device_register(cdev);
- subchannel_add_files(&sch->dev);
/*
* Check if the device is already online. If it is
* the reference count needs to be corrected
@@ -845,33 +1085,37 @@ io_subchannel_probe (struct subchannel *sch)
get_device(&cdev->dev);
return 0;
}
- cdev = kzalloc (sizeof(*cdev), GFP_KERNEL);
+ /*
+ * First check if a fitting device may be found amongst the
+ * disconnected devices or in the orphanage.
+ */
+ dev_id.devno = sch->schib.pmcw.dev;
+ dev_id.ssid = sch->schid.ssid;
+ cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL);
if (!cdev)
- return -ENOMEM;
- cdev->private = kzalloc(sizeof(struct ccw_device_private),
- GFP_KERNEL | GFP_DMA);
- if (!cdev->private) {
- kfree(cdev);
- return -ENOMEM;
- }
- atomic_set(&cdev->private->onoff, 0);
- cdev->dev.parent = &sch->dev;
- cdev->dev.release = ccw_device_release;
- INIT_LIST_HEAD(&cdev->private->kick_work.entry);
- /* Do first half of device_register. */
- device_initialize(&cdev->dev);
-
- if (!get_device(&sch->dev)) {
- if (cdev->dev.release)
- cdev->dev.release(&cdev->dev);
- return -ENODEV;
+ cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent),
+ &dev_id);
+ if (cdev) {
+ /*
+ * Schedule moving the device until when we have a registered
+ * subchannel to move to and succeed the probe. We can
+ * unregister later again, when the probe is through.
+ */
+ cdev->private->sch = sch;
+ PREPARE_WORK(&cdev->private->kick_work,
+ ccw_device_move_to_sch);
+ queue_work(slow_path_wq, &cdev->private->kick_work);
+ return 0;
}
+ cdev = io_subchannel_create_ccwdev(sch);
+ if (IS_ERR(cdev))
+ return PTR_ERR(cdev);
rc = io_subchannel_recog(cdev, sch);
if (rc) {
- spin_lock_irqsave(&sch->lock, flags);
+ spin_lock_irqsave(sch->lock, flags);
sch->dev.driver_data = NULL;
- spin_unlock_irqrestore(&sch->lock, flags);
+ spin_unlock_irqrestore(sch->lock, flags);
if (cdev->dev.release)
cdev->dev.release(&cdev->dev);
}
@@ -879,17 +1123,6 @@ io_subchannel_probe (struct subchannel *sch)
return rc;
}
-static void
-ccw_device_unregister(void *data)
-{
- struct ccw_device *cdev;
-
- cdev = (struct ccw_device *)data;
- if (test_and_clear_bit(1, &cdev->private->registered))
- device_unregister(&cdev->dev);
- put_device(&cdev->dev);
-}
-
static int
io_subchannel_remove (struct subchannel *sch)
{
@@ -910,7 +1143,7 @@ io_subchannel_remove (struct subchannel *sch)
*/
if (get_device(&cdev->dev)) {
PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_unregister, cdev);
+ ccw_device_unregister);
queue_work(ccw_device_work, &cdev->private->kick_work);
}
return 0;
@@ -949,6 +1182,9 @@ io_subchannel_ioterm(struct device *dev)
cdev = dev->driver_data;
if (!cdev)
return;
+ /* Internal I/O will be retried by the interrupt handler. */
+ if (cdev->private->flags.intretry)
+ return;
cdev->private->state = DEV_STATE_CLEAR_VERIFY;
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
@@ -989,6 +1225,13 @@ static struct ccw_device console_cdev;
static struct ccw_device_private console_private;
static int console_cdev_in_use;
+static DEFINE_SPINLOCK(ccw_console_lock);
+
+spinlock_t * cio_get_console_lock(void)
+{
+ return &ccw_console_lock;
+}
+
static int
ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch)
{
@@ -1034,6 +1277,7 @@ ccw_device_probe_console(void)
memset(&console_cdev, 0, sizeof(struct ccw_device));
memset(&console_private, 0, sizeof(struct ccw_device_private));
console_cdev.private = &console_private;
+ console_private.cdev = &console_cdev;
ret = ccw_device_console_enable(&console_cdev, sch);
if (ret) {
cio_release_console();
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index c6140cc..29db634 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -78,9 +78,10 @@ void io_subchannel_recog_done(struct ccw_device *cdev);
int ccw_device_cancel_halt_clear(struct ccw_device *);
-int ccw_device_register(struct ccw_device *);
-void ccw_device_do_unreg_rereg(void *);
-void ccw_device_call_sch_unregister(void *);
+void ccw_device_do_unreg_rereg(struct work_struct *);
+void ccw_device_call_sch_unregister(struct work_struct *);
+void ccw_device_move_to_orphanage(struct work_struct *);
+int ccw_device_is_orphan(struct ccw_device *);
int ccw_device_recognition(struct ccw_device *);
int ccw_device_online(struct ccw_device *);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index fcaf28d..eed1457 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -59,6 +59,27 @@ device_set_disconnected(struct subchannel *sch)
cdev->private->state = DEV_STATE_DISCONNECTED;
}
+void device_set_intretry(struct subchannel *sch)
+{
+ struct ccw_device *cdev;
+
+ cdev = sch->dev.driver_data;
+ if (!cdev)
+ return;
+ cdev->private->flags.intretry = 1;
+}
+
+int device_trigger_verify(struct subchannel *sch)
+{
+ struct ccw_device *cdev;
+
+ cdev = sch->dev.driver_data;
+ if (!cdev || !cdev->online)
+ return -EINVAL;
+ dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+ return 0;
+}
+
/*
* Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
*/
@@ -165,15 +186,14 @@ ccw_device_handle_oper(struct ccw_device *cdev)
/*
* Check if cu type and device type still match. If
* not, it is certainly another device and we have to
- * de- and re-register. Also check here for non-matching devno.
+ * de- and re-register.
*/
if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
cdev->id.cu_model != cdev->private->senseid.cu_model ||
cdev->id.dev_type != cdev->private->senseid.dev_type ||
- cdev->id.dev_model != cdev->private->senseid.dev_model ||
- cdev->private->dev_id.devno != sch->schib.pmcw.dev) {
+ cdev->id.dev_model != cdev->private->senseid.dev_model) {
PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_do_unreg_rereg, cdev);
+ ccw_device_do_unreg_rereg);
queue_work(ccw_device_work, &cdev->private->kick_work);
return 0;
}
@@ -308,19 +328,21 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err)
}
static void
-ccw_device_oper_notify(void *data)
+ccw_device_oper_notify(struct work_struct *work)
{
+ struct ccw_device_private *priv;
struct ccw_device *cdev;
struct subchannel *sch;
int ret;
- cdev = data;
+ priv = container_of(work, struct ccw_device_private, kick_work);
+ cdev = priv->cdev;
sch = to_subchannel(cdev->dev.parent);
ret = (sch->driver && sch->driver->notify) ?
sch->driver->notify(&sch->dev, CIO_OPER) : 0;
if (!ret)
/* Driver doesn't want device back. */
- ccw_device_do_unreg_rereg(cdev);
+ ccw_device_do_unreg_rereg(work);
else {
/* Reenable channel measurements, if needed. */
cmf_reenable(cdev);
@@ -356,8 +378,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
if (cdev->private->flags.donotify) {
cdev->private->flags.donotify = 0;
- PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify,
- cdev);
+ PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify);
queue_work(ccw_device_notify_work, &cdev->private->kick_work);
}
wake_up(&cdev->private->wait_q);
@@ -507,13 +528,15 @@ ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
static void
-ccw_device_nopath_notify(void *data)
+ccw_device_nopath_notify(struct work_struct *work)
{
+ struct ccw_device_private *priv;
struct ccw_device *cdev;
struct subchannel *sch;
int ret;
- cdev = data;
+ priv = container_of(work, struct ccw_device_private, kick_work);
+ cdev = priv->cdev;
sch = to_subchannel(cdev->dev.parent);
/* Extra sanity. */
if (sch->lpm)
@@ -526,8 +549,7 @@ ccw_device_nopath_notify(void *data)
cio_disable_subchannel(sch);
if (get_device(&cdev->dev)) {
PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_call_sch_unregister,
- cdev);
+ ccw_device_call_sch_unregister);
queue_work(ccw_device_work,
&cdev->private->kick_work);
} else
@@ -578,11 +600,15 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
}
break;
case -ETIME:
+ /* Reset oper notify indication after verify error. */
+ cdev->private->flags.donotify = 0;
ccw_device_done(cdev, DEV_STATE_BOXED);
break;
default:
+ /* Reset oper notify indication after verify error. */
+ cdev->private->flags.donotify = 0;
PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_nopath_notify, cdev);
+ ccw_device_nopath_notify);
queue_work(ccw_device_notify_work, &cdev->private->kick_work);
ccw_device_done(cdev, DEV_STATE_NOT_OPER);
break;
@@ -649,6 +675,10 @@ ccw_device_offline(struct ccw_device *cdev)
{
struct subchannel *sch;
+ if (ccw_device_is_orphan(cdev)) {
+ ccw_device_done(cdev, DEV_STATE_OFFLINE);
+ return 0;
+ }
sch = to_subchannel(cdev->dev.parent);
if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv)
return -ENODEV;
@@ -713,7 +743,7 @@ ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event)
sch = to_subchannel(cdev->dev.parent);
if (get_device(&cdev->dev)) {
PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_call_sch_unregister, cdev);
+ ccw_device_call_sch_unregister);
queue_work(ccw_device_work, &cdev->private->kick_work);
}
wake_up(&cdev->private->wait_q);
@@ -744,7 +774,7 @@ ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
}
if (get_device(&cdev->dev)) {
PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_call_sch_unregister, cdev);
+ ccw_device_call_sch_unregister);
queue_work(ccw_device_work, &cdev->private->kick_work);
}
wake_up(&cdev->private->wait_q);
@@ -849,7 +879,7 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
sch = to_subchannel(cdev->dev.parent);
if (!sch->lpm) {
PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_nopath_notify, cdev);
+ ccw_device_nopath_notify);
queue_work(ccw_device_notify_work,
&cdev->private->kick_work);
} else
@@ -889,6 +919,12 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
* had killed the original request.
*/
if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
+ /* Retry Basic Sense if requested. */
+ if (cdev->private->flags.intretry) {
+ cdev->private->flags.intretry = 0;
+ ccw_device_do_sense(cdev, irb);
+ return;
+ }
cdev->private->flags.dosense = 0;
memset(&cdev->private->irb, 0, sizeof(struct irb));
ccw_device_accumulate_irb(cdev, irb);
@@ -938,7 +974,7 @@ ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
ERR_PTR(-EIO));
if (!sch->lpm) {
PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_nopath_notify, cdev);
+ ccw_device_nopath_notify);
queue_work(ccw_device_notify_work, &cdev->private->kick_work);
} else if (cdev->private->flags.doverify)
/* Start delayed path verification. */
@@ -961,7 +997,7 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
sch = to_subchannel(cdev->dev.parent);
if (!sch->lpm) {
PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_nopath_notify, cdev);
+ ccw_device_nopath_notify);
queue_work(ccw_device_notify_work,
&cdev->private->kick_work);
} else
@@ -990,7 +1026,7 @@ void device_kill_io(struct subchannel *sch)
if (ret == -ENODEV) {
if (!sch->lpm) {
PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_nopath_notify, cdev);
+ ccw_device_nopath_notify);
queue_work(ccw_device_notify_work,
&cdev->private->kick_work);
} else
@@ -1002,7 +1038,7 @@ void device_kill_io(struct subchannel *sch)
ERR_PTR(-EIO));
if (!sch->lpm) {
PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_nopath_notify, cdev);
+ ccw_device_nopath_notify);
queue_work(ccw_device_notify_work, &cdev->private->kick_work);
} else
/* Start delayed path verification. */
@@ -1073,7 +1109,8 @@ device_trigger_reprobe(struct subchannel *sch)
/* Update some values. */
if (stsch(sch->schid, &sch->schib))
return;
-
+ if (!sch->schib.pmcw.dnv)
+ return;
/*
* The pim, pam, pom values may not be accurate, but they are the best
* we have before performing device selection :/
@@ -1087,7 +1124,13 @@ device_trigger_reprobe(struct subchannel *sch)
sch->schib.pmcw.mp = 1;
sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
/* We should also udate ssd info, but this has to wait. */
- ccw_device_start_id(cdev, 0);
+ /* Check if this is another device which appeared on the same sch. */
+ if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
+ PREPARE_WORK(&cdev->private->kick_work,
+ ccw_device_move_to_orphanage);
+ queue_work(ccw_device_work, &cdev->private->kick_work);
+ } else
+ ccw_device_start_id(cdev, 0);
}
static void
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index a74785b..f172759 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -191,6 +191,8 @@ __ccw_device_sense_id_start(struct ccw_device *cdev)
if ((sch->opm & cdev->private->imask) != 0 &&
cdev->private->iretry > 0) {
cdev->private->iretry--;
+ /* Reset internal retry indication. */
+ cdev->private->flags.intretry = 0;
ret = cio_start (sch, cdev->private->iccws,
cdev->private->imask);
/* ret is 0, -EBUSY, -EACCES or -ENODEV */
@@ -237,8 +239,14 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
return 0; /* Success */
}
/* Check the error cases. */
- if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
+ if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
+ /* Retry Sense ID if requested. */
+ if (cdev->private->flags.intretry) {
+ cdev->private->flags.intretry = 0;
+ return -EAGAIN;
+ }
return -ETIME;
+ }
if (irb->esw.esw0.erw.cons && (irb->ecw[0] & SNS0_CMD_REJECT)) {
/*
* if the device doesn't support the SenseID
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index b39c1fa..d269607 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -316,9 +316,9 @@ __ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, _
ccw_device_set_timeout(cdev, 0);
if (ret == -EBUSY) {
/* Try again later. */
- spin_unlock_irq(&sch->lock);
+ spin_unlock_irq(sch->lock);
msleep(10);
- spin_lock_irq(&sch->lock);
+ spin_lock_irq(sch->lock);
continue;
}
if (ret != 0)
@@ -326,12 +326,12 @@ __ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, _
break;
/* Wait for end of request. */
cdev->private->intparm = magic;
- spin_unlock_irq(&sch->lock);
+ spin_unlock_irq(sch->lock);
wait_event(cdev->private->wait_q,
(cdev->private->intparm == -EIO) ||
(cdev->private->intparm == -EAGAIN) ||
(cdev->private->intparm == 0));
- spin_lock_irq(&sch->lock);
+ spin_lock_irq(sch->lock);
/* Check at least for channel end / device end */
if (cdev->private->intparm == -EIO) {
/* Non-retryable error. */
@@ -342,9 +342,9 @@ __ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, _
/* Success. */
break;
/* Try again later. */
- spin_unlock_irq(&sch->lock);
+ spin_unlock_irq(sch->lock);
msleep(10);
- spin_lock_irq(&sch->lock);
+ spin_lock_irq(sch->lock);
} while (1);
return ret;
@@ -389,7 +389,7 @@ read_dev_chars (struct ccw_device *cdev, void **buffer, int length)
return ret;
}
- spin_lock_irq(&sch->lock);
+ spin_lock_irq(sch->lock);
/* Save interrupt handler. */
handler = cdev->handler;
/* Temporarily install own handler. */
@@ -406,7 +406,7 @@ read_dev_chars (struct ccw_device *cdev, void **buffer, int length)
/* Restore interrupt handler. */
cdev->handler = handler;
- spin_unlock_irq(&sch->lock);
+ spin_unlock_irq(sch->lock);
clear_normalized_cda (rdc_ccw);
kfree(rdc_ccw);
@@ -463,7 +463,7 @@ read_conf_data_lpm (struct ccw_device *cdev, void **buffer, int *length, __u8 lp
rcd_ccw->count = ciw->count;
rcd_ccw->flags = CCW_FLAG_SLI;
- spin_lock_irq(&sch->lock);
+ spin_lock_irq(sch->lock);
/* Save interrupt handler. */
handler = cdev->handler;
/* Temporarily install own handler. */
@@ -480,7 +480,7 @@ read_conf_data_lpm (struct ccw_device *cdev, void **buffer, int *length, __u8 lp
/* Restore interrupt handler. */
cdev->handler = handler;
- spin_unlock_irq(&sch->lock);
+ spin_unlock_irq(sch->lock);
/*
* on success we update the user input parms
@@ -537,7 +537,7 @@ ccw_device_stlck(struct ccw_device *cdev)
kfree(buf);
return -ENOMEM;
}
- spin_lock_irqsave(&sch->lock, flags);
+ spin_lock_irqsave(sch->lock, flags);
ret = cio_enable_subchannel(sch, 3);
if (ret)
goto out_unlock;
@@ -559,9 +559,9 @@ ccw_device_stlck(struct ccw_device *cdev)
goto out_unlock;
}
cdev->private->irb.scsw.actl |= SCSW_ACTL_START_PEND;
- spin_unlock_irqrestore(&sch->lock, flags);
+ spin_unlock_irqrestore(sch->lock, flags);
wait_event(cdev->private->wait_q, cdev->private->irb.scsw.actl == 0);
- spin_lock_irqsave(&sch->lock, flags);
+ spin_lock_irqsave(sch->lock, flags);
cio_disable_subchannel(sch); //FIXME: return code?
if ((cdev->private->irb.scsw.dstat !=
(DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
@@ -572,7 +572,7 @@ ccw_device_stlck(struct ccw_device *cdev)
out_unlock:
kfree(buf);
kfree(buf2);
- spin_unlock_irqrestore(&sch->lock, flags);
+ spin_unlock_irqrestore(sch->lock, flags);
return ret;
}
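Editor's note: all the locking changes in device_ops.c (and in the chsc.c hunks earlier) replace spin_lock_irq(&sch->lock) with spin_lock_irq(sch->lock): the subchannel lock is now reached through a pointer rather than embedded in struct subchannel, presumably so the lock's lifetime can be decoupled from the structure that names it. A rough user-space analogue with pthreads, purely illustrative and not the kernel's implementation:

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct subchannel_like {
		pthread_mutex_t *lock;  /* pointer to a lock, not an embedded lock */
		int schid;
	};

	static struct subchannel_like *make_sch(int schid)
	{
		struct subchannel_like *sch = malloc(sizeof(*sch));
		if (!sch)
			return NULL;
		sch->lock = malloc(sizeof(*sch->lock));
		if (!sch->lock) {
			free(sch);
			return NULL;
		}
		pthread_mutex_init(sch->lock, NULL);  /* lock allocated separately */
		sch->schid = schid;
		return sch;
	}

	int main(void)
	{
		struct subchannel_like *sch = make_sch(7);
		if (!sch)
			return 1;

		pthread_mutex_lock(sch->lock);    /* cf. spin_lock_irq(sch->lock)   */
		printf("holding lock of subchannel %d\n", sch->schid);
		pthread_mutex_unlock(sch->lock);  /* cf. spin_unlock_irq(sch->lock) */

		pthread_mutex_destroy(sch->lock);
		free(sch->lock);
		free(sch);
		return 0;
	}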
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 2975ce8..cb1879a 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -71,6 +71,8 @@ __ccw_device_sense_pgid_start(struct ccw_device *cdev)
ccw->cda = (__u32) __pa (&cdev->private->pgid[i]);
if (cdev->private->iretry > 0) {
cdev->private->iretry--;
+ /* Reset internal retry indication. */
+ cdev->private->flags.intretry = 0;
ret = cio_start (sch, cdev->private->iccws,
cdev->private->imask);
/* ret is 0, -EBUSY, -EACCES or -ENODEV */
@@ -122,8 +124,14 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
sch = to_subchannel(cdev->dev.parent);
irb = &cdev->private->irb;
- if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
+ if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
+ /* Retry Sense PGID if requested. */
+ if (cdev->private->flags.intretry) {
+ cdev->private->flags.intretry = 0;
+ return -EAGAIN;
+ }
return -ETIME;
+ }
if (irb->esw.esw0.erw.cons &&
(irb->ecw[0]&(SNS0_CMD_REJECT|SNS0_INTERVENTION_REQ))) {
/*
@@ -253,6 +261,8 @@ __ccw_device_do_pgid(struct ccw_device *cdev, __u8 func)
ret = -EACCES;
if (cdev->private->iretry > 0) {
cdev->private->iretry--;
+ /* Reset internal retry indication. */
+ cdev->private->flags.intretry = 0;
ret = cio_start (sch, cdev->private->iccws,
cdev->private->imask);
/* We expect an interrupt in case of success or busy
@@ -293,6 +303,8 @@ static int __ccw_device_do_nop(struct ccw_device *cdev)
ret = -EACCES;
if (cdev->private->iretry > 0) {
cdev->private->iretry--;
+ /* Reset internal retry indication. */
+ cdev->private->flags.intretry = 0;
ret = cio_start (sch, cdev->private->iccws,
cdev->private->imask);
/* We expect an interrupt in case of success or busy
@@ -321,8 +333,14 @@ __ccw_device_check_pgid(struct ccw_device *cdev)
sch = to_subchannel(cdev->dev.parent);
irb = &cdev->private->irb;
- if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
+ if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
+ /* Retry Set PGID if requested. */
+ if (cdev->private->flags.intretry) {
+ cdev->private->flags.intretry = 0;
+ return -EAGAIN;
+ }
return -ETIME;
+ }
if (irb->esw.esw0.erw.cons) {
if (irb->ecw[0] & SNS0_CMD_REJECT)
return -EOPNOTSUPP;
@@ -360,8 +378,14 @@ static int __ccw_device_check_nop(struct ccw_device *cdev)
sch = to_subchannel(cdev->dev.parent);
irb = &cdev->private->irb;
- if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
+ if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
+ /* Retry NOP if requested. */
+ if (cdev->private->flags.intretry) {
+ cdev->private->flags.intretry = 0;
+ return -EAGAIN;
+ }
return -ETIME;
+ }
if (irb->scsw.cc == 3) {
CIO_MSG_EVENT(2, "NOP - Device %04x on Subchannel 0.%x.%04x,"
" lpm %02X, became 'not operational'\n",
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 3f7cbce..bdcf930 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -319,6 +319,9 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
sch->sense_ccw.count = SENSE_MAX_COUNT;
sch->sense_ccw.flags = CCW_FLAG_SLI;
+ /* Reset internal retry indication. */
+ cdev->private->flags.intretry = 0;
+
return cio_start (sch, &sch->sense_ccw, 0xff);
}
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index 0648ce5..9d4ea44 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -46,6 +46,7 @@
#include <asm/timex.h>
#include <asm/debug.h>
+#include <asm/s390_rdev.h>
#include <asm/qdio.h>
#include "cio.h"
@@ -65,12 +66,12 @@ MODULE_LICENSE("GPL");
/******************** HERE WE GO ***********************************/
static const char version[] = "QDIO base support version 2";
+extern struct bus_type ccw_bus_type;
-#ifdef QDIO_PERFORMANCE_STATS
+static int qdio_performance_stats = 0;
static int proc_perf_file_registration;
static unsigned long i_p_c, i_p_nc, o_p_c, o_p_nc, ii_p_c, ii_p_nc;
static struct qdio_perf_stats perf_stats;
-#endif /* QDIO_PERFORMANCE_STATS */
static int hydra_thinints;
static int is_passthrough = 0;
@@ -275,9 +276,8 @@ qdio_siga_sync(struct qdio_q *q, unsigned int gpr2,
QDIO_DBF_TEXT4(0,trace,"sigasync");
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
-#ifdef QDIO_PERFORMANCE_STATS
- perf_stats.siga_syncs++;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats)
+ perf_stats.siga_syncs++;
cc = do_siga_sync(q->schid, gpr2, gpr3);
if (cc)
@@ -322,9 +322,8 @@ qdio_siga_output(struct qdio_q *q)
__u32 busy_bit;
__u64 start_time=0;
-#ifdef QDIO_PERFORMANCE_STATS
- perf_stats.siga_outs++;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats)
+ perf_stats.siga_outs++;
QDIO_DBF_TEXT4(0,trace,"sigaout");
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
@@ -358,9 +357,8 @@ qdio_siga_input(struct qdio_q *q)
QDIO_DBF_TEXT4(0,trace,"sigain");
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
-#ifdef QDIO_PERFORMANCE_STATS
- perf_stats.siga_ins++;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats)
+ perf_stats.siga_ins++;
cc = do_siga_input(q->schid, q->mask);
@@ -481,7 +479,7 @@ qdio_stop_polling(struct qdio_q *q)
unsigned char state = 0;
struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
- if (!atomic_swap(&q->polling,0))
+ if (!atomic_xchg(&q->polling,0))
return 1;
QDIO_DBF_TEXT4(0,trace,"stoppoll");
@@ -954,9 +952,8 @@ __qdio_outbound_processing(struct qdio_q *q)
if (unlikely(qdio_reserve_q(q))) {
qdio_release_q(q);
-#ifdef QDIO_PERFORMANCE_STATS
- o_p_c++;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats)
+ o_p_c++;
/* as we're sissies, we'll check next time */
if (likely(!atomic_read(&q->is_in_shutdown))) {
qdio_mark_q(q);
@@ -964,10 +961,10 @@ __qdio_outbound_processing(struct qdio_q *q)
}
return;
}
-#ifdef QDIO_PERFORMANCE_STATS
- o_p_nc++;
- perf_stats.tl_runs++;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats) {
+ o_p_nc++;
+ perf_stats.tl_runs++;
+ }
/* see comment in qdio_kick_outbound_q */
siga_attempts=atomic_read(&q->busy_siga_counter);
@@ -1142,15 +1139,16 @@ qdio_has_inbound_q_moved(struct qdio_q *q)
{
int i;
-#ifdef QDIO_PERFORMANCE_STATS
static int old_pcis=0;
static int old_thinints=0;
- if ((old_pcis==perf_stats.pcis)&&(old_thinints==perf_stats.thinints))
- perf_stats.start_time_inbound=NOW;
- else
- old_pcis=perf_stats.pcis;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats) {
+ if ((old_pcis==perf_stats.pcis)&&
+ (old_thinints==perf_stats.thinints))
+ perf_stats.start_time_inbound=NOW;
+ else
+ old_pcis=perf_stats.pcis;
+ }
i=qdio_get_inbound_buffer_frontier(q);
if ( (i!=GET_SAVED_FRONTIER(q)) ||
@@ -1340,10 +1338,10 @@ qdio_kick_inbound_handler(struct qdio_q *q)
q->siga_error=0;
q->error_status_flags=0;
-#ifdef QDIO_PERFORMANCE_STATS
- perf_stats.inbound_time+=NOW-perf_stats.start_time_inbound;
- perf_stats.inbound_cnt++;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats) {
+ perf_stats.inbound_time+=NOW-perf_stats.start_time_inbound;
+ perf_stats.inbound_cnt++;
+ }
}
static inline void
@@ -1363,9 +1361,8 @@ __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
*/
if (unlikely(qdio_reserve_q(q))) {
qdio_release_q(q);
-#ifdef QDIO_PERFORMANCE_STATS
- ii_p_c++;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats)
+ ii_p_c++;
/*
* as we might just be about to stop polling, we make
* sure that we check again at least once more
@@ -1373,9 +1370,8 @@ __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
tiqdio_sched_tl();
return;
}
-#ifdef QDIO_PERFORMANCE_STATS
- ii_p_nc++;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats)
+ ii_p_nc++;
if (unlikely(atomic_read(&q->is_in_shutdown))) {
qdio_unmark_q(q);
goto out;
@@ -1416,11 +1412,11 @@ __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
irq_ptr = (struct qdio_irq*)q->irq_ptr;
for (i=0;i<irq_ptr->no_output_qs;i++) {
oq = irq_ptr->output_qs[i];
-#ifdef QDIO_PERFORMANCE_STATS
- perf_stats.tl_runs--;
-#endif /* QDIO_PERFORMANCE_STATS */
- if (!qdio_is_outbound_q_done(oq))
+ if (!qdio_is_outbound_q_done(oq)) {
+ if (qdio_performance_stats)
+ perf_stats.tl_runs--;
__qdio_outbound_processing(oq);
+ }
}
}
@@ -1457,9 +1453,8 @@ __qdio_inbound_processing(struct qdio_q *q)
if (unlikely(qdio_reserve_q(q))) {
qdio_release_q(q);
-#ifdef QDIO_PERFORMANCE_STATS
- i_p_c++;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats)
+ i_p_c++;
/* as we're sissies, we'll check next time */
if (likely(!atomic_read(&q->is_in_shutdown))) {
qdio_mark_q(q);
@@ -1467,10 +1462,10 @@ __qdio_inbound_processing(struct qdio_q *q)
}
return;
}
-#ifdef QDIO_PERFORMANCE_STATS
- i_p_nc++;
- perf_stats.tl_runs++;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats) {
+ i_p_nc++;
+ perf_stats.tl_runs++;
+ }
again:
if (qdio_has_inbound_q_moved(q)) {
@@ -1516,9 +1511,8 @@ tiqdio_reset_processing_state(struct qdio_q *q, int q_laps)
if (unlikely(qdio_reserve_q(q))) {
qdio_release_q(q);
-#ifdef QDIO_PERFORMANCE_STATS
- ii_p_c++;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats)
+ ii_p_c++;
/*
* as we might just be about to stop polling, we make
* sure that we check again at least once more
@@ -1609,9 +1603,8 @@ tiqdio_tl(unsigned long data)
{
QDIO_DBF_TEXT4(0,trace,"iqdio_tl");
-#ifdef QDIO_PERFORMANCE_STATS
- perf_stats.tl_runs++;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats)
+ perf_stats.tl_runs++;
tiqdio_inbound_checks();
}
@@ -1918,10 +1911,10 @@ tiqdio_thinint_handler(void)
{
QDIO_DBF_TEXT4(0,trace,"thin_int");
-#ifdef QDIO_PERFORMANCE_STATS
- perf_stats.thinints++;
- perf_stats.start_time_inbound=NOW;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats) {
+ perf_stats.thinints++;
+ perf_stats.start_time_inbound=NOW;
+ }
/* SVS only when needed:
* issue SVS to benefit from iqdio interrupt avoidance
@@ -1964,8 +1957,8 @@ qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb)
QDIO_DBF_HEX0(0,sense,irb,QDIO_DBF_SENSE_LEN);
QDIO_PRINT_WARN("sense data available on qdio channel.\n");
- HEXDUMP16(WARN,"irb: ",irb);
- HEXDUMP16(WARN,"sense data: ",irb->ecw);
+ QDIO_HEXDUMP16(WARN,"irb: ",irb);
+ QDIO_HEXDUMP16(WARN,"sense data: ",irb->ecw);
}
}
@@ -1976,18 +1969,17 @@ qdio_handle_pci(struct qdio_irq *irq_ptr)
int i;
struct qdio_q *q;
-#ifdef QDIO_PERFORMANCE_STATS
- perf_stats.pcis++;
- perf_stats.start_time_inbound=NOW;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats) {
+ perf_stats.pcis++;
+ perf_stats.start_time_inbound=NOW;
+ }
for (i=0;i<irq_ptr->no_input_qs;i++) {
q=irq_ptr->input_qs[i];
if (q->is_input_q&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT)
qdio_mark_q(q);
else {
-#ifdef QDIO_PERFORMANCE_STATS
- perf_stats.tl_runs--;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats)
+ perf_stats.tl_runs--;
__qdio_inbound_processing(q);
}
}
@@ -1995,11 +1987,10 @@ qdio_handle_pci(struct qdio_irq *irq_ptr)
return;
for (i=0;i<irq_ptr->no_output_qs;i++) {
q=irq_ptr->output_qs[i];
-#ifdef QDIO_PERFORMANCE_STATS
- perf_stats.tl_runs--;
-#endif /* QDIO_PERFORMANCE_STATS */
if (qdio_is_outbound_q_done(q))
continue;
+ if (qdio_performance_stats)
+ perf_stats.tl_runs--;
if (!irq_ptr->sync_done_on_outb_pcis)
SYNC_MEMORY;
__qdio_outbound_processing(q);
@@ -2045,11 +2036,13 @@ omit_handler_call:
}
static void
-qdio_call_shutdown(void *data)
+qdio_call_shutdown(struct work_struct *work)
{
+ struct ccw_device_private *priv;
struct ccw_device *cdev;
- cdev = (struct ccw_device *)data;
+ priv = container_of(work, struct ccw_device_private, kick_work);
+ cdev = priv->cdev;
qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
put_device(&cdev->dev);
}
@@ -2091,7 +2084,7 @@ qdio_timeout_handler(struct ccw_device *cdev)
if (get_device(&cdev->dev)) {
/* Can't call shutdown from interrupt context. */
PREPARE_WORK(&cdev->private->kick_work,
- qdio_call_shutdown, (void *)cdev);
+ qdio_call_shutdown);
queue_work(ccw_device_work, &cdev->private->kick_work);
}
break;
@@ -3425,7 +3418,7 @@ do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags,
if ((used_elements+count==QDIO_MAX_BUFFERS_PER_Q)&&
(callflags&QDIO_FLAG_UNDER_INTERRUPT))
- atomic_swap(&q->polling,0);
+ atomic_xchg(&q->polling,0);
if (used_elements)
return;
@@ -3458,19 +3451,18 @@ do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
/* This is the outbound handling of queues */
-#ifdef QDIO_PERFORMANCE_STATS
- perf_stats.start_time_outbound=NOW;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats)
+ perf_stats.start_time_outbound=NOW;
qdio_do_qdio_fill_output(q,qidx,count,buffers);
used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count;
if (callflags&QDIO_FLAG_DONT_SIGA) {
-#ifdef QDIO_PERFORMANCE_STATS
- perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound;
- perf_stats.outbound_cnt++;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats) {
+ perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound;
+ perf_stats.outbound_cnt++;
+ }
return;
}
if (q->is_iqdio_q) {
@@ -3500,9 +3492,8 @@ do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
qdio_kick_outbound_q(q);
} else {
QDIO_DBF_TEXT3(0,trace, "fast-req");
-#ifdef QDIO_PERFORMANCE_STATS
- perf_stats.fast_reqs++;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats)
+ perf_stats.fast_reqs++;
}
}
/*
@@ -3513,10 +3504,10 @@ do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
__qdio_outbound_processing(q);
}
-#ifdef QDIO_PERFORMANCE_STATS
- perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound;
- perf_stats.outbound_cnt++;
-#endif /* QDIO_PERFORMANCE_STATS */
+ if (qdio_performance_stats) {
+ perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound;
+ perf_stats.outbound_cnt++;
+ }
}
/* count must be 1 in iqdio */
@@ -3529,7 +3520,7 @@ do_QDIO(struct ccw_device *cdev,unsigned int callflags,
#ifdef CONFIG_QDIO_DEBUG
char dbf_text[20];
- sprintf(dbf_text,"doQD%04x",cdev->private->sch_no);
+ sprintf(dbf_text,"doQD%04x",cdev->private->schid.sch_no);
QDIO_DBF_TEXT3(0,trace,dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
@@ -3574,7 +3565,6 @@ do_QDIO(struct ccw_device *cdev,unsigned int callflags,
return 0;
}
-#ifdef QDIO_PERFORMANCE_STATS
static int
qdio_perf_procfile_read(char *buffer, char **buffer_location, off_t offset,
int buffer_length, int *eof, void *data)
@@ -3590,29 +3580,29 @@ qdio_perf_procfile_read(char *buffer, char **buffer_location, off_t offset,
_OUTP_IT("i_p_nc/c=%lu/%lu\n",i_p_nc,i_p_c);
_OUTP_IT("ii_p_nc/c=%lu/%lu\n",ii_p_nc,ii_p_c);
_OUTP_IT("o_p_nc/c=%lu/%lu\n",o_p_nc,o_p_c);
- _OUTP_IT("Number of tasklet runs (total) : %u\n",
+ _OUTP_IT("Number of tasklet runs (total) : %lu\n",
perf_stats.tl_runs);
_OUTP_IT("\n");
- _OUTP_IT("Number of SIGA sync's issued : %u\n",
+ _OUTP_IT("Number of SIGA sync's issued : %lu\n",
perf_stats.siga_syncs);
- _OUTP_IT("Number of SIGA in's issued : %u\n",
+ _OUTP_IT("Number of SIGA in's issued : %lu\n",
perf_stats.siga_ins);
- _OUTP_IT("Number of SIGA out's issued : %u\n",
+ _OUTP_IT("Number of SIGA out's issued : %lu\n",
perf_stats.siga_outs);
- _OUTP_IT("Number of PCIs caught : %u\n",
+ _OUTP_IT("Number of PCIs caught : %lu\n",
perf_stats.pcis);
- _OUTP_IT("Number of adapter interrupts caught : %u\n",
+ _OUTP_IT("Number of adapter interrupts caught : %lu\n",
perf_stats.thinints);
- _OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA) : %u\n",
+ _OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA) : %lu\n",
perf_stats.fast_reqs);
_OUTP_IT("\n");
- _OUTP_IT("Total time of all inbound actions (us) incl. UL : %u\n",
+ _OUTP_IT("Total time of all inbound actions (us) incl. UL : %lu\n",
perf_stats.inbound_time);
- _OUTP_IT("Number of inbound transfers : %u\n",
+ _OUTP_IT("Number of inbound transfers : %lu\n",
perf_stats.inbound_cnt);
- _OUTP_IT("Total time of all outbound do_QDIOs (us) : %u\n",
+ _OUTP_IT("Total time of all outbound do_QDIOs (us) : %lu\n",
perf_stats.outbound_time);
- _OUTP_IT("Number of do_QDIOs outbound : %u\n",
+ _OUTP_IT("Number of do_QDIOs outbound : %lu\n",
perf_stats.outbound_cnt);
_OUTP_IT("\n");
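Editor's note: the %u to %lu changes in these _OUTP_IT() lines pair with the qdio_perf_stats counters becoming unsigned long in the qdio.h hunk at the end of this diff; a mismatched specifier is undefined behaviour and typically draws a -Wformat warning. A tiny illustration with a hypothetical counter, not the driver's:

	#include <stdio.h>

	int main(void)
	{
		unsigned long tl_runs = 42;   /* counter widened to unsigned long */

		printf("Number of tasklet runs (total) : %lu\n", tl_runs); /* correct */
		/* printf("... : %u\n", tl_runs); would mismatch and warn with -Wformat */
		return 0;
	}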
@@ -3620,12 +3610,10 @@ qdio_perf_procfile_read(char *buffer, char **buffer_location, off_t offset,
}
static struct proc_dir_entry *qdio_perf_proc_file;
-#endif /* QDIO_PERFORMANCE_STATS */
static void
qdio_add_procfs_entry(void)
{
-#ifdef QDIO_PERFORMANCE_STATS
proc_perf_file_registration=0;
qdio_perf_proc_file=create_proc_entry(QDIO_PERF,
S_IFREG|0444,&proc_root);
@@ -3637,20 +3625,58 @@ qdio_add_procfs_entry(void)
QDIO_PRINT_WARN("was not able to register perf. " \
"proc-file (%i).\n",
proc_perf_file_registration);
-#endif /* QDIO_PERFORMANCE_STATS */
}
static void
qdio_remove_procfs_entry(void)
{
-#ifdef QDIO_PERFORMANCE_STATS
perf_stats.tl_runs=0;
if (!proc_perf_file_registration) /* means if it went ok earlier */
remove_proc_entry(QDIO_PERF,&proc_root);
-#endif /* QDIO_PERFORMANCE_STATS */
}
+/**
+ * attributes in sysfs
+ *****************************************************************************/
+
+static ssize_t
+qdio_performance_stats_show(struct bus_type *bus, char *buf)
+{
+ return sprintf(buf, "%i\n", qdio_performance_stats ? 1 : 0);
+}
+
+static ssize_t
+qdio_performance_stats_store(struct bus_type *bus, const char *buf, size_t count)
+{
+ char *tmp;
+ int i;
+
+ i = simple_strtoul(buf, &tmp, 16);
+ if ((i == 0) || (i == 1)) {
+ if (i == qdio_performance_stats)
+ return count;
+ qdio_performance_stats = i;
+ if (i==0) {
+ /* reset perf. stat. info */
+ i_p_nc = 0;
+ i_p_c = 0;
+ ii_p_nc = 0;
+ ii_p_c = 0;
+ o_p_nc = 0;
+ o_p_c = 0;
+ memset(&perf_stats, 0, sizeof(struct qdio_perf_stats));
+ }
+ } else {
+ QDIO_PRINT_WARN("QDIO performance_stats: write 0 or 1 to this file!\n");
+ return -EINVAL;
+ }
+ return count;
+}
+
+static BUS_ATTR(qdio_performance_stats, 0644, qdio_performance_stats_show,
+ qdio_performance_stats_store);
+
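Editor's note: this is the pattern running through the whole qdio.c side of the merge. The compile-time QDIO_PERFORMANCE_STATS blocks become ordinary code guarded by the runtime qdio_performance_stats flag, exposed through the bus attribute above so it can be toggled without rebuilding, and switching it off also zeroes the accumulated counters. A condensed, self-contained sketch of that flag-guarded accounting (names are illustrative):

	#include <stdio.h>
	#include <string.h>

	struct perf_stats { unsigned long siga_outs, tl_runs; };

	static int stats_enabled;              /* previously a compile-time #ifdef */
	static struct perf_stats perf_stats;

	static void siga_output(void)
	{
		if (stats_enabled)                 /* runtime check replaces #ifdef    */
			perf_stats.siga_outs++;
		/* ... issue the actual SIGA here ... */
	}

	static void set_stats_enabled(int on)
	{
		if (on == stats_enabled)
			return;
		stats_enabled = on;
		if (!on)                           /* reset counters when switched off */
			memset(&perf_stats, 0, sizeof(perf_stats));
	}

	int main(void)
	{
		siga_output();                     /* not counted: stats are off */
		set_stats_enabled(1);
		siga_output();                     /* counted                    */
		printf("siga_outs=%lu\n", perf_stats.siga_outs);
		set_stats_enabled(0);              /* counters wiped again       */
		printf("siga_outs=%lu\n", perf_stats.siga_outs);
		return 0;
	}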
static void
tiqdio_register_thinints(void)
{
@@ -3695,6 +3721,7 @@ qdio_release_qdio_memory(void)
kfree(indicators);
}
+
static void
qdio_unregister_dbf_views(void)
{
@@ -3796,9 +3823,7 @@ static int __init
init_QDIO(void)
{
int res;
-#ifdef QDIO_PERFORMANCE_STATS
void *ptr;
-#endif /* QDIO_PERFORMANCE_STATS */
printk("qdio: loading %s\n",version);
@@ -3811,13 +3836,12 @@ init_QDIO(void)
return res;
QDIO_DBF_TEXT0(0,setup,"initQDIO");
+ res = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
-#ifdef QDIO_PERFORMANCE_STATS
- memset((void*)&perf_stats,0,sizeof(perf_stats));
+ memset((void*)&perf_stats,0,sizeof(perf_stats));
QDIO_DBF_TEXT0(0,setup,"perfstat");
ptr=&perf_stats;
QDIO_DBF_HEX0(0,setup,&ptr,sizeof(void*));
-#endif /* QDIO_PERFORMANCE_STATS */
qdio_add_procfs_entry();
@@ -3841,7 +3865,7 @@ cleanup_QDIO(void)
qdio_release_qdio_memory();
qdio_unregister_dbf_views();
mempool_destroy(qdio_mempool_scssc);
-
+ bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
printk("qdio: %s: module removed\n",version);
}
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 49bb9e3..ec9af72 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -12,10 +12,6 @@
#endif /* CONFIG_QDIO_DEBUG */
#define QDIO_USE_PROCESSING_STATE
-#ifdef CONFIG_QDIO_PERF_STATS
-#define QDIO_PERFORMANCE_STATS
-#endif /* CONFIG_QDIO_PERF_STATS */
-
#define QDIO_MINIMAL_BH_RELIEF_TIME 16
#define QDIO_TIMER_POLL_VALUE 1
#define IQDIO_TIMER_POLL_VALUE 1
@@ -236,7 +232,7 @@ enum qdio_irq_states {
#define QDIO_PRINT_EMERG(x...) do { } while (0)
#endif
-#define HEXDUMP16(importance,header,ptr) \
+#define QDIO_HEXDUMP16(importance,header,ptr) \
QDIO_PRINT_##importance(header "%02x %02x %02x %02x " \
"%02x %02x %02x %02x %02x %02x %02x %02x " \
"%02x %02x %02x %02x\n",*(((char*)ptr)), \
@@ -409,27 +405,23 @@ do_clear_global_summary(void)
#define CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS 0x08
#define CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS 0x04
-#ifdef QDIO_PERFORMANCE_STATS
struct qdio_perf_stats {
- unsigned int tl_runs;
+ unsigned long tl_runs;
- unsigned int siga_outs;
- unsigned int siga_ins;
- unsigned int siga_syncs;
- unsigned int pcis;
- unsigned int thinints;
- unsigned int fast_reqs;
+ unsigned long siga_outs;
+ unsigned long siga_ins;
+ unsigned long siga_syncs;
+ unsigned long pcis;
+ unsigned long thinints;
+ unsigned long fast_reqs;
__u64 start_time_outbound;
- unsigned int outbound_cnt;
- unsigned int outbound_time;
+ unsigned long outbound_cnt;
+ unsigned long outbound_time;
__u64 start_time_inbound;
- unsigned int inbound_cnt;
- unsigned int inbound_time;
+ unsigned long inbound_cnt;
+ unsigned long inbound_time;
};
-#endif /* QDIO_PERFORMANCE_STATS */
-
-#define atomic_swap(a,b) xchg((int*)a.counter,b)
/* unlikely as the later the better */
#define SYNC_MEMORY if (unlikely(q->siga_sync)) qdio_siga_sync_q(q)
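Editor's note: with the private atomic_swap() wrapper removed here, call sites such as qdio_stop_polling() use the generic atomic_xchg(), which atomically stores the new value and returns the old one, so "if (!atomic_xchg(&q->polling, 0))" both clears the flag and tests whether it was already clear. An equivalent user-space sketch with C11 atomics:

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int polling = 1;

	/* Mirrors qdio_stop_polling(): clear the flag, report if it was already clear. */
	static int stop_polling(void)
	{
		if (!atomic_exchange(&polling, 0))
			return 1;                  /* flag was already clear            */
		return 0;                      /* we were polling and stopped it    */
	}

	int main(void)
	{
		printf("first call : %d\n", stop_polling());  /* 0: flag was set     */
		printf("second call: %d\n", stop_polling());  /* 1: already cleared  */
		return 0;
	}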