[pandora-kernel.git] drivers/s390/cio/chsc.c
index d99f525..ea92ac4 100644
 #include <linux/device.h>
 
 #include <asm/cio.h>
+#include <asm/chpid.h>
 
 #include "css.h"
 #include "cio.h"
 #include "cio_debug.h"
 #include "ioasm.h"
-#include "chpid.h"
 #include "chp.h"
 #include "chsc.h"
 
 static void *sei_page;
 
-/* FIXME: this is _always_ called for every subchannel. shouldn't we
- *       process more than one at a time? */
-static int
-chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
-{
-       int ccode, j;
+struct chsc_ssd_area {
+       struct chsc_header request;
+       u16 :10;
+       u16 ssid:2;
+       u16 :4;
+       u16 f_sch;        /* first subchannel */
+       u16 :16;
+       u16 l_sch;        /* last subchannel */
+       u32 :32;
+       struct chsc_header response;
+       u32 :32;
+       u8 sch_valid : 1;
+       u8 dev_valid : 1;
+       u8 st        : 3; /* subchannel type */
+       u8 zeroes    : 3;
+       u8  unit_addr;    /* unit address */
+       u16 devno;        /* device number */
+       u8 path_mask;
+       u8 fla_valid_mask;
+       u16 sch;          /* subchannel */
+       u8 chpid[8];      /* chpids 0-7 */
+       u16 fla[8];       /* full link addresses 0-7 */
+} __attribute__ ((packed));
 
-       struct {
-               struct chsc_header request;
-               u16 reserved1a:10;
-               u16 ssid:2;
-               u16 reserved1b:4;
-               u16 f_sch;        /* first subchannel */
-               u16 reserved2;
-               u16 l_sch;        /* last subchannel */
-               u32 reserved3;
-               struct chsc_header response;
-               u32 reserved4;
-               u8 sch_valid : 1;
-               u8 dev_valid : 1;
-               u8 st        : 3; /* subchannel type */
-               u8 zeroes    : 3;
-               u8  unit_addr;    /* unit address */
-               u16 devno;        /* device number */
-               u8 path_mask;
-               u8 fla_valid_mask;
-               u16 sch;          /* subchannel */
-               u8 chpid[8];      /* chpids 0-7 */
-               u16 fla[8];       /* full link addresses 0-7 */
-       } __attribute__ ((packed)) *ssd_area;
-
-       ssd_area = page;
+int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
+{
+       unsigned long page;
+       struct chsc_ssd_area *ssd_area;
+       int ccode;
+       int ret;
+       int i;
+       int mask;
 
+       page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
+       if (!page)
+               return -ENOMEM;
+       ssd_area = (struct chsc_ssd_area *) page;
        ssd_area->request.length = 0x0010;
        ssd_area->request.code = 0x0004;
-
-       ssd_area->ssid = sch->schid.ssid;
-       ssd_area->f_sch = sch->schid.sch_no;
-       ssd_area->l_sch = sch->schid.sch_no;
+       ssd_area->ssid = schid.ssid;
+       ssd_area->f_sch = schid.sch_no;
+       ssd_area->l_sch = schid.sch_no;
 
        ccode = chsc(ssd_area);
+       /* Check response. */
        if (ccode > 0) {
-               pr_debug("chsc returned with ccode = %d\n", ccode);
-               return (ccode == 3) ? -ENODEV : -EBUSY;
+               ret = (ccode == 3) ? -ENODEV : -EBUSY;
+               goto out_free;
        }
-
-       switch (ssd_area->response.code) {
-       case 0x0001: /* everything ok */
-               break;
-       case 0x0002:
-               CIO_CRW_EVENT(2, "Invalid command!\n");
-               return -EINVAL;
-       case 0x0003:
-               CIO_CRW_EVENT(2, "Error in chsc request block!\n");
-               return -EINVAL;
-       case 0x0004:
-               CIO_CRW_EVENT(2, "Model does not provide ssd\n");
-               return -EOPNOTSUPP;
-       default:
-               CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
+       if (ssd_area->response.code != 0x0001) {
+               CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
+                             schid.ssid, schid.sch_no,
                              ssd_area->response.code);
-               return -EIO;
+               ret = -EIO;
+               goto out_free;
        }
-
-       /*
-        * ssd_area->st stores the type of the detected
-        * subchannel, with the following definitions:
-        *
-        * 0: I/O subchannel:     All fields have meaning
-        * 1: CHSC subchannel:    Only sch_val, st and sch
-        *                        have meaning
-        * 2: Message subchannel: All fields except unit_addr
-        *                        have meaning
-        * 3: ADM subchannel:     Only sch_val, st and sch
-        *                        have meaning
-        *
-        * Other types are currently undefined.
-        */
-       if (ssd_area->st > 3) { /* uhm, that looks strange... */
-               CIO_CRW_EVENT(0, "Strange subchannel type %d"
-                             " for sch 0.%x.%04x\n", ssd_area->st,
-                             sch->schid.ssid, sch->schid.sch_no);
-               /*
-                * There may have been a new subchannel type defined in the
-                * time since this code was written; since we don't know which
-                * fields have meaning and what to do with it we just jump out
-                */
-               return 0;
-       } else {
-               const char *type[4] = {"I/O", "chsc", "message", "ADM"};
-               CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
-                             sch->schid.ssid, sch->schid.sch_no,
-                             type[ssd_area->st]);
-
-               sch->ssd_info.valid = 1;
-               sch->ssd_info.type = ssd_area->st;
+       if (!ssd_area->sch_valid) {
+               ret = -ENODEV;
+               goto out_free;
        }
-
-       if (ssd_area->st == 0 || ssd_area->st == 2) {
-               for (j = 0; j < 8; j++) {
-                       if (!((0x80 >> j) & ssd_area->path_mask &
-                             ssd_area->fla_valid_mask))
-                               continue;
-                       sch->ssd_info.chpid[j] = ssd_area->chpid[j];
-                       sch->ssd_info.fla[j]   = ssd_area->fla[j];
+       /* Copy data */
+       ret = 0;
+       memset(ssd, 0, sizeof(struct chsc_ssd_info));
+       if ((ssd_area->st != 0) && (ssd_area->st != 2))
+               goto out_free;
+       ssd->path_mask = ssd_area->path_mask;
+       ssd->fla_valid_mask = ssd_area->fla_valid_mask;
+       for (i = 0; i < 8; i++) {
+               mask = 0x80 >> i;
+               if (ssd_area->path_mask & mask) {
+                       chp_id_init(&ssd->chpid[i]);
+                       ssd->chpid[i].id = ssd_area->chpid[i];
                }
+               if (ssd_area->fla_valid_mask & mask)
+                       ssd->fla[i] = ssd_area->fla[i];
        }
-       return 0;
+out_free:
+       free_page(page);
+       return ret;
 }
 
-int
-css_get_ssd_info(struct subchannel *sch)
+static int check_for_io_on_path(struct subchannel *sch, int mask)
 {
-       int ret;
-       void *page;
+       int cc;
 
-       page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
-       if (!page)
-               return -ENOMEM;
-       spin_lock_irq(sch->lock);
-       ret = chsc_get_sch_desc_irq(sch, page);
-       if (ret) {
-               static int cio_chsc_err_msg;
-               
-               if (!cio_chsc_err_msg) {
-                       printk(KERN_ERR
-                              "chsc_get_sch_descriptions:"
-                              " Error %d while doing chsc; "
-                              "processing some machine checks may "
-                              "not work\n", ret);
-                       cio_chsc_err_msg = 1;
-               }
-       }
-       spin_unlock_irq(sch->lock);
-       free_page((unsigned long)page);
-       if (!ret) {
-               int j, mask;
-               struct chp_id chpid;
+       cc = stsch(sch->schid, &sch->schib);
+       if (cc)
+               return 0;
+       if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask)
+               return 1;
+       return 0;
+}
 
-               chp_id_init(&chpid);
-               /* Allocate channel path structures, if needed. */
-               for (j = 0; j < 8; j++) {
-                       mask = 0x80 >> j;
-                       chpid.id = sch->ssd_info.chpid[j];
-                       if ((sch->schib.pmcw.pim & mask) &&
-                           !chp_is_registered(chpid))
-                               chp_new(chpid);
-               }
+static void terminate_internal_io(struct subchannel *sch)
+{
+       if (cio_clear(sch)) {
+               /* Recheck device in case clear failed. */
+               sch->lpm = 0;
+               if (device_trigger_verify(sch) != 0)
+                       css_schedule_eval(sch->schid);
+               return;
        }
-       return ret;
+       /* Request retry of internal operation. */
+       device_set_intretry(sch);
+       /* Call handler. */
+       if (sch->driver && sch->driver->termination)
+               sch->driver->termination(&sch->dev);
 }
 
 static int
@@ -208,37 +165,30 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
        if (sch->schib.pmcw.pim == 0x80)
                goto out_unreg;
 
-       if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
-           (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
-           (sch->schib.pmcw.lpum == mask)) {
-               int cc;
-
-               cc = cio_clear(sch);
-               if (cc == -ENODEV)
+       if (check_for_io_on_path(sch, mask)) {
+               if (device_is_online(sch))
+                       device_kill_io(sch);
+               else {
+                       terminate_internal_io(sch);
+                       /* Re-start path verification. */
+                       if (sch->driver && sch->driver->verify)
+                               sch->driver->verify(&sch->dev);
+               }
+       } else {
+               /* trigger path verification. */
+               if (sch->driver && sch->driver->verify)
+                       sch->driver->verify(&sch->dev);
+               else if (sch->lpm == mask)
                        goto out_unreg;
-               /* Request retry of internal operation. */
-               device_set_intretry(sch);
-               /* Call handler. */
-               if (sch->driver && sch->driver->termination)
-                       sch->driver->termination(&sch->dev);
-               goto out_unlock;
        }
 
-       /* trigger path verification. */
-       if (sch->driver && sch->driver->verify)
-               sch->driver->verify(&sch->dev);
-       else if (sch->lpm == mask)
-               goto out_unreg;
-out_unlock:
        spin_unlock_irq(sch->lock);
        return 0;
+
 out_unreg:
-       spin_unlock_irq(sch->lock);
        sch->lpm = 0;
-       if (css_enqueue_subchannel_slow(sch->schid)) {
-               css_clear_subchannel_slow_list();
-               need_rescan = 1;
-       }
+       spin_unlock_irq(sch->lock);
+       css_schedule_eval(sch->schid);
        return 0;
 }
 
@@ -253,57 +203,12 @@ void chsc_chp_offline(struct chp_id chpid)
                return;
        bus_for_each_dev(&css_bus_type, NULL, &chpid,
                         s390_subchannel_remove_chpid);
-
-       if (need_rescan || css_slow_subchannels_exist())
-               queue_work(slow_path_wq, &slow_path_work);
-}
-
-struct res_acc_data {
-       struct chp_id chpid;
-       u32 fla_mask;
-       u16 fla;
-};
-
-static int s390_process_res_acc_sch(struct res_acc_data *res_data,
-                                   struct subchannel *sch)
-{
-       int found;
-       int chp;
-       int ccode;
-
-       found = 0;
-       for (chp = 0; chp <= 7; chp++)
-               /*
-                * check if chpid is in information updated by ssd
-                */
-               if (sch->ssd_info.valid &&
-                   sch->ssd_info.chpid[chp] == res_data->chpid.id &&
-                   (sch->ssd_info.fla[chp] & res_data->fla_mask)
-                   == res_data->fla) {
-                       found = 1;
-                       break;
-               }
-
-       if (found == 0)
-               return 0;
-
-       /*
-        * Do a stsch to update our subchannel structure with the
-        * new path information and eventually check for logically
-        * offline chpids.
-        */
-       ccode = stsch(sch->schid, &sch->schib);
-       if (ccode > 0)
-               return 0;
-
-       return 0x80 >> chp;
 }
 
 static int
 s390_process_res_acc_new_sch(struct subchannel_id schid)
 {
        struct schib schib;
-       int ret;
        /*
         * We don't know the device yet, but since a path
         * may be available now to the device we'll have
@@ -314,14 +219,35 @@ s390_process_res_acc_new_sch(struct subchannel_id schid)
         */
        if (stsch_err(schid, &schib))
                /* We're through */
-               return need_rescan ? -EAGAIN : -ENXIO;
+               return -ENXIO;
 
        /* Put it on the slow path. */
-       ret = css_enqueue_subchannel_slow(schid);
-       if (ret) {
-               css_clear_subchannel_slow_list();
-               need_rescan = 1;
-               return -EAGAIN;
+       css_schedule_eval(schid);
+       return 0;
+}
+
+struct res_acc_data {
+       struct chp_id chpid;
+       u32 fla_mask;
+       u16 fla;
+};
+
+static int get_res_chpid_mask(struct chsc_ssd_info *ssd,
+                             struct res_acc_data *data)
+{
+       int i;
+       int mask;
+
+       for (i = 0; i < 8; i++) {
+               mask = 0x80 >> i;
+               if (!(ssd->path_mask & mask))
+                       continue;
+               if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid))
+                       continue;
+               if ((ssd->fla_valid_mask & mask) &&
+                   ((ssd->fla[i] & data->fla_mask) != data->fla))
+                       continue;
+               return mask;
        }
        return 0;
 }
@@ -340,14 +266,11 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
                return s390_process_res_acc_new_sch(schid);
 
        spin_lock_irq(sch->lock);
-
-       chp_mask = s390_process_res_acc_sch(res_data, sch);
-
-       if (chp_mask == 0) {
-               spin_unlock_irq(sch->lock);
-               put_device(&sch->dev);
-               return 0;
-       }
+       chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data);
+       if (chp_mask == 0)
+               goto out;
+       if (stsch(sch->schid, &sch->schib))
+               goto out;
        old_lpm = sch->lpm;
        sch->lpm = ((sch->schib.pmcw.pim &
                     sch->schib.pmcw.pam &
@@ -357,17 +280,14 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
                device_trigger_reprobe(sch);
        else if (sch->driver && sch->driver->verify)
                sch->driver->verify(&sch->dev);
-
+out:
        spin_unlock_irq(sch->lock);
        put_device(&sch->dev);
        return 0;
 }
 
-
-static int
-s390_process_res_acc (struct res_acc_data *res_data)
+static void s390_process_res_acc (struct res_acc_data *res_data)
 {
-       int rc;
        char dbf_txt[15];
 
        sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
@@ -385,12 +305,7 @@ s390_process_res_acc (struct res_acc_data *res_data)
         * The more information we have (info), the less scanning
         * will we have to do.
         */
-       rc = for_each_subchannel(__s390_process_res_acc, res_data);
-       if (css_slow_subchannels_exist())
-               rc = -EAGAIN;
-       else if (rc != -EAGAIN)
-               rc = 0;
-       return rc;
+       for_each_subchannel(__s390_process_res_acc, res_data);
 }
 
 static int
@@ -442,7 +357,7 @@ struct chsc_sei_area {
        /* ccdf has to be big enough for a link-incident record */
 } __attribute__ ((packed));
 
-static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
+static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
 {
        struct chp_id chpid;
        int id;
@@ -450,7 +365,7 @@ static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
        CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
                      sei_area->rs, sei_area->rsid);
        if (sei_area->rs != 4)
-               return 0;
+               return;
        id = __get_chpid_from_lir(sei_area->ccdf);
        if (id < 0)
                CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
@@ -459,21 +374,18 @@ static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
                chpid.id = id;
                chsc_chp_offline(chpid);
        }
-
-       return 0;
 }
 
-static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
+static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
 {
        struct res_acc_data res_data;
        struct chp_id chpid;
        int status;
-       int rc;
 
        CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
                      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
        if (sei_area->rs != 4)
-               return 0;
+               return;
        chp_id_init(&chpid);
        chpid.id = sei_area->rsid;
        /* allocate a new channel path structure, if needed */
@@ -481,7 +393,7 @@ static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
        if (status < 0)
                chp_new(chpid);
        else if (!status)
-               return 0;
+               return;
        memset(&res_data, 0, sizeof(struct res_acc_data));
        res_data.chpid = chpid;
        if ((sei_area->vf & 0xc0) != 0) {
@@ -493,50 +405,82 @@ static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
                        /* link address */
                        res_data.fla_mask = 0xff00;
        }
-       rc = s390_process_res_acc(&res_data);
-
-       return rc;
+       s390_process_res_acc(&res_data);
 }
 
-static int chsc_process_sei(struct chsc_sei_area *sei_area)
+struct chp_config_data {
+       u8 map[32];
+       u8 op;
+       u8 pc;
+};
+
+static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
 {
-       int rc;
+       struct chp_config_data *data;
+       struct chp_id chpid;
+       int num;
+
+       CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
+       if (sei_area->rs != 0)
+               return;
+       data = (struct chp_config_data *) &(sei_area->ccdf);
+       chp_id_init(&chpid);
+       for (num = 0; num <= __MAX_CHPID; num++) {
+               if (!chp_test_bit(data->map, num))
+                       continue;
+               chpid.id = num;
+               printk(KERN_WARNING "cio: processing configure event %d for "
+                      "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
+               switch (data->op) {
+               case 0:
+                       chp_cfg_schedule(chpid, 1);
+                       break;
+               case 1:
+                       chp_cfg_schedule(chpid, 0);
+                       break;
+               case 2:
+                       chp_cfg_cancel_deconfigure(chpid);
+                       break;
+               }
+       }
+}
 
+static void chsc_process_sei(struct chsc_sei_area *sei_area)
+{
        /* Check if we might have lost some information. */
-       if (sei_area->flags & 0x40)
+       if (sei_area->flags & 0x40) {
                CIO_CRW_EVENT(2, "chsc: event overflow\n");
+               css_schedule_eval_all();
+       }
        /* which kind of information was stored? */
-       rc = 0;
        switch (sei_area->cc) {
        case 1: /* link incident*/
-               rc = chsc_process_sei_link_incident(sei_area);
+               chsc_process_sei_link_incident(sei_area);
                break;
        case 2: /* i/o resource accessibility */
-               rc = chsc_process_sei_res_acc(sei_area);
+               chsc_process_sei_res_acc(sei_area);
+               break;
+       case 8: /* channel-path-configuration notification */
+               chsc_process_sei_chp_config(sei_area);
                break;
        default: /* other stuff */
                CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
                              sei_area->cc);
                break;
        }
-
-       return rc;
 }
 
-int chsc_process_crw(void)
+void chsc_process_crw(void)
 {
        struct chsc_sei_area *sei_area;
-       int ret;
-       int rc;
 
        if (!sei_page)
-               return 0;
+               return;
        /* Access to sei_page is serialized through machine check handler
         * thread, so no need for locking. */
        sei_area = sei_page;
 
        CIO_TRACE_EVENT( 2, "prcss");
-       ret = 0;
        do {
                memset(sei_area, 0, sizeof(*sei_area));
                sei_area->request.length = 0x0010;
@@ -546,37 +490,26 @@ int chsc_process_crw(void)
 
                if (sei_area->response.code == 0x0001) {
                        CIO_CRW_EVENT(4, "chsc: sei successful\n");
-                       rc = chsc_process_sei(sei_area);
-                       if (rc)
-                               ret = rc;
+                       chsc_process_sei(sei_area);
                } else {
                        CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
                                      sei_area->response.code);
-                       ret = 0;
                        break;
                }
        } while (sei_area->flags & 0x80);
-
-       return ret;
 }
 
 static int
 __chp_add_new_sch(struct subchannel_id schid)
 {
        struct schib schib;
-       int ret;
 
        if (stsch_err(schid, &schib))
                /* We're through */
-               return need_rescan ? -EAGAIN : -ENXIO;
+               return -ENXIO;
 
        /* Put it on the slow path. */
-       ret = css_enqueue_subchannel_slow(schid);
-       if (ret) {
-               css_clear_subchannel_slow_list();
-               need_rescan = 1;
-               return -EAGAIN;
-       }
+       css_schedule_eval(schid);
        return 0;
 }
 
@@ -623,96 +556,58 @@ __chp_add(struct subchannel_id schid, void *data)
        return 0;
 }
 
-int chsc_chp_online(struct chp_id chpid)
+void chsc_chp_online(struct chp_id chpid)
 {
-       int rc;
        char dbf_txt[15];
 
        sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);
 
-       if (chp_get_status(chpid) == 0)
-               return 0;
-       rc = for_each_subchannel(__chp_add, &chpid);
-       if (css_slow_subchannels_exist())
-               rc = -EAGAIN;
-       if (rc != -EAGAIN)
-               rc = 0;
-       return rc;
-}
-
-static int check_for_io_on_path(struct subchannel *sch, int index)
-{
-       int cc;
-
-       cc = stsch(sch->schid, &sch->schib);
-       if (cc)
-               return 0;
-       if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index))
-               return 1;
-       return 0;
-}
-
-static void terminate_internal_io(struct subchannel *sch)
-{
-       if (cio_clear(sch)) {
-               /* Recheck device in case clear failed. */
-               sch->lpm = 0;
-               if (device_trigger_verify(sch) != 0) {
-                       if(css_enqueue_subchannel_slow(sch->schid)) {
-                               css_clear_subchannel_slow_list();
-                               need_rescan = 1;
-                       }
-               }
-               return;
-       }
-       /* Request retry of internal operation. */
-       device_set_intretry(sch);
-       /* Call handler. */
-       if (sch->driver && sch->driver->termination)
-               sch->driver->termination(&sch->dev);
+       if (chp_get_status(chpid) != 0)
+               for_each_subchannel(__chp_add, &chpid);
 }
 
 static void __s390_subchannel_vary_chpid(struct subchannel *sch,
                                         struct chp_id chpid, int on)
 {
        int chp, old_lpm;
+       int mask;
        unsigned long flags;
 
-       if (!sch->ssd_info.valid)
-               return;
-       
        spin_lock_irqsave(sch->lock, flags);
        old_lpm = sch->lpm;
        for (chp = 0; chp < 8; chp++) {
-               if (sch->ssd_info.chpid[chp] != chpid.id)
+               mask = 0x80 >> chp;
+               if (!(sch->ssd_info.path_mask & mask))
+                       continue;
+               if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid))
                        continue;
 
                if (on) {
-                       sch->opm |= (0x80 >> chp);
-                       sch->lpm |= (0x80 >> chp);
+                       sch->opm |= mask;
+                       sch->lpm |= mask;
                        if (!old_lpm)
                                device_trigger_reprobe(sch);
                        else if (sch->driver && sch->driver->verify)
                                sch->driver->verify(&sch->dev);
                        break;
                }
-               sch->opm &= ~(0x80 >> chp);
-               sch->lpm &= ~(0x80 >> chp);
-               if (check_for_io_on_path(sch, chp)) {
+               sch->opm &= ~mask;
+               sch->lpm &= ~mask;
+               if (check_for_io_on_path(sch, mask)) {
                        if (device_is_online(sch))
                                /* Path verification is done after killing. */
                                device_kill_io(sch);
-                       else
+                       else {
                                /* Kill and retry internal I/O. */
                                terminate_internal_io(sch);
-               } else if (!sch->lpm) {
-                       if (device_trigger_verify(sch) != 0) {
-                               if (css_enqueue_subchannel_slow(sch->schid)) {
-                                       css_clear_subchannel_slow_list();
-                                       need_rescan = 1;
-                               }
+                               /* Re-start path verification. */
+                               if (sch->driver && sch->driver->verify)
+                                       sch->driver->verify(&sch->dev);
                        }
+               } else if (!sch->lpm) {
+                       if (device_trigger_verify(sch) != 0)
+                               css_schedule_eval(sch->schid);
                } else if (sch->driver && sch->driver->verify)
                        sch->driver->verify(&sch->dev);
                break;
@@ -759,11 +654,7 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
                /* We're through */
                return -ENXIO;
        /* Put it on the slow path. */
-       if (css_enqueue_subchannel_slow(schid)) {
-               css_clear_subchannel_slow_list();
-               need_rescan = 1;
-               return -EAGAIN;
-       }
+       css_schedule_eval(schid);
        return 0;
 }
 
@@ -784,8 +675,6 @@ int chsc_chp_vary(struct chp_id chpid, int on)
        if (on)
                /* Scan for new devices on varied on path. */
                for_each_subchannel(__s390_vary_chpid_on, NULL);
-       if (need_rescan || css_slow_subchannels_exist())
-               queue_work(slow_path_wq, &slow_path_work);
        return 0;
 }
 
@@ -929,7 +818,7 @@ chsc_secm(struct channel_subsystem *css, int enable)
                } else
                        chsc_remove_cmg_attr(css);
        }
-       if (enable && !css->cm_enabled) {
+       if (!css->cm_enabled) {
                free_page((unsigned long)css->cub_addr1);
                free_page((unsigned long)css->cub_addr2);
        }