commit
cb6ef42e516cb8948f15e4b70dc03af8020050a2 upstream.
We're using edac_mc_workq_setup() both on the init path, when
we load an edac driver, and when we change the polling period
(edac_mc_reset_delay_period) through /sys/.../edac_mc_poll_msec.
On that second path we don't need to initialize the workqueue
again, since it has already been initialized on the first path.
Thanks to Tejun for workqueue insights.
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: http://lkml.kernel.org/r/1391457913-881-1-git-send-email-prarit@redhat.com
[bwh: Backported to 3.2: adjust context]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
*
* called with the mem_ctls_mutex held
*/
-static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
+static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec,
+ bool init)
{
	debugf0("%s()\n", __func__);
	if (mci->op_state != OP_RUNNING_POLL)
		return;
- INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
+ if (init)
+ INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
+
 	queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
 }
	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);
- edac_mc_workq_setup(mci, (unsigned long) value);
+ edac_mc_workq_setup(mci, value, false);
	}

	mutex_unlock(&mem_ctls_mutex);
		/* This instance is NOW RUNNING */
		mci->op_state = OP_RUNNING_POLL;
- edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
+ edac_mc_workq_setup(mci, edac_mc_get_poll_msec(), true);
	} else {
		mci->op_state = OP_RUNNING_INTERRUPT;
	}