Revert "clk: Get runtime PM before walking tree during disable_unused"
This reverts commit 253ab38d1e, which is
commit e581cf5d216289ef292d1a4036d53ce90e122469 upstream.

It breaks the Android kernel ABI and can be brought back in the future
in an ABI-safe way if it is really needed.
Bug: 161946584
Change-Id: Ic3614277b0ce1e1eec3488d1f3db21cc538aa3d4
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit b9e6f92358
parent 65d8957031
1 changed file with 12 additions and 105 deletions
drivers/clk/clk.c
@@ -37,10 +37,6 @@ static HLIST_HEAD(clk_root_list);
 static HLIST_HEAD(clk_orphan_list);
 static LIST_HEAD(clk_notifier_list);
 
-/* List of registered clks that use runtime PM */
-static HLIST_HEAD(clk_rpm_list);
-static DEFINE_MUTEX(clk_rpm_list_lock);
-
 static const struct hlist_head *all_lists[] = {
 	&clk_root_list,
 	&clk_orphan_list,
@@ -63,7 +59,6 @@ struct clk_core {
 	struct clk_hw *hw;
 	struct module *owner;
 	struct device *dev;
-	struct hlist_node rpm_node;
 	struct device_node *of_node;
 	struct clk_core *parent;
 	struct clk_parent_map *parents;
@@ -136,89 +131,6 @@ static void clk_pm_runtime_put(struct clk_core *core)
 	pm_runtime_put_sync(core->dev);
 }
 
-/**
- * clk_pm_runtime_get_all() - Runtime "get" all clk provider devices
- *
- * Call clk_pm_runtime_get() on all runtime PM enabled clks in the clk tree so
- * that disabling unused clks avoids a deadlock where a device is runtime PM
- * resuming/suspending and the runtime PM callback is trying to grab the
- * prepare_lock for something like clk_prepare_enable() while
- * clk_disable_unused_subtree() holds the prepare_lock and is trying to runtime
- * PM resume/suspend the device as well.
- *
- * Context: Acquires the 'clk_rpm_list_lock' and returns with the lock held on
- * success. Otherwise the lock is released on failure.
- *
- * Return: 0 on success, negative errno otherwise.
- */
-static int clk_pm_runtime_get_all(void)
-{
-	int ret;
-	struct clk_core *core, *failed;
-
-	/*
-	 * Grab the list lock to prevent any new clks from being registered
-	 * or unregistered until clk_pm_runtime_put_all().
-	 */
-	mutex_lock(&clk_rpm_list_lock);
-
-	/*
-	 * Runtime PM "get" all the devices that are needed for the clks
-	 * currently registered. Do this without holding the prepare_lock, to
-	 * avoid the deadlock.
-	 */
-	hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
-		ret = clk_pm_runtime_get(core);
-		if (ret) {
-			failed = core;
-			pr_err("clk: Failed to runtime PM get '%s' for clk '%s'\n",
-			       dev_name(failed->dev), failed->name);
-			goto err;
-		}
-	}
-
-	return 0;
-
-err:
-	hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
-		if (core == failed)
-			break;
-
-		clk_pm_runtime_put(core);
-	}
-	mutex_unlock(&clk_rpm_list_lock);
-
-	return ret;
-}
-
-/**
- * clk_pm_runtime_put_all() - Runtime "put" all clk provider devices
- *
- * Put the runtime PM references taken in clk_pm_runtime_get_all() and release
- * the 'clk_rpm_list_lock'.
- */
-static void clk_pm_runtime_put_all(void)
-{
-	struct clk_core *core;
-
-	hlist_for_each_entry(core, &clk_rpm_list, rpm_node)
-		clk_pm_runtime_put(core);
-	mutex_unlock(&clk_rpm_list_lock);
-}
-
-static void clk_pm_runtime_init(struct clk_core *core)
-{
-	struct device *dev = core->dev;
-
-	if (dev && pm_runtime_enabled(dev)) {
-		core->rpm_enabled = true;
-
-		mutex_lock(&clk_rpm_list_lock);
-		hlist_add_head(&core->rpm_node, &clk_rpm_list);
-		mutex_unlock(&clk_rpm_list_lock);
-	}
-}
-
 /*** locking ***/
 static void clk_prepare_lock(void)
 {
@@ -1331,6 +1243,9 @@ static void clk_unprepare_unused_subtree(struct clk_core *core)
 	if (core->flags & CLK_IGNORE_UNUSED)
 		return;
 
+	if (clk_pm_runtime_get(core))
+		return;
+
 	if (clk_core_is_prepared(core)) {
 		trace_clk_unprepare(core);
 		if (core->ops->unprepare_unused)
@@ -1339,6 +1254,8 @@ static void clk_unprepare_unused_subtree(struct clk_core *core)
 			core->ops->unprepare(core->hw);
 		trace_clk_unprepare_complete(core);
 	}
+
+	clk_pm_runtime_put(core);
 }
 
 static void clk_disable_unused_subtree(struct clk_core *core)
@@ -1358,6 +1275,9 @@ static void clk_disable_unused_subtree(struct clk_core *core)
 	if (core->flags & CLK_OPS_PARENT_ENABLE)
 		clk_core_prepare_enable(core->parent);
 
+	if (clk_pm_runtime_get(core))
+		goto unprepare_out;
+
 	flags = clk_enable_lock();
 
 	if (core->enable_count)
@@ -1382,6 +1302,8 @@ static void clk_disable_unused_subtree(struct clk_core *core)
 
 unlock_out:
 	clk_enable_unlock(flags);
+	clk_pm_runtime_put(core);
+unprepare_out:
 	if (core->flags & CLK_OPS_PARENT_ENABLE)
 		clk_core_disable_unprepare(core->parent);
 }
@@ -1397,7 +1319,6 @@ __setup("clk_ignore_unused", clk_ignore_unused_setup);
 static int clk_disable_unused(void)
 {
 	struct clk_core *core;
-	int ret;
 
 	if (clk_ignore_unused) {
 		pr_warn("clk: Not disabling unused clocks\n");
@@ -1406,13 +1327,6 @@ static int clk_disable_unused(void)
 
 	pr_info("clk: Disabling unused clocks\n");
 
-	ret = clk_pm_runtime_get_all();
-	if (ret)
-		return ret;
-	/*
-	 * Grab the prepare lock to keep the clk topology stable while iterating
-	 * over clks.
-	 */
 	clk_prepare_lock();
 
 	hlist_for_each_entry(core, &clk_root_list, child_node)
@@ -1429,8 +1343,6 @@ static int clk_disable_unused(void)
 
 	clk_prepare_unlock();
 
-	clk_pm_runtime_put_all();
-
 	return 0;
 }
 late_initcall_sync(clk_disable_unused);
@@ -3894,12 +3806,6 @@ static void __clk_release(struct kref *ref)
 {
 	struct clk_core *core = container_of(ref, struct clk_core, ref);
 
-	if (core->rpm_enabled) {
-		mutex_lock(&clk_rpm_list_lock);
-		hlist_del(&core->rpm_node);
-		mutex_unlock(&clk_rpm_list_lock);
-	}
-
 	clk_core_free_parent_map(core);
 	kfree_const(core->name);
 	kfree(core);
@@ -3939,8 +3845,9 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
 	}
 	core->ops = init->ops;
 
+	if (dev && pm_runtime_enabled(dev))
+		core->rpm_enabled = true;
 	core->dev = dev;
-	clk_pm_runtime_init(core);
 	core->of_node = np;
 	if (dev && dev->driver)
 		core->owner = dev->driver->owner;
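For context on what the reverted change was working around: the deleted clk_pm_runtime_get_all() kerneldoc above describes a lock-order inversion between the clk prepare_lock and a clk provider's runtime PM state. The following is a minimal userspace sketch of that inversion, not kernel code: the names rpm_state_lock and timed_lock, the two-second timeout, and the thread bodies are illustrative stand-ins (in particular, runtime PM's per-device callback serialization is not literally a mutex in the kernel).

/*
 * Illustrative userspace sketch of the deadlock described in the
 * reverted clk_pm_runtime_get_all() comment: one thread plays
 * clk_disable_unused(), which holds prepare_lock and then waits on the
 * provider's runtime PM state; the other plays the provider's runtime
 * PM callback, which holds the PM state and wants prepare_lock via
 * clk_prepare_enable(). Build with: cc -pthread deadlock.c
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t prepare_lock = PTHREAD_MUTEX_INITIALIZER;
/* Stand-in for runtime PM's "one callback at a time" serialization. */
static pthread_mutex_t rpm_state_lock = PTHREAD_MUTEX_INITIALIZER;

/* Bound the wait so the demo reports the deadlock instead of hanging. */
static int timed_lock(pthread_mutex_t *m, const char *who, const char *name)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += 2;
	if (pthread_mutex_timedlock(m, &ts) == ETIMEDOUT) {
		printf("%s: deadlocked waiting for %s\n", who, name);
		return -1;
	}
	return 0;
}

/* "clk_disable_unused()": prepare_lock first, then resume the provider. */
static void *disable_unused(void *unused)
{
	pthread_mutex_lock(&prepare_lock);
	usleep(100 * 1000);	/* walking the clk tree... */
	if (!timed_lock(&rpm_state_lock, "disable_unused", "rpm_state_lock"))
		pthread_mutex_unlock(&rpm_state_lock);
	pthread_mutex_unlock(&prepare_lock);
	return NULL;
}

/* Provider's runtime PM callback: PM state first, then prepare_lock. */
static void *rpm_callback(void *unused)
{
	pthread_mutex_lock(&rpm_state_lock);
	usleep(100 * 1000);	/* clk_prepare_enable() inside the callback */
	if (!timed_lock(&prepare_lock, "rpm_callback", "prepare_lock"))
		pthread_mutex_unlock(&prepare_lock);
	pthread_mutex_unlock(&rpm_state_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, disable_unused, NULL);
	pthread_create(&b, NULL, rpm_callback, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

Run as-is, both threads time out after about two seconds and report the cycle. Taking all the runtime PM references before acquiring prepare_lock, as the reverted commit did, is one way to break that cycle; this revert trades that fix away to keep the Android ABI stable.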