soc/qcom/hyp_core_ctl: Track VPM Group state for VM

The VM is expected to enter the suspend state outside of the use case
boundary. A delay in transitioning to system suspend, or spurious
wake-ups after the use case has completed, results in a system
regression.

Add a mechanism to track both scenarios, delayed system suspend and
spurious wake-ups of the VM, by checking the VPM Group state associated
with the VM.

Change-Id: I2a5990989033ffd3cc57ae1cba93b8ba6b63a3c7
Signed-off-by: Sai Harshini Nimmala <snimmala@codeaurora.org>
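
In outline, the mechanism works as follows: when reservation is disabled at
the end of a use case, a timer of sysctl_hh_suspend_timeout_ms is armed; the
suspend/resume vIRQ handler then reads the VPM Group state through
hh_hcall_vpm_group_get_state() and cancels the timer once the SVM reports
SVM_STATE_SYSTEM_SUSPENDED. If the timer fires first, system suspend was
delayed; if the SVM is found in SVM_STATE_RUNNING while reservation is
disabled, the wake-up is spurious. Below is a condensed sketch of that flow,
not a drop-in copy of the diff: locking, timer/IRQ registration and error
paths are omitted, hh_track_svm_suspend() is a hypothetical stand-in for the
calls made from hyp_core_ctl_enable(), and the statics only mirror driver
state.

#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/timer.h>
#include <linux/haven/hcall.h>
#include <linux/haven/hh_errno.h>

#define SVM_STATE_RUNNING		1
#define SVM_STATE_SYSTEM_SUSPENDED	3

/* Mirrors of driver state, for illustration only. */
static struct timer_list hh_suspend_timer;
static u64 vpmg_cap_id;
static bool reservation_enabled;
static unsigned int suspend_timeout_ms = 1000;	/* sysctl_hh_suspend_timeout_ms */

/* Fires only if the SVM failed to reach system suspend within the window. */
static void hh_suspend_timer_callback(struct timer_list *t)
{
	pr_err("timeout waiting for SVM suspend\n");
}

/* Hypothetical helper: arm the watchdog on disable, cancel it on enable. */
static void hh_track_svm_suspend(bool reservation_on)
{
	if (reservation_on)
		del_timer(&hh_suspend_timer);
	else
		mod_timer(&hh_suspend_timer,
			  jiffies + msecs_to_jiffies(suspend_timeout_ms));
}

/* vIRQ raised by the hypervisor on SVM suspend/resume transitions. */
static irqreturn_t hh_susp_res_irq_handler(int irq, void *data)
{
	uint64_t state;

	if (hh_hcall_vpm_group_get_state(vpmg_cap_id, &state) != HH_ERROR_OK)
		return IRQ_HANDLED;

	if (state == SVM_STATE_SYSTEM_SUSPENDED)
		del_timer(&hh_suspend_timer);	/* suspend arrived in time */
	else if (state == SVM_STATE_RUNNING && !reservation_enabled)
		pr_err("unexpected SVM wake up\n");

	return IRQ_HANDLED;
}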

@@ -21,16 +21,21 @@
#include <linux/debugfs.h>
#include <linux/pm_qos.h>
#include <linux/cpufreq.h>
#include <linux/sched/sysctl.h>
#include <linux/haven/hcall.h>
#include <linux/haven/hh_errno.h>
#include <linux/haven/hh_rm_drv.h>
#define MAX_RESERVE_CPUS (num_possible_cpus()/2)
#define SVM_STATE_RUNNING 1
#define SVM_STATE_CPUS_SUSPENDED 2
#define SVM_STATE_SYSTEM_SUSPENDED 3
static DEFINE_PER_CPU(struct freq_qos_request, qos_min_req);
static DEFINE_PER_CPU(unsigned int, qos_min_freq);
unsigned int sysctl_hh_suspend_timeout_ms = 1000;
/**
* struct hyp_core_ctl_cpumap - vcpu to pcpu mapping for the other guest
* @cap_id: System call id to be used while referring to this vcpu
@@ -82,6 +87,9 @@ static bool is_vcpu_info_populated;
static bool init_done;
static int nr_vcpus;
static bool freq_qos_init_done;
static u64 vpmg_cap_id;
static struct timer_list hh_suspend_timer;
static bool is_vpm_group_info_populated;
static inline void hyp_core_ctl_print_status(char *msg)
{
@@ -413,20 +421,21 @@ done:
static int hyp_core_ctl_thread(void *data)
{
struct hyp_core_ctl_data *hcd = data;
unsigned long flags;
while (1) {
spin_lock(&hcd->lock);
spin_lock_irqsave(&hcd->lock, flags);
if (!hcd->pending) {
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock(&hcd->lock);
spin_unlock_irqrestore(&hcd->lock, flags);
schedule();
spin_lock(&hcd->lock);
spin_lock_irqsave(&hcd->lock, flags);
set_current_state(TASK_RUNNING);
}
hcd->pending = false;
spin_unlock(&hcd->lock);
spin_unlock_irqrestore(&hcd->lock, flags);
if (kthread_should_stop())
break;
@@ -510,6 +519,7 @@ static int hyp_core_ctl_cpu_cooling_cb(struct notifier_block *nb,
const cpumask_t *thermal_cpus = cpu_cooling_get_max_level_cpumask();
struct freq_qos_request *qos_req;
int ret;
unsigned long flags;
if (!the_hcd)
return NOTIFY_DONE;
@@ -565,10 +575,10 @@ static int hyp_core_ctl_cpu_cooling_cb(struct notifier_block *nb,
}
if (the_hcd->reservation_enabled) {
spin_lock(&the_hcd->lock);
spin_lock_irqsave(&the_hcd->lock, flags);
the_hcd->pending = true;
wake_up_process(the_hcd->task);
spin_unlock(&the_hcd->lock);
spin_unlock_irqrestore(&the_hcd->lock, flags);
} else {
/*
* When the reservation is enabled, the state machine
@@ -619,6 +629,8 @@ static int hyp_core_ctl_hp_offline(unsigned int cpu)
static int hyp_core_ctl_hp_online(unsigned int cpu)
{
unsigned long flags;
if (!the_hcd || !the_hcd->reservation_enabled)
return 0;
@@ -626,12 +638,12 @@ static int hyp_core_ctl_hp_online(unsigned int cpu)
* A reserved CPU is coming online. It should be isolated
* to honor the reservation. So kick the state machine.
*/
spin_lock(&the_hcd->lock);
spin_lock_irqsave(&the_hcd->lock, flags);
if (cpumask_test_cpu(cpu, &the_hcd->final_reserved_cpus)) {
the_hcd->pending = true;
wake_up_process(the_hcd->task);
}
spin_unlock(&the_hcd->lock);
spin_unlock_irqrestore(&the_hcd->lock, flags);
return 0;
}
@@ -639,8 +651,9 @@ static int hyp_core_ctl_hp_online(unsigned int cpu)
static void hyp_core_ctl_init_reserve_cpus(struct hyp_core_ctl_data *hcd)
{
int i;
unsigned long flags;
spin_lock(&hcd->lock);
spin_lock_irqsave(&hcd->lock, flags);
cpumask_clear(&hcd->reserve_cpus);
for (i = 0; i < MAX_RESERVE_CPUS; i++) {
@@ -655,7 +668,7 @@ static void hyp_core_ctl_init_reserve_cpus(struct hyp_core_ctl_data *hcd)
}
cpumask_copy(&hcd->final_reserved_cpus, &hcd->reserve_cpus);
spin_unlock(&hcd->lock);
spin_unlock_irqrestore(&hcd->lock, flags);
pr_info("reserve_cpus=%*pbl\n", cpumask_pr_args(&hcd->reserve_cpus));
}
@@ -705,18 +718,99 @@ static struct notifier_block hh_vcpu_nb = {
.notifier_call = hh_vcpu_done_populate_affinity_info,
};
static void hh_suspend_timer_callback(struct timer_list *t)
{
pr_err("Warning:%ums timeout occurred while waiting for SVM suspend\n",
sysctl_hh_suspend_timeout_ms);
}
static inline void hh_del_suspend_timer(void)
{
del_timer(&hh_suspend_timer);
}
static inline void hh_start_suspend_timer(void)
{
mod_timer(&hh_suspend_timer, jiffies +
msecs_to_jiffies(sysctl_hh_suspend_timeout_ms));
}
static irqreturn_t hh_susp_res_irq_handler(int irq, void *data)
{
int err;
uint64_t vpmg_state;
unsigned long flags;
err = hh_hcall_vpm_group_get_state(vpmg_cap_id, &vpmg_state);
if (err != HH_ERROR_OK) {
pr_err("Failed to get VPM Group state for cap_id=%llu err=%d\n",
vpmg_cap_id, err);
return IRQ_HANDLED;
}
spin_lock_irqsave(&the_hcd->lock, flags);
if (vpmg_state == SVM_STATE_RUNNING) {
if (!the_hcd->reservation_enabled)
pr_err_ratelimited("Reservation not enabled,unexpected SVM wake up\n");
} else if (vpmg_state == SVM_STATE_SYSTEM_SUSPENDED) {
hh_del_suspend_timer();
} else {
pr_err("VPM Group state invalid/non-existent\n");
}
spin_unlock_irqrestore(&the_hcd->lock, flags);
return IRQ_HANDLED;
}
int hh_vpm_grp_populate_info(u64 cap_id, int virq_num)
{
int ret = 0;
if (!init_done) {
pr_err("%s: Driver probe failed\n", __func__);
return -ENXIO;
}
if (virq_num < 0) {
pr_err("%s: Invalid IRQ number\n", __func__);
return -EINVAL;
}
vpmg_cap_id = cap_id;
ret = request_irq(virq_num, hh_susp_res_irq_handler, 0,
"hh_susp_res_irq", NULL);
if (ret < 0) {
pr_err("%s: IRQ registration failed ret=%d\n", __func__, ret);
return ret;
}
timer_setup(&hh_suspend_timer, hh_suspend_timer_callback, 0);
is_vpm_group_info_populated = true;
return ret;
}
static void hyp_core_ctl_enable(bool enable)
{
unsigned long flags;
mutex_lock(&the_hcd->reservation_mutex);
if (!is_vcpu_info_populated) {
pr_err("VCPU info isn't populated\n");
goto err_out;
}
spin_lock(&the_hcd->lock);
spin_lock_irqsave(&the_hcd->lock, flags);
if (enable == the_hcd->reservation_enabled)
goto out;
if (is_vpm_group_info_populated) {
if (enable)
hh_del_suspend_timer();
else
hh_start_suspend_timer();
}
trace_hyp_core_ctl_enable(enable);
pr_debug("reservation %s\n", enable ? "enabled" : "disabled");
@@ -724,7 +818,7 @@ static void hyp_core_ctl_enable(bool enable)
the_hcd->pending = true;
wake_up_process(the_hcd->task);
out:
spin_unlock(&the_hcd->lock);
spin_unlock_irqrestore(&the_hcd->lock, flags);
err_out:
mutex_unlock(&the_hcd->reservation_mutex);
}
@@ -941,6 +1035,7 @@ static ssize_t write_reserve_cpus(struct file *file, const char __user *ubuf,
char kbuf[CPULIST_SZ];
int ret;
cpumask_t temp_mask;
unsigned long flags;
mutex_lock(&the_hcd->reservation_mutex);
if (!is_vcpu_info_populated) {
@@ -966,14 +1061,14 @@ static ssize_t write_reserve_cpus(struct file *file, const char __user *ubuf,
goto err_out;
}
spin_lock(&the_hcd->lock);
spin_lock_irqsave(&the_hcd->lock, flags);
if (the_hcd->reservation_enabled) {
count = -EPERM;
pr_err("reservation is enabled, can't change reserve_cpus\n");
} else {
cpumask_copy(&the_hcd->reserve_cpus, &temp_mask);
}
spin_unlock(&the_hcd->lock);
spin_unlock_irqrestore(&the_hcd->lock, flags);
mutex_unlock(&the_hcd->reservation_mutex);
return count;

@@ -806,6 +806,8 @@ int hh_rm_populate_hyp_res(hh_vmid_t vmid)
HH_MSGQ_DIRECTION_RX, linux_irq);
break;
case HH_RM_RES_TYPE_VPMGRP:
ret = hh_vpm_grp_populate_info(cap_id,
linux_irq);
break;
default:
pr_err("%s: Unknown resource type: %u\n",

@@ -537,11 +537,16 @@ DECLARE_PER_CPU_READ_MOSTLY(int, sched_load_boost);
#ifdef CONFIG_QCOM_HYP_CORE_CTL
extern int hh_vcpu_populate_affinity_info(u32 cpu_index, u64 cap_id);
extern int hh_vpm_grp_populate_info(u64 cap_id, int virq_num);
#else
static inline int hh_vcpu_populate_affinity_info(u32 cpu_index, u64 cap_id)
{
return 0;
}
static inline int hh_vpm_grp_populate_info(u64 cap_id, int virq_num)
{
return 0;
}
#endif /* CONFIG_QCOM_HYP_CORE_CTL */
#ifdef CONFIG_SCHED_WALT

@@ -30,6 +30,9 @@ extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_child_runs_first;
extern unsigned int sysctl_sched_force_lb_enable;
#ifdef CONFIG_QCOM_HYP_CORE_CTL
extern unsigned int sysctl_hh_suspend_timeout_ms;
#endif
#ifdef CONFIG_SCHED_WALT
extern unsigned int __weak sysctl_sched_capacity_margin_up[MAX_MARGIN_LEVELS];
extern unsigned int __weak sysctl_sched_capacity_margin_down[MAX_MARGIN_LEVELS];

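The tunable lands in kern_table below with a 500..5000 ms clamp. Assuming
the usual kernel.* procfs mapping (the diff only shows the table entry, so
the path is an assumption), it could be adjusted from userspace with a
hypothetical snippet like this:

/*
 * Hypothetical userspace sketch: widen the SVM suspend-timeout window.
 * The /proc path is inferred from the kern_table entry below.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/hh_suspend_timeout_ms", "w");

	if (!f) {
		perror("hh_suspend_timeout_ms");
		return 1;
	}
	/* Must stay within the 500..5000 ms range enforced below. */
	fprintf(f, "%u\n", 2000u);
	fclose(f);
	return 0;
}
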
@@ -134,6 +134,10 @@ static unsigned long one_ul = 1;
static unsigned long long_max = LONG_MAX;
static int one_hundred = 100;
static int one_thousand = 1000;
#ifdef CONFIG_QCOM_HYP_CORE_CTL
static int five_hundred = 500;
static int five_thousand = 5000;
#endif
#ifdef CONFIG_PRINTK
static int ten_thousand = 10000;
#endif
@@ -346,6 +350,17 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
#ifdef CONFIG_QCOM_HYP_CORE_CTL
{
.procname = "hh_suspend_timeout_ms",
.data = &sysctl_hh_suspend_timeout_ms,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_douintvec_minmax,
.extra1 = &five_hundred,
.extra2 = &five_thousand,
},
#endif
#ifdef CONFIG_SCHED_WALT
{
.procname = "sched_user_hint",