[linux-yocto] [PATCH 1/1] sched/cgroup: Fix/cleanup cgroup teardown/init

Bruce Ashfield bruce.ashfield at windriver.com
Tue Apr 19 10:42:47 PDT 2016


On 2016-04-18 3:38 AM, Mikko Ylinen wrote:
> This commit backports 2f5177f0fd7e531b26d54633be62d1d4cb94621c
> from linux-stable.

Thanks for the patch, but just a couple of questions.

2f5177f0fd7e531b26d54633be62d1d4cb94621c is a mainline commit ID,
and my -stable tree only shows it on master. Are you saying that
this is in 4.4-stable or 4.5-stable? If so, I'll just grab the
entire -stable release rather than cherry-picking. I stay up to
date with the stable releases, but have to pause near a release
point (as we are now).

If this is a straight backport from mainline, that's fine as well;
I would just drop the "linux-stable" comment.

I may just not have been looking at the right branches, but I
didn't see it on my 4.4-stable branch.

Bruce


>
> We've seen frequent oopses with linux-yocto-4.4, and this commit
> helps get rid of them.
>
> Changes:
>
> Peter Zijlstra (1):
>    sched/cgroup: Fix/cleanup cgroup teardown/init
>
>   kernel/sched/core.c | 35 ++++++++++++++---------------------
>   1 file changed, 14 insertions(+), 21 deletions(-)
>
> Commit log:
>
> The CPU controller hasn't kept up with the various changes in the whole
> cgroup initialization / destruction sequence, and commit:
>
>    2e91fa7f6d45 ("cgroup: keep zombies associated with their original cgroups")
>
> caused it to explode.
>
> The reason for this is that zombies do not inhibit css_offline() from
> being called, but do stall css_released(). Now we tear down the cfs_rq
> structures on css_offline() but zombies can run after that, leading to
> use-after-free issues.
>
> The solution is to move the tear-down to css_released(), which
> guarantees nobody (including no zombies) is still using our cgroup.
>
> Furthermore, a few simple cleanups are possible too. There doesn't
> appear to be any point to us using css_online() (anymore?) so fold that
> in css_alloc().
>
> And since cgroup code guarantees an RCU grace period between
> css_released() and css_free() we can forgo using call_rcu() and free the
> stuff immediately.
>
> Signed-off-by: Mikko Ylinen <mikko.ylinen at intel.com>
> ---
>   kernel/sched/core.c | 35 ++++++++++++++---------------------
>   1 file changed, 14 insertions(+), 21 deletions(-)
>
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index eb70592..bcda4f8 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -7692,7 +7692,7 @@ void set_curr_task(int cpu, struct task_struct *p)
>   /* task_group_lock serializes the addition/removal of task groups */
>   static DEFINE_SPINLOCK(task_group_lock);
>
> -static void free_sched_group(struct task_group *tg)
> +static void sched_free_group(struct task_group *tg)
>   {
>   	free_fair_sched_group(tg);
>   	free_rt_sched_group(tg);
> @@ -7718,7 +7718,7 @@ struct task_group *sched_create_group(struct task_group *parent)
>   	return tg;
>
>   err:
> -	free_sched_group(tg);
> +	sched_free_group(tg);
>   	return ERR_PTR(-ENOMEM);
>   }
>
> @@ -7738,17 +7738,16 @@ void sched_online_group(struct task_group *tg, struct task_group *parent)
>   }
>
>   /* rcu callback to free various structures associated with a task group */
> -static void free_sched_group_rcu(struct rcu_head *rhp)
> +static void sched_free_group_rcu(struct rcu_head *rhp)
>   {
>   	/* now it should be safe to free those cfs_rqs */
> -	free_sched_group(container_of(rhp, struct task_group, rcu));
> +	sched_free_group(container_of(rhp, struct task_group, rcu));
>   }
>
> -/* Destroy runqueue etc associated with a task group */
>   void sched_destroy_group(struct task_group *tg)
>   {
>   	/* wait for possible concurrent references to cfs_rqs complete */
> -	call_rcu(&tg->rcu, free_sched_group_rcu);
> +	call_rcu(&tg->rcu, sched_free_group_rcu);
>   }
>
>   void sched_offline_group(struct task_group *tg)
> @@ -8209,31 +8208,26 @@ cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
>   	if (IS_ERR(tg))
>   		return ERR_PTR(-ENOMEM);
>
> +	sched_online_group(tg, parent);
> +
>   	return &tg->css;
>   }
>
> -static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
> +static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
>   {
>   	struct task_group *tg = css_tg(css);
> -	struct task_group *parent = css_tg(css->parent);
>
> -	if (parent)
> -		sched_online_group(tg, parent);
> -	return 0;
> +	sched_offline_group(tg);
>   }
>
>   static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
>   {
>   	struct task_group *tg = css_tg(css);
>
> -	sched_destroy_group(tg);
> -}
> -
> -static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
> -{
> -	struct task_group *tg = css_tg(css);
> -
> -	sched_offline_group(tg);
> +	/*
> +	 * Relies on the RCU grace period between css_released() and this.
> +	 */
> +	sched_free_group(tg);
>   }
>
>   static void cpu_cgroup_fork(struct task_struct *task, void *private)
> @@ -8593,9 +8587,8 @@ static struct cftype cpu_files[] = {
>
>   struct cgroup_subsys cpu_cgrp_subsys = {
>   	.css_alloc	= cpu_cgroup_css_alloc,
> +	.css_released	= cpu_cgroup_css_released,
>   	.css_free	= cpu_cgroup_css_free,
> -	.css_online	= cpu_cgroup_css_online,
> -	.css_offline	= cpu_cgroup_css_offline,
>   	.fork		= cpu_cgroup_fork,
>   	.can_attach	= cpu_cgroup_can_attach,
>   	.attach		= cpu_cgroup_attach,
>
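
The commit log above hinges on the css lifecycle ordering: css_offline()
can run while zombie tasks still hold a reference to the group,
css_released() only fires once every reference is gone, and cgroup core
guarantees an RCU grace period before css_free(). The following
stand-alone C sketch is a hypothetical user-space model (not kernel code;
names such as task_group_sim are invented) that only illustrates why
tearing the group down at the "released" stage is safe while doing it at
the "offline" stage is not.

/* Hypothetical user-space model of the css teardown ordering; all names
 * are invented and nothing here is kernel API. */
#include <stdio.h>
#include <stdlib.h>

struct task_group_sim {
	int refcount;	/* live tasks plus zombies still charged to the group */
	int *cfs_rq;	/* stands in for the per-CPU scheduler data */
};

/* Models css_offline(): the cgroup was rmdir'd, but zombies may still run
 * in it. The pre-patch code tore down cfs_rq here, i.e. while refcount > 0,
 * which is the use-after-free the commit log describes. */
static void offline(struct task_group_sim *tg)
{
	printf("offline:  refcount=%d (teardown here would be unsafe)\n",
	       tg->refcount);
}

/* Models css_released(): the last reference is gone, so nobody (zombie or
 * live task) can touch the group any more; this is where the patch moves
 * the teardown. */
static void released(struct task_group_sim *tg)
{
	printf("released: refcount=%d, tearing down\n", tg->refcount);
	free(tg->cfs_rq);
	tg->cfs_rq = NULL;
}

/* Models css_free(): in the kernel an RCU grace period is guaranteed
 * between released and free, so the memory can be freed directly without
 * call_rcu(). */
static void destroy(struct task_group_sim *tg)
{
	free(tg);
}

int main(void)
{
	struct task_group_sim *tg = calloc(1, sizeof(*tg));

	tg->cfs_rq = calloc(4, sizeof(*tg->cfs_rq));
	tg->refcount = 1;	/* one zombie still associated with the group */

	offline(tg);		/* rmdir happened; the zombie keeps the group alive */
	tg->refcount--;		/* zombie is finally reaped */
	released(tg);		/* safe: no users remain */
	destroy(tg);
	return 0;
}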


