diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 88e8634607266865d965c862296441a95789aad8..3ac78a2f4b5a72eb259177c7ddac5f85dd70493a 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -260,6 +260,9 @@ struct css_set {
 	 * during subsystem registration (at boot time).
 	 */
 	struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
+
+	/* For RCU-protected deletion */
+	struct rcu_head rcu_head;
 };
 
 /*
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 3e356b05b2d571902c0678e3a7c7e00ce024d315..bf8dd1a9f2d1952b50e7ec37496c8a5ab6ac7b30 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -267,6 +267,12 @@ static struct hlist_head *css_set_hash(struct cgroup_subsys_state *css[])
 	return &css_set_table[index];
 }
 
+static void free_css_set_rcu(struct rcu_head *obj)
+{
+	struct css_set *cg = container_of(obj, struct css_set, rcu_head);
+	kfree(cg);
+}
+
 /* We don't maintain the lists running through each css_set to its
  * task until after the first call to cgroup_iter_start(). This
  * reduces the fork()/exit() overhead for people who have cgroups
@@ -310,7 +316,7 @@ static void __put_css_set(struct css_set *cg, int taskexit)
 	}
 	write_unlock(&css_set_lock);
 
-	kfree(cg);
+	call_rcu(&cg->rcu_head, free_css_set_rcu);
 }
 
 /*
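
For context, a minimal read-side sketch of what this conversion enables. The function below is illustrative only and is not part of the patch: because __put_css_set() now defers the kfree() through call_rcu(), a reader holding rcu_read_lock() can follow a task's css_set pointer without taking css_set_lock, since the struct cannot be reclaimed until the reader leaves its RCU critical section.

/*
 * Hypothetical reader, assuming the usual task->cgroups, css_set->subsys[]
 * and cgroup_subsys_state->cgroup fields; not part of this patch.
 */
static bool task_in_cgroup_sketch(struct task_struct *task,
				  struct cgroup *cgrp, int subsys_id)
{
	struct css_set *cg;
	bool match;

	rcu_read_lock();
	/*
	 * Safe without css_set_lock: even if the last reference is dropped
	 * concurrently, free_css_set_rcu() cannot run until after this RCU
	 * read-side critical section ends.
	 */
	cg = rcu_dereference(task->cgroups);
	match = (cg->subsys[subsys_id]->cgroup == cgrp);
	rcu_read_unlock();

	return match;
}

Note that only the css_set memory is pinned by RCU here; the css_set pointer must not be used after rcu_read_unlock(), and any longer-lived use still needs a proper reference.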