[RFC][-mm] Memory controller add mm->owner - Kernel



Thread: [RFC][-mm] Memory controller add mm->owner

  1. [RFC][-mm] Memory controller add mm->owner



    This patch removes the mem_cgroup member from mm_struct and instead adds
    an owner. This approach was suggested by Paul Menage. The advantage is
    that, once mm->owner is known, the cgroup can be determined from it using
    the subsystem id. It also allows several control groups that are virtually
    grouped by mm_struct to exist independently of the memory controller,
    i.e., without adding a mem_cgroup-style pointer to mm_struct for each
    controller.

    The code initially assigns mm->owner to the task itself; later, at the
    end of copy_process(), once the thread group leader has been identified,
    mm->owner is changed to the task's thread group leader.
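
    As an illustration of the point above (not part of the patch): once
    mm->owner is a task_struct, any other cgroup subsystem can resolve its
    own per-cgroup state from an mm without a dedicated pointer in
    mm_struct. Here "foo" and foo_subsys_id are made-up names for a
    hypothetical controller:

    struct foo_cgroup {
            struct cgroup_subsys_state css;
            /* per-controller state */
    };

    static inline struct foo_cgroup *foo_cgroup_from_mm(struct mm_struct *mm)
    {
            return container_of(task_subsys_state(mm->owner, foo_subsys_id),
                                struct foo_cgroup, css);
    }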

    Signed-off-by: Balbir Singh
    ---

    include/linux/memcontrol.h | 14 +++++++++++++-
    include/linux/mm_types.h | 5 ++++-
    kernel/fork.c | 4 ++++
    mm/memcontrol.c | 42 ++++++++++++++++++++++++++++++++++--------
    4 files changed, 55 insertions(+), 10 deletions(-)

    diff -puN include/linux/mm_types.h~memory-controller-add-mm-owner include/linux/mm_types.h
    --- linux-2.6.25-rc5/include/linux/mm_types.h~memory-controller-add-mm-owner 2008-03-20 13:35:09.000000000 +0530
    +++ linux-2.6.25-rc5-balbir/include/linux/mm_types.h 2008-03-20 15:11:05.000000000 +0530
    @@ -228,7 +228,10 @@ struct mm_struct {
    rwlock_t ioctx_list_lock;
    struct kioctx *ioctx_list;
    #ifdef CONFIG_CGROUP_MEM_RES_CTLR
    - struct mem_cgroup *mem_cgroup;
    + struct task_struct *owner; /* The thread group leader that */
    + /* owns the mm_struct. This */
    + /* might be useful even outside */
    + /* of the config option */
    #endif

    #ifdef CONFIG_PROC_FS
    diff -puN kernel/fork.c~memory-controller-add-mm-owner kernel/fork.c
    --- linux-2.6.25-rc5/kernel/fork.c~memory-controller-add-mm-owner 2008-03-20 13:35:09.000000000 +0530
    +++ linux-2.6.25-rc5-balbir/kernel/fork.c 2008-03-24 18:49:29.000000000 +0530
    @@ -1357,6 +1357,10 @@ static struct task_struct *copy_process(
    write_unlock_irq(&tasklist_lock);
    proc_fork_connector(p);
    cgroup_post_fork(p);
    +
    + if (!(clone_flags & CLONE_VM))
    + mem_cgroup_fork_init(p);
    +
    return p;

    bad_fork_free_pid:
    diff -puN include/linux/memcontrol.h~memory-controller-add-mm-owner include/linux/memcontrol.h
    --- linux-2.6.25-rc5/include/linux/memcontrol.h~memory-controller-add-mm-owner 2008-03-20 13:35:09.000000000 +0530
    +++ linux-2.6.25-rc5-balbir/include/linux/memcontrol.h 2008-03-24 18:49:52.000000000 +0530
    @@ -29,6 +29,7 @@ struct mm_struct;

    extern void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p);
    extern void mm_free_cgroup(struct mm_struct *mm);
    +extern void mem_cgroup_fork_init(struct task_struct *p);

    #define page_reset_bad_cgroup(page) ((page)->page_cgroup = 0)

    @@ -49,7 +50,7 @@ extern void mem_cgroup_out_of_memory(str
    int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);

    #define mm_match_cgroup(mm, cgroup) \
    - ((cgroup) == rcu_dereference((mm)->mem_cgroup))
    + ((cgroup) == mem_cgroup_from_task((mm)->owner))

    extern int mem_cgroup_prepare_migration(struct page *page);
    extern void mem_cgroup_end_migration(struct page *page);
    @@ -72,6 +73,8 @@ extern long mem_cgroup_calc_reclaim_acti
    extern long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
    struct zone *zone, int priority);

    +extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
    +
    #else /* CONFIG_CGROUP_MEM_RES_CTLR */
    static inline void mm_init_cgroup(struct mm_struct *mm,
    struct task_struct *p)
    @@ -82,6 +85,10 @@ static inline void mm_free_cgroup(struct
    {
    }

    +static inline void mem_cgroup_fork_init(struct task_struct *p)
    +{
    +}
    +
    static inline void page_reset_bad_cgroup(struct page *page)
    {
    }
    @@ -172,6 +179,11 @@ static inline long mem_cgroup_calc_recla
    {
    return 0;
    }
    +
    +static void mm_free_fork_cgroup(struct task_struct *p)
    +{
    +}
    +
    #endif /* CONFIG_CGROUP_MEM_CONT */

    #endif /* _LINUX_MEMCONTROL_H */
    diff -puN mm/memcontrol.c~memory-controller-add-mm-owner mm/memcontrol.c
    --- linux-2.6.25-rc5/mm/memcontrol.c~memory-controller-add-mm-owner 2008-03-20 13:35:09.000000000 +0530
    +++ linux-2.6.25-rc5-balbir/mm/memcontrol.c 2008-03-24 19:04:32.000000000 +0530
    @@ -236,7 +236,7 @@ static struct mem_cgroup *mem_cgroup_fro
    css);
    }

    -static struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
    +struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
    {
    return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
    struct mem_cgroup, css);
    @@ -248,12 +248,40 @@ void mm_init_cgroup(struct mm_struct *mm

    mem = mem_cgroup_from_task(p);
    css_get(&mem->css);
    - mm->mem_cgroup = mem;
    + mm->owner = p;
    +}
    +
    +void mem_cgroup_fork_init(struct task_struct *p)
    +{
    + struct mm_struct *mm = get_task_mm(p);
    + struct mem_cgroup *mem, *oldmem;
    + if (!mm)
    + return;
    +
    + /*
    + * Initial owner at mm_init_cgroup() time is the task itself.
    + * The thread group leader had not been setup then
    + */
    + oldmem = mem_cgroup_from_task(mm->owner);
    + /*
    + * Override the mm->owner after we know the thread group later
    + */
    + mm->owner = p->group_leader;
    + mem = mem_cgroup_from_task(mm->owner);
    + css_get(&mem->css);
    + css_put(&oldmem->css);
    + mmput(mm);
    }

    void mm_free_cgroup(struct mm_struct *mm)
    {
    - css_put(&mm->mem_cgroup->css);
    + struct mem_cgroup *mem;
    +
    + /*
    + * TODO: Should we assign mm->owner to NULL here?
    + */
    + mem = mem_cgroup_from_task(mm->owner);
    + css_put(&mem->css);
    }

    static inline int page_cgroup_locked(struct page *page)
    @@ -476,6 +504,7 @@ unsigned long mem_cgroup_isolate_pages(u
    int zid = zone_idx(z);
    struct mem_cgroup_per_zone *mz;

    + BUG_ON(!mem_cont);
    mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
    if (active)
    src = &mz->active_list;
    @@ -573,13 +602,11 @@ retry:
    if (!mm)
    mm = &init_mm;

    - rcu_read_lock();
    - mem = rcu_dereference(mm->mem_cgroup);
    + mem = mem_cgroup_from_task(mm->owner);
    /*
    * For every charge from the cgroup, increment reference count
    */
    css_get(&mem->css);
    - rcu_read_unlock();

    while (res_counter_charge(&mem->res, PAGE_SIZE)) {
    if (!(gfp_mask & __GFP_WAIT))
    @@ -988,7 +1015,7 @@ mem_cgroup_create(struct cgroup_subsys *

    if (unlikely((cont->parent) == NULL)) {
    mem = &init_mem_cgroup;
    - init_mm.mem_cgroup = mem;
    + init_mm.owner = &init_task;
    } else
    mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL);

    @@ -1069,7 +1096,6 @@ static void mem_cgroup_move_task(struct
    goto out;

    css_get(&mem->css);
    - rcu_assign_pointer(mm->mem_cgroup, mem);
    css_put(&old_mem->css);

    out:
    _

    --
    Warm Regards,
    Balbir Singh
    Linux Technology Center
    IBM, ISTL

  2. Re: [RFC][-mm] Memory controller add mm->owner

    On Mon, Mar 24, 2008 at 7:01 AM, Balbir Singh wrote:
    > --- linux-2.6.25-rc5/include/linux/mm_types.h~memory-controller-add-mm-owner 2008-03-20 13:35:09.000000000 +0530
    > +++ linux-2.6.25-rc5-balbir/include/linux/mm_types.h 2008-03-20 15:11:05.000000000 +0530
    > @@ -228,7 +228,10 @@ struct mm_struct {
    > rwlock_t ioctx_list_lock;
    > struct kioctx *ioctx_list;
    > #ifdef CONFIG_CGROUP_MEM_RES_CTLR
    > - struct mem_cgroup *mem_cgroup;
    > + struct task_struct *owner; /* The thread group leader that */
    > + /* owns the mm_struct. This */
    > + /* might be useful even outside */
    > + /* of the config option */
    > #endif


    This should probably be controlled by something like a CONFIG_MM_OWNER
    that's selected by any Kconfig option (mem cgroup, etc) that needs
    mm->owner to be maintained.
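
    A rough sketch of the mm_types.h side of that suggestion (CONFIG_MM_OWNER
    is the proposed, not yet existing, symbol; the Kconfig side would be a
    "select MM_OWNER" from CGROUP_MEM_RES_CTLR and any other user):

    struct mm_struct {
            /* ... existing fields ... */
    #ifdef CONFIG_MM_OWNER
            struct task_struct *owner;      /* thread group leader that owns this mm */
    #endif
            /* ... existing fields ... */
    };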

    > @@ -248,12 +248,40 @@ void mm_init_cgroup(struct mm_struct *mm
    >
    > mem = mem_cgroup_from_task(p);
    > css_get(&mem->css);
    > - mm->mem_cgroup = mem;
    > + mm->owner = p;
    > +}
    > +
    > +void mem_cgroup_fork_init(struct task_struct *p)
    > +{
    > + struct mm_struct *mm = get_task_mm(p);
    > + struct mem_cgroup *mem, *oldmem;
    > + if (!mm)
    > + return;
    > +
    > + /*
    > + * Initial owner at mm_init_cgroup() time is the task itself.
    > + * The thread group leader had not been setup then
    > + */
    > + oldmem = mem_cgroup_from_task(mm->owner);
    > + /*
    > + * Override the mm->owner after we know the thread group later
    > + */
    > + mm->owner = p->group_leader;
    > + mem = mem_cgroup_from_task(mm->owner);
    > + css_get(&mem->css);
    > + css_put(&oldmem->css);
    > + mmput(mm);
    > }
    >
    > void mm_free_cgroup(struct mm_struct *mm)
    > {
    > - css_put(&mm->mem_cgroup->css);
    > + struct mem_cgroup *mem;
    > +
    > + /*
    > + * TODO: Should we assign mm->owner to NULL here?
    > + */
    > + mem = mem_cgroup_from_task(mm->owner);
    > + css_put(&mem->css);
    > }


    It seems to me that the code to set up and maintain mm->owner should be
    independent of the control groups, and should instead be part of the
    generic fork/exit code.

    Also, if mm->owner exits but the mm is still alive (unlikely, but it could
    happen with weird custom threading libraries?), then we need to reassign
    mm->owner to one of the other users of the mm (by looking first in the
    thread group, then among the parents/siblings/children, and then among
    all processes as a last resort?).
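
    A minimal sketch of that shape (mm_init_owner() and mm_update_next_owner()
    are assumed names, not code from this patch): fork would call the first
    when a new mm is set up, and exit would call the second when the owner
    goes away while other users of the mm remain:

    void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
    {
            mm->owner = p;          /* called from copy_mm()/mm_init() in fork */
    }

    void mm_update_next_owner(struct mm_struct *mm)
    {
            /*
             * Called from exit_mm() when current == mm->owner but
             * atomic_read(&mm->mm_users) > 1: walk the thread group,
             * then children/siblings, then all processes, and hand
             * ownership to the first task found with task->mm == mm
             * (see the concrete sketch later in this thread).
             */
    }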

    >
    > - rcu_read_lock();
    > - mem = rcu_dereference(mm->mem_cgroup);
    > + mem = mem_cgroup_from_task(mm->owner);


    I think we still need the rcu_read_lock(), since mm->owner can move
    cgroups any time.
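
    In other words, something along these lines in the charge path (a sketch
    only; the helper name is made up):

    static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
    {
            struct mem_cgroup *mem;

            rcu_read_lock();
            mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
            css_get(&mem->css);     /* pin the cgroup before leaving the RCU section */
            rcu_read_unlock();
            return mem;
    }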

    >
    > @@ -1069,7 +1096,6 @@ static void mem_cgroup_move_task(struct
    > goto out;
    >
    > css_get(&mem->css);
    > - rcu_assign_pointer(mm->mem_cgroup, mem);
    > css_put(&old_mem->css);
    >


    We shouldn't need reference counting on this pointer, since the
    cgroups framework won't allow a subsystem to be freed while it has any
    tasks in it.

    Paul

  3. Re: [RFC][-mm] Memory controller add mm->owner

    Paul Menage wrote:
    > On Mon, Mar 24, 2008 at 7:01 AM, Balbir Singh wrote:
    >> --- linux-2.6.25-rc5/include/linux/mm_types.h~memory-controller-add-mm-owner 2008-03-20 13:35:09.000000000 +0530
    >> +++ linux-2.6.25-rc5-balbir/include/linux/mm_types.h 2008-03-20 15:11:05.000000000 +0530
    >> @@ -228,7 +228,10 @@ struct mm_struct {
    >> rwlock_t ioctx_list_lock;
    >> struct kioctx *ioctx_list;
    >> #ifdef CONFIG_CGROUP_MEM_RES_CTLR
    >> - struct mem_cgroup *mem_cgroup;
    >> + struct task_struct *owner; /* The thread group leader that */
    >> + /* owns the mm_struct. This */
    >> + /* might be useful even outside */
    >> + /* of the config option */
    >> #endif

    >
    > This should probably be controlled by something like a CONFIG_MM_OWNER
    > that's selected by any Kconfig option (mem cgroup, etc) that needs
    > mm->owner to be maintained.
    >


    OK, will do

    >> @@ -248,12 +248,40 @@ void mm_init_cgroup(struct mm_struct *mm
    >>
    >> mem = mem_cgroup_from_task(p);
    >> css_get(&mem->css);
    >> - mm->mem_cgroup = mem;
    >> + mm->owner = p;
    >> +}
    >> +
    >> +void mem_cgroup_fork_init(struct task_struct *p)
    >> +{
    >> + struct mm_struct *mm = get_task_mm(p);
    >> + struct mem_cgroup *mem, *oldmem;
    >> + if (!mm)
    >> + return;
    >> +
    >> + /*
    >> + * Initial owner at mm_init_cgroup() time is the task itself.
    >> + * The thread group leader had not been setup then
    >> + */
    >> + oldmem = mem_cgroup_from_task(mm->owner);
    >> + /*
    >> + * Override the mm->owner after we know the thread group later
    >> + */
    >> + mm->owner = p->group_leader;
    >> + mem = mem_cgroup_from_task(mm->owner);
    >> + css_get(&mem->css);
    >> + css_put(&oldmem->css);
    >> + mmput(mm);
    >> }
    >>
    >> void mm_free_cgroup(struct mm_struct *mm)
    >> {
    >> - css_put(&mm->mem_cgroup->css);
    >> + struct mem_cgroup *mem;
    >> +
    >> + /*
    >> + * TODO: Should we assign mm->owner to NULL here?
    >> + */
    >> + mem = mem_cgroup_from_task(mm->owner);
    >> + css_put(&mem->css);
    >> }

    >
    > It seems to me that the code to setup/maintain mm->owner should be
    > independent of the control groups, but should be part of the generic
    > fork/exit code.
    >


    Hmm.. Yes, we will need to do that if we decide to go with the MM_OWNER approach.

    > Also, if mm->owner exits but mm is still alive (unlikely, but could
    > happen with weird custom threading libraries?) then we need to
    > reassign mm->owner to one of the other users of the mm (by looking
    > first in the thread group, then among the parents/siblings/children,
    > and then among all processes as a last resort?)
    >


    The comment in __exit_signal states that

    "The group leader stays around as a zombie as long
    as there are other threads. When it gets reaped,
    the exit.c code will add its counts into these totals."

    Given that the thread group leader stays around, do we need to reassign
    mm->owner? Do you do anything special in cgroups, like cleaning up
    task_struct->css->subsys_state, on exit?

    >> - rcu_read_lock();
    >> - mem = rcu_dereference(mm->mem_cgroup);
    >> + mem = mem_cgroup_from_task(mm->owner);

    >
    > I think we still need the rcu_read_lock(), since mm->owner can move
    > cgroups any time.
    >


    OK, so cgroup task movement is protected by RCU, right? I'll check for all
    mm->owner uses.

    >> @@ -1069,7 +1096,6 @@ static void mem_cgroup_move_task(struct
    >> goto out;
    >>
    >> css_get(&mem->css);
    >> - rcu_assign_pointer(mm->mem_cgroup, mem);
    >> css_put(&old_mem->css);
    >>

    >
    > We shouldn't need reference counting on this pointer, since the
    > cgroups framework won't allow a subsystem to be freed while it has any
    > tasks in it.
    >


    This reference earlier indicated that there were active mm->mem_cgroup
    users of the cgroup. With the mm->owner changes, we might not require it.
    Let me double-check that.

    Thanks for the review.

    --
    Warm Regards,
    Balbir Singh
    Linux Technology Center
    IBM, ISTL

  4. Re: [RFC][-mm] Memory controller add mm->owner

    On Mon, Mar 24, 2008 at 9:21 AM, Balbir Singh wrote:
    > > Also, if mm->owner exits but mm is still alive (unlikely, but could
    > > happen with weird custom threading libraries?) then we need to
    > > reassign mm->owner to one of the other users of the mm (by looking
    > > first in the thread group, then among the parents/siblings/children,
    > > and then among all processes as a last resort?)
    > >

    >
    > The comment in __exit_signal states that
    >
    > "The group leader stays around as a zombie as long
    > as there are other threads. When it gets reaped,
    > the exit.c code will add its counts into these totals."


    Ah, that's useful to know.

    >
    > Given that the thread group leader stays around, do we need to reassign
    > mm->owner? Do you do anything special in cgroups like cleanup the
    > task_struct->css->subsys_state on exit?
    >


    OK, so we don't need to handle this for NPTL apps - but for anything
    still using LinuxThreads or manually constructed clone() calls that
    use CLONE_VM without CLONE_PID, this could still be an issue. (Also I
    guess there's the case of someone holding a reference to the mm via a
    /proc file?)
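
    For concreteness (a userspace sketch added for illustration, not from the
    original mail): a process created like this shares the mm with its parent
    but is not in the parent's thread group, so the group-leader-zombie
    argument does not cover it:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <signal.h>
    #include <stdlib.h>
    #include <unistd.h>

    static int worker(void *arg)
    {
            pause();        /* keeps the shared mm alive after the parent exits */
            return 0;
    }

    int main(void)
    {
            const long stack_size = 64 * 1024;
            char *stack = malloc(stack_size);

            /* CLONE_VM without CLONE_THREAD: shared mm, separate thread group */
            clone(worker, stack + stack_size, CLONE_VM | SIGCHLD, NULL);
            return 0;       /* the mm's owner exits while the child still uses the mm */
    }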

    >
    > >> - rcu_read_lock();
    > >> - mem = rcu_dereference(mm->mem_cgroup);
    > >> + mem = mem_cgroup_from_task(mm->owner);

    > >
    > > I think we still need the rcu_read_lock(), since mm->owner can move
    > > cgroups any time.
    > >

    >
    > OK, so cgroup task movement is protected by RCU, right? I'll check for all
    > mm->owner uses.
    >


    Yes - cgroup_attach() uses synchronize_rcu() before releasing the cgroup
    mutex. So although you can't guarantee that the cgroup set won't
    change if you're just using RCU, you can guarantee that you're
    addressing a still-valid, non-destroyed (and of course non-freed)
    cgroup set.

    Paul

  5. Re: [RFC][-mm] Memory controller add mm->owner

    Paul Menage wrote:
    > On Mon, Mar 24, 2008 at 9:21 AM, Balbir Singh wrote:
    >> > Also, if mm->owner exits but mm is still alive (unlikely, but could
    >> > happen with weird custom threading libraries?) then we need to
    >> > reassign mm->owner to one of the other users of the mm (by looking
    >> > first in the thread group, then among the parents/siblings/children,
    >> > and then among all processes as a last resort?)
    >> >

    >>
    >> The comment in __exit_signal states that
    >>
    >> "The group leader stays around as a zombie as long
    >> as there are other threads. When it gets reaped,
    >> the exit.c code will add its counts into these totals."

    >
    > Ah, that's useful to know.
    >
    >> Given that the thread group leader stays around, do we need to reassign
    >> mm->owner? Do you do anything special in cgroups like cleanup the
    >> task_struct->css->subsys_state on exit?
    >>

    >
    > OK, so we don't need to handle this for NPTL apps - but for anything
    > still using LinuxThreads or manually constructed clone() calls that
    > use CLONE_VM without CLONE_PID, this could still be an issue.


    CLONE_PID?? Do you mean CLONE_THREAD?

    For the case you mentioned, mm->owner is a moving target, and we don't want
    to spend time finding the successor; that can be expensive when threads
    start exiting one by one quickly and the number of threads is high. I
    wonder if there is an efficient way to find mm->owner in that case.

    > (Also I guess there's the case of someone holding a reference to the mm
    > via a /proc file?)


    Yes, but in that case we'll not be charging/uncharging anything to that mm or
    the cgroup to which the mm belongs.

    >> >> - rcu_read_lock();
    >> >> - mem = rcu_dereference(mm->mem_cgroup);
    >> >> + mem = mem_cgroup_from_task(mm->owner);
    >> >
    >> > I think we still need the rcu_read_lock(), since mm->owner can move
    >> > cgroups any time.
    >> >

    >>
    >> OK, so cgroup task movement is protected by RCU, right? I'll check for all
    >> mm->owner uses.
    >>

    >
    > Yes - cgroup_attach() uses synchronize_rcu() before release the cgroup
    > mutex. So although you can't guarantee that the cgroup set won't
    > change if you're just using RCU, you can't guarantee that you're
    > addressing a still-valid non-destroyed (and of course non-freed)
    > cgroup set.
    >


    Yes, I understand that part of RCU.

    --
    Warm Regards,
    Balbir Singh
    Linux Technology Center
    IBM, ISTL

  6. Re: [RFC][-mm] Memory controller add mm->owner

    On Mon, Mar 24, 2008 at 10:33 AM, Balbir Singh wrote:
    > > OK, so we don't need to handle this for NPTL apps - but for anything
    > > still using LinuxThreads or manually constructed clone() calls that
    > > use CLONE_VM without CLONE_PID, this could still be an issue.

    >
    > CLONE_PID?? Do you mean CLONE_THREAD?


    Yes, sorry - CLONE_THREAD.

    >
    > For the case you mentioned, mm->owner is a moving target and we don't want to
    > spend time finding the successor, that can be expensive when threads start
    > exiting one-by-one quickly and when the number of threads are high. I wonder if
    > there is an efficient way to find mm->owner in that case.
    >


    But:

    - running a high-threadcount LinuxThreads process is by definition
    inefficient and expensive (hence the move to NPTL)

    - any potential performance hit is only paid at exit time

    - in the normal case, any of your children or one of your siblings
    will be a suitable alternate owner

    - in the worst case, it's not going to be worse than doing a
    for_each_thread() loop

    so I don't think this would be a major problem

    Paul

  7. Re: [RFC][-mm] Memory controller add mm->owner

    Balbir Singh wrote:
    > This patch removes the mem_cgroup member from mm_struct and instead adds
    > an owner. This approach was suggested by Paul Menage. The advantage of
    > this approach is that, once the mm->owner is known, using the subsystem
    > id, the cgroup can be determined. It also allows several control groups
    > that are virtually grouped by mm_struct, to exist independent of the memory
    > controller i.e., without adding mem_cgroup's for each controller,
    > to mm_struct.
    >
    > The code initially assigns mm->owner to the task and then after the
    > thread group leader is identified. The mm->owner is changed to the thread
    > group leader of the task later at the end of copy_process.
    >
    > Signed-off-by: Balbir Singh
    > ---
    >
    > include/linux/memcontrol.h | 14 +++++++++++++-
    > include/linux/mm_types.h | 5 ++++-
    > kernel/fork.c | 4 ++++
    > mm/memcontrol.c | 42 ++++++++++++++++++++++++++++++++++--------
    > 4 files changed, 55 insertions(+), 10 deletions(-)
    >
    > diff -puN include/linux/mm_types.h~memory-controller-add-mm-owner include/linux/mm_types.h
    > --- linux-2.6.25-rc5/include/linux/mm_types.h~memory-controller-add-mm-owner 2008-03-20 13:35:09.000000000 +0530
    > +++ linux-2.6.25-rc5-balbir/include/linux/mm_types.h 2008-03-20 15:11:05.000000000 +0530
    > @@ -228,7 +228,10 @@ struct mm_struct {
    > rwlock_t ioctx_list_lock;
    > struct kioctx *ioctx_list;
    > #ifdef CONFIG_CGROUP_MEM_RES_CTLR
    > - struct mem_cgroup *mem_cgroup;
    > + struct task_struct *owner; /* The thread group leader that */
    > + /* owns the mm_struct. This */
    > + /* might be useful even outside */
    > + /* of the config option */
    > #endif
    >
    > #ifdef CONFIG_PROC_FS
    > diff -puN kernel/fork.c~memory-controller-add-mm-owner kernel/fork.c
    > --- linux-2.6.25-rc5/kernel/fork.c~memory-controller-add-mm-owner 2008-03-20 13:35:09.000000000 +0530
    > +++ linux-2.6.25-rc5-balbir/kernel/fork.c 2008-03-24 18:49:29.000000000 +0530
    > @@ -1357,6 +1357,10 @@ static struct task_struct *copy_process(
    > write_unlock_irq(&tasklist_lock);
    > proc_fork_connector(p);
    > cgroup_post_fork(p);
    > +
    > + if (!(clone_flags & CLONE_VM))
    > + mem_cgroup_fork_init(p);
    > +
    > return p;
    >
    > bad_fork_free_pid:
    > diff -puN include/linux/memcontrol.h~memory-controller-add-mm-owner include/linux/memcontrol.h
    > --- linux-2.6.25-rc5/include/linux/memcontrol.h~memory-controller-add-mm-owner 2008-03-20 13:35:09.000000000 +0530
    > +++ linux-2.6.25-rc5-balbir/include/linux/memcontrol.h 2008-03-24 18:49:52.000000000 +0530
    > @@ -29,6 +29,7 @@ struct mm_struct;
    >
    > extern void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p);
    > extern void mm_free_cgroup(struct mm_struct *mm);
    > +extern void mem_cgroup_fork_init(struct task_struct *p);
    >
    > #define page_reset_bad_cgroup(page) ((page)->page_cgroup = 0)
    >
    > @@ -49,7 +50,7 @@ extern void mem_cgroup_out_of_memory(str
    > int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);
    >
    > #define mm_match_cgroup(mm, cgroup) \
    > - ((cgroup) == rcu_dereference((mm)->mem_cgroup))
    > + ((cgroup) == mem_cgroup_from_task((mm)->owner))
    >
    > extern int mem_cgroup_prepare_migration(struct page *page);
    > extern void mem_cgroup_end_migration(struct page *page);
    > @@ -72,6 +73,8 @@ extern long mem_cgroup_calc_reclaim_acti
    > extern long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
    > struct zone *zone, int priority);
    >
    > +extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
    > +
    > #else /* CONFIG_CGROUP_MEM_RES_CTLR */
    > static inline void mm_init_cgroup(struct mm_struct *mm,
    > struct task_struct *p)
    > @@ -82,6 +85,10 @@ static inline void mm_free_cgroup(struct
    > {
    > }
    >
    > +static inline void mem_cgroup_fork_init(struct task_struct *p)
    > +{
    > +}
    > +
    > static inline void page_reset_bad_cgroup(struct page *page)
    > {
    > }
    > @@ -172,6 +179,11 @@ static inline long mem_cgroup_calc_recla
    > {
    > return 0;
    > }
    > +
    > +static void mm_free_fork_cgroup(struct task_struct *p)
    > +{
    > +}
    > +


    Where is this function used? I don't see the corresponding one
    with CONFIG_CGROUP_MEM_RES_CTLR enabled?

    > #endif /* CONFIG_CGROUP_MEM_CONT */
    >
    > #endif /* _LINUX_MEMCONTROL_H */
    > diff -puN mm/memcontrol.c~memory-controller-add-mm-owner mm/memcontrol.c
    > --- linux-2.6.25-rc5/mm/memcontrol.c~memory-controller-add-mm-owner 2008-03-20 13:35:09.000000000 +0530
    > +++ linux-2.6.25-rc5-balbir/mm/memcontrol.c 2008-03-24 19:04:32.000000000 +0530
    > @@ -236,7 +236,7 @@ static struct mem_cgroup *mem_cgroup_fro
    > css);
    > }
    >
    > -static struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
    > +struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
    > {
    > return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
    > struct mem_cgroup, css);
    > @@ -248,12 +248,40 @@ void mm_init_cgroup(struct mm_struct *mm
    >
    > mem = mem_cgroup_from_task(p);
    > css_get(&mem->css);
    > - mm->mem_cgroup = mem;
    > + mm->owner = p;
    > +}
    > +
    > +void mem_cgroup_fork_init(struct task_struct *p)
    > +{
    > + struct mm_struct *mm = get_task_mm(p);
    > + struct mem_cgroup *mem, *oldmem;


    Leave an empty line here.

    > + if (!mm)
    > + return;
    > +
    > + /*
    > + * Initial owner at mm_init_cgroup() time is the task itself.
    > + * The thread group leader had not been setup then
    > + */
    > + oldmem = mem_cgroup_from_task(mm->owner);
    > + /*
    > + * Override the mm->owner after we know the thread group later
    > + */
    > + mm->owner = p->group_leader;
    > + mem = mem_cgroup_from_task(mm->owner);
    > + css_get(&mem->css);
    > + css_put(&oldmem->css);
    > + mmput(mm);
    > }
    >
    > void mm_free_cgroup(struct mm_struct *mm)
    > {
    > - css_put(&mm->mem_cgroup->css);
    > + struct mem_cgroup *mem;
    > +
    > + /*
    > + * TODO: Should we assign mm->owner to NULL here?
    > + */
    > + mem = mem_cgroup_from_task(mm->owner);
    > + css_put(&mem->css);
    > }
    >
    > static inline int page_cgroup_locked(struct page *page)
    > @@ -476,6 +504,7 @@ unsigned long mem_cgroup_isolate_pages(u
    > int zid = zone_idx(z);
    > struct mem_cgroup_per_zone *mz;
    >
    > + BUG_ON(!mem_cont);
    > mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
    > if (active)
    > src = &mz->active_list;
    > @@ -573,13 +602,11 @@ retry:
    > if (!mm)
    > mm = &init_mm;
    >
    > - rcu_read_lock();
    > - mem = rcu_dereference(mm->mem_cgroup);
    > + mem = mem_cgroup_from_task(mm->owner);
    > /*
    > * For every charge from the cgroup, increment reference count
    > */
    > css_get(&mem->css);
    > - rcu_read_unlock();
    >
    > while (res_counter_charge(&mem->res, PAGE_SIZE)) {
    > if (!(gfp_mask & __GFP_WAIT))
    > @@ -988,7 +1015,7 @@ mem_cgroup_create(struct cgroup_subsys *
    >
    > if (unlikely((cont->parent) == NULL)) {
    > mem = &init_mem_cgroup;
    > - init_mm.mem_cgroup = mem;
    > + init_mm.owner = &init_task;
    > } else
    > mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL);
    >
    > @@ -1069,7 +1096,6 @@ static void mem_cgroup_move_task(struct
    > goto out;
    >
    > css_get(&mem->css);
    > - rcu_assign_pointer(mm->mem_cgroup, mem);
    > css_put(&old_mem->css);
    >
    > out:
    > _
    >


  8. Re: [RFC][-mm] Memory controller add mm->owner

    Paul Menage wrote:
    > On Mon, Mar 24, 2008 at 10:33 AM, Balbir Singh
    > wrote:
    >> > OK, so we don't need to handle this for NPTL apps - but for anything
    >> > still using LinuxThreads or manually constructed clone() calls that
    >> > use CLONE_VM without CLONE_PID, this could still be an issue.

    >>
    >> CLONE_PID?? Do you mean CLONE_THREAD?

    >
    > Yes, sorry - CLONE_THREAD.
    >
    >> For the case you mentioned, mm->owner is a moving target and we don't want to
    >> spend time finding the successor, that can be expensive when threads start
    >> exiting one-by-one quickly and when the number of threads are high. I wonder if
    >> there is an efficient way to find mm->owner in that case.
    >>

    >
    > But:
    >
    > - running a high-threadcount LinuxThreads process is by definition
    > inefficient and expensive (hence the move to NPTL)
    >
    > - any potential performance hit is only paid at exit time
    >
    > - in the normal case, any of your children or one of your siblings
    > will be a suitable alternate owner
    >
    > - in the worst case, it's not going to be worse than doing a
    > for_each_thread() loop
    >
    > so I don't think this would be a major problem
    >


    I've been looking at zap_threads(); I suspect we'll end up implementing a
    similar loop, which makes me very uncomfortable: adding code for the least
    likely scenario. It will not get invoked for CLONE_THREAD, but it will get
    invoked when CLONE_VM is set without CLONE_THREAD.

    I'll try and experiment a bit more and see what I come up with.


    --
    Warm Regards,
    Balbir Singh
    Linux Technology Center
    IBM, ISTL

  9. Re: [RFC][-mm] Memory controller add mm->owner

    On Tue, Mar 25, 2008 at 6:56 AM, Li Zefan wrote:
    >
    > Balbir Singh wrote:
    > > This patch removes the mem_cgroup member from mm_struct and instead adds
    > > an owner. This approach was suggested by Paul Menage. The advantage of
    > > this approach is that, once the mm->owner is known, using the subsystem
    > > id, the cgroup can be determined. It also allows several control groups
    > > that are virtually grouped by mm_struct, to exist independent of the memory
    > > controller i.e., without adding mem_cgroup's for each controller,
    > > to mm_struct.
    > >
    > > The code initially assigns mm->owner to the task and then after the
    > > thread group leader is identified. The mm->owner is changed to the thread
    > > group leader of the task later at the end of copy_process.
    > >
    > > Signed-off-by: Balbir Singh
    > > ---
    > >
    > > include/linux/memcontrol.h | 14 +++++++++++++-
    > > include/linux/mm_types.h | 5 ++++-
    > > kernel/fork.c | 4 ++++
    > > mm/memcontrol.c | 42 ++++++++++++++++++++++++++++++++++--------
    > > 4 files changed, 55 insertions(+), 10 deletions(-)
    > >
    > > diff -puN include/linux/mm_types.h~memory-controller-add-mm-owner include/linux/mm_types.h
    > > --- linux-2.6.25-rc5/include/linux/mm_types.h~memory-controller-add-mm-owner 2008-03-20 13:35:09.000000000 +0530
    > > +++ linux-2.6.25-rc5-balbir/include/linux/mm_types.h 2008-03-20 15:11:05.000000000 +0530
    > > @@ -228,7 +228,10 @@ struct mm_struct {
    > > rwlock_t ioctx_list_lock;
    > > struct kioctx *ioctx_list;
    > > #ifdef CONFIG_CGROUP_MEM_RES_CTLR
    > > - struct mem_cgroup *mem_cgroup;
    > > + struct task_struct *owner; /* The thread group leader that */
    > > + /* owns the mm_struct. This */
    > > + /* might be useful even outside */
    > > + /* of the config option */
    > > #endif
    > >
    > > #ifdef CONFIG_PROC_FS
    > > diff -puN kernel/fork.c~memory-controller-add-mm-owner kernel/fork.c
    > > --- linux-2.6.25-rc5/kernel/fork.c~memory-controller-add-mm-owner 2008-03-20 13:35:09.000000000 +0530
    > > +++ linux-2.6.25-rc5-balbir/kernel/fork.c 2008-03-24 18:49:29.000000000 +0530
    > > @@ -1357,6 +1357,10 @@ static struct task_struct *copy_process(
    > > write_unlock_irq(&tasklist_lock);
    > > proc_fork_connector(p);
    > > cgroup_post_fork(p);
    > > +
    > > + if (!(clone_flags & CLONE_VM))
    > > + mem_cgroup_fork_init(p);
    > > +
    > > return p;
    > >
    > > bad_fork_free_pid:
    > > diff -puN include/linux/memcontrol.h~memory-controller-add-mm-owner include/linux/memcontrol.h
    > > --- linux-2.6.25-rc5/include/linux/memcontrol.h~memory-controller-add-mm-owner 2008-03-20 13:35:09.000000000 +0530
    > > +++ linux-2.6.25-rc5-balbir/include/linux/memcontrol.h 2008-03-24 18:49:52.000000000 +0530
    > > @@ -29,6 +29,7 @@ struct mm_struct;
    > >
    > > extern void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p);
    > > extern void mm_free_cgroup(struct mm_struct *mm);
    > > +extern void mem_cgroup_fork_init(struct task_struct *p);
    > >
    > > #define page_reset_bad_cgroup(page) ((page)->page_cgroup = 0)
    > >
    > > @@ -49,7 +50,7 @@ extern void mem_cgroup_out_of_memory(str
    > > int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);
    > >
    > > #define mm_match_cgroup(mm, cgroup) \
    > > - ((cgroup) == rcu_dereference((mm)->mem_cgroup))
    > > + ((cgroup) == mem_cgroup_from_task((mm)->owner))
    > >
    > > extern int mem_cgroup_prepare_migration(struct page *page);
    > > extern void mem_cgroup_end_migration(struct page *page);
    > > @@ -72,6 +73,8 @@ extern long mem_cgroup_calc_reclaim_acti
    > > extern long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
    > > struct zone *zone, int priority);
    > >
    > > +extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
    > > +
    > > #else /* CONFIG_CGROUP_MEM_RES_CTLR */
    > > static inline void mm_init_cgroup(struct mm_struct *mm,
    > > struct task_struct *p)
    > > @@ -82,6 +85,10 @@ static inline void mm_free_cgroup(struct
    > > {
    > > }
    > >
    > > +static inline void mem_cgroup_fork_init(struct task_struct *p)
    > > +{
    > > +}
    > > +
    > > static inline void page_reset_bad_cgroup(struct page *page)
    > > {
    > > }
    > > @@ -172,6 +179,11 @@ static inline long mem_cgroup_calc_recla
    > > {
    > > return 0;
    > > }
    > > +
    > > +static void mm_free_fork_cgroup(struct task_struct *p)
    > > +{
    > > +}
    > > +

    >
    > Where is this function used? I don't see the corresponding one
    > with CONFIG_CGROUP_MEM_RES_CTLR enabled?
    >


    I kept that as a template for the freeing path. I'll remove it, since it
    is unnecessary additional code.

    >
    > > #endif /* CONFIG_CGROUP_MEM_CONT */
    > >
    > > #endif /* _LINUX_MEMCONTROL_H */
    > > diff -puN mm/memcontrol.c~memory-controller-add-mm-owner mm/memcontrol.c
    > > --- linux-2.6.25-rc5/mm/memcontrol.c~memory-controller-add-mm-owner 2008-03-20 13:35:09.000000000 +0530
    > > +++ linux-2.6.25-rc5-balbir/mm/memcontrol.c 2008-03-24 19:04:32.000000000 +0530
    > > @@ -236,7 +236,7 @@ static struct mem_cgroup *mem_cgroup_fro
    > > css);
    > > }
    > >
    > > -static struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
    > > +struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
    > > {
    > > return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
    > > struct mem_cgroup, css);
    > > @@ -248,12 +248,40 @@ void mm_init_cgroup(struct mm_struct *mm
    > >
    > > mem = mem_cgroup_from_task(p);
    > > css_get(&mem->css);
    > > - mm->mem_cgroup = mem;
    > > + mm->owner = p;
    > > +}
    > > +
    > > +void mem_cgroup_fork_init(struct task_struct *p)
    > > +{
    > > + struct mm_struct *mm = get_task_mm(p);
    > > + struct mem_cgroup *mem, *oldmem;

    >
    > Leave an empty line here.
    >


    OK


    Thanks for the review
    Balbir

  10. Re: [RFC][-mm] Memory controller add mm->owner

    Balbir Singh wrote:
    > Paul Menage wrote:
    >> On Mon, Mar 24, 2008 at 10:33 AM, Balbir Singh
    >> wrote:
    >>> > OK, so we don't need to handle this for NPTL apps - but for anything
    >>> > still using LinuxThreads or manually constructed clone() calls that
    >>> > use CLONE_VM without CLONE_PID, this could still be an issue.
    >>>
    >>> CLONE_PID?? Do you mean CLONE_THREAD?

    >> Yes, sorry - CLONE_THREAD.
    >>
    >>> For the case you mentioned, mm->owner is a moving target and we don't want to
    >>> spend time finding the successor, that can be expensive when threads start
    >>> exiting one-by-one quickly and when the number of threads are high. I wonder if
    >>> there is an efficient way to find mm->owner in that case.
    >>>

    >> But:
    >>
    >> - running a high-threadcount LinuxThreads process is by definition
    >> inefficient and expensive (hence the move to NPTL)
    >>
    >> - any potential performance hit is only paid at exit time
    >>
    >> - in the normal case, any of your children or one of your siblings
    >> will be a suitable alternate owner
    >>
    >> - in the worst case, it's not going to be worse than doing a
    >> for_each_thread() loop
    >>


    This will have to be the common case, since you never know which clone()
    calls used CLONE_VM and which used CLONE_THREAD. At exit time, we need to
    pay the for_each_process() overhead. Although very unlikely, an application
    can call pthread_* functions (NPTL) and then do a clone() with CLONE_VM,
    thus forcing threads in a thread group and another process to share the
    mm_struct. This makes the mm->owner approach hard to implement.

    >> so I don't think this would be a major problem
    >>

    >
    > I've been looking at zap_threads, I suspect we'll end up implementing a similar
    > loop, which makes me very uncomfortable. Adding code for the least possible
    > scenario. It will not get invoked for CLONE_THREAD, but will get invoked for the
    > case when CLONE_VM is set without CLONE_THREAD.
    >
    > I'll try and experiment a bit more and see what I come up with


    I have yet to benchmark the cost of doing for_each_process() on every exit.
    I suspect we'll see a big drop in performance. I am not sure anymore that
    mm->owner is worth the overhead.


    --
    Warm Regards,
    Balbir Singh
    Linux Technology Center
    IBM, ISTL

  11. Re: [RFC][-mm] Memory controller add mm->owner

    On Wed, Mar 26, 2008 at 3:29 AM, Balbir Singh wrote:
    > >>
    > >> - in the worst case, it's not going to be worse than doing a
    > >> for_each_thread() loop
    > >>

    >
    > This will have to be the common case, since you never know what combination of
    > clone calls did CLONE_VM and what did CLONE_THREAD. At exit time, we need to pay
    > a for_each_process() overhead.


    I'm not convinced of this. All we have to do is find some other
    process p where p->mm == current->mm and make it the new owner.
    Exactly what sequence of clone() calls was used to cause the sharing
    isn't really relevant. I really think that a suitable candidate will
    be found amongst your children or your first sibling in 99.9% of those
    cases where more than one process is using an mm.

    The actual sequence would have to go something like:

    static inline bool need_new_owner(struct mm_struct *mm)
    {
            return mm && mm->owner == current && atomic_read(&mm->mm_users) > 1;
    }

    static inline void try_give_mm_ownership(struct task_struct *task,
                                             struct mm_struct *mm)
    {
            if (task->mm != mm)
                    return;
            task_lock(task);
            if (task->mm == mm)
                    mm->owner = task;
            task_unlock(task);
    }

    /* ...and in the exit path, roughly: */
    struct mm_struct *mm = current->mm;
    struct task_struct *c, *p;

    task_lock(current);
    current->mm = NULL;
    task_unlock(current);

    /* First try my children */
    if (need_new_owner(mm)) {
            list_for_each_entry(c, &current->children, sibling) {
                    try_give_mm_ownership(c, mm);
                    if (!need_new_owner(mm))
                            break;
            }
    }

    /* Then try my siblings */
    if (need_new_owner(mm)) {
            list_for_each_entry(c, &current->real_parent->children, sibling) {
                    try_give_mm_ownership(c, mm);
                    if (!need_new_owner(mm))
                            break;
            }
    }

    if (need_new_owner(mm)) {
            /* We'll almost never get here */
            for_each_process(p) {
                    try_give_mm_ownership(p, mm);
                    if (!need_new_owner(mm))
                            break;
            }
    }

    Paul

  12. Re: [RFC][-mm] Memory controller add mm->owner

    Paul Menage wrote:
    > On Wed, Mar 26, 2008 at 3:29 AM, Balbir Singh wrote:
    >> >>
    >> >> - in the worst case, it's not going to be worse than doing a
    >> >> for_each_thread() loop
    >> >>

    >>
    >> This will have to be the common case, since you never know what combination of
    >> clone calls did CLONE_VM and what did CLONE_THREAD. At exit time, we need to pay
    >> a for_each_process() overhead.

    >
    > I'm not convinced of this. All we have to do is find some other
    > process p where p->mm == current->mm and make it the new owner.
    > Exactly what sequence of clone() calls was used to cause the sharing
    > isn't really relevant. I really think that a suitable candidate will
    > be found amongst your children or your first sibling in 99.9% of those
    > cases where more than one process is using an mm.
    >


    Hmmm.. the 99.9% figure is just guesswork (not measured, though it could
    well be true). I see and understand your code. But before I try to
    implement something like that, I was wondering why zap_threads() does not
    use that heuristic. That should explain my hesitation.

    Can anyone elaborate on zap_threads further?

    --
    Warm Regards,
    Balbir Singh
    Linux Technology Center
    IBM, ISTL

  13. Re: [RFC][-mm] Memory controller add mm->owner

    On Wed, Mar 26, 2008 at 4:41 AM, Balbir Singh wrote:
    >
    > Hmmm.. the 99.9% of the time is just guess work (not measured, could be possibly
    > true). I see and understand your code below. But before I try and implement
    > something like that, I was wondering why zap_threads() does not have that
    > heuristic. That should explain my inhibition.
    >
    > Can anyone elaborate on zap_threads further?
    >


    zap_threads() has to find *all* the other users, whereas in this case
    we only have to find one other user.

    Paul
