sched: add CONFIG_SMP consistency - Kernel

This is a discussion on sched: add CONFIG_SMP consistency - Kernel ; From: Henrik Austad Do not declare select_task_rq as part of sched_class when CONFIG_SMP is not set. Signed-off-by: Henrik Austad --- include/linux/sched.h | 3 ++- 1 files changed, 2 insertions(+), 1 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index c226c7b..9e0e33c 100644 --- a/include/linux/sched.h ...

+ Reply to Thread
Results 1 to 5 of 5

Thread: sched: add CONFIG_SMP consistency

  1. sched: add CONFIG_SMP consistency

    From: Henrik Austad

    Do not declare select_task_rq as part of sched_class when CONFIG_SMP is
    not set.

    Signed-off-by: Henrik Austad
    ---
    include/linux/sched.h | 3 ++-
    1 files changed, 2 insertions(+), 1 deletions(-)

    diff --git a/include/linux/sched.h b/include/linux/sched.h
    index c226c7b..9e0e33c 100644
    --- a/include/linux/sched.h
    +++ b/include/linux/sched.h
    @@ -898,8 +898,9 @@ struct sched_class {
    void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
    void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
    void (*yield_task) (struct rq *rq);
    +#ifdef CONFIG_SMP
    int (*select_task_rq)(struct task_struct *p, int sync);
    -
    +#endif
    void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync);

    struct task_struct * (*pick_next_task) (struct rq *rq);
    --
    1.6.0.1

    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

  2. Re: sched: add CONFIG_SMP consistency

    Peter Zijlstra wrote:
    > On Tue, 2008-10-21 at 20:45 +0200, henrik@austad.us wrote:
    >> From: Henrik Austad
    >>
    >> Do not declare select_task_rq as part of sched_class when CONFIG_SMP is
    >> not set.

    >


    Not only select_task_rq(), but also set_cpus_allowed(), rq_online() and
    rq_offline().

    > While a proper cleanup, could you do it by re-arranging the methods so
    > as to not create an additional ifdef?
    >


    How about the following patch?

    ==================


    From: Li Zefan
    Date: Wed, 22 Oct 2008 15:13:06 +0800
    Subject: [PATCH] make CONFIG_SMP consistent for sched_class methods

    Do not declare select_task_rq and some other methods as part of sched_class
    when CONFIG_SMP is not set.

    Also gather those methods to avoid CONFIG_SMP mess.

    Signed-off-by: Li Zefan
    ---
    include/linux/sched.h | 12 +++++++-----
    kernel/sched_fair.c | 5 ++---
    kernel/sched_idletask.c | 5 ++---
    kernel/sched_rt.c | 5 ++---
    4 files changed, 13 insertions(+), 14 deletions(-)

    diff --git a/include/linux/sched.h b/include/linux/sched.h
    index 5c38db5..fc1a615 100644
    --- a/include/linux/sched.h
    +++ b/include/linux/sched.h
    @@ -941,7 +941,6 @@ struct sched_class {
    void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
    void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
    void (*yield_task) (struct rq *rq);
    - int (*select_task_rq)(struct task_struct *p, int sync);

    void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync);

    @@ -949,6 +948,8 @@ struct sched_class {
    void (*put_prev_task) (struct rq *rq, struct task_struct *p);

    #ifdef CONFIG_SMP
    + int (*select_task_rq)(struct task_struct *p, int sync);
    +
    unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
    struct rq *busiest, unsigned long max_load_move,
    struct sched_domain *sd, enum cpu_idle_type idle,
    @@ -960,16 +961,17 @@ struct sched_class {
    void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
    void (*post_schedule) (struct rq *this_rq);
    void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
    -#endif

    - void (*set_curr_task) (struct rq *rq);
    - void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
    - void (*task_new) (struct rq *rq, struct task_struct *p);
    void (*set_cpus_allowed)(struct task_struct *p,
    const cpumask_t *newmask);

    void (*rq_online)(struct rq *rq);
    void (*rq_offline)(struct rq *rq);
    +#endif
    +
    + void (*set_curr_task) (struct rq *rq);
    + void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
    + void (*task_new) (struct rq *rq, struct task_struct *p);

    void (*switched_from) (struct rq *this_rq, struct task_struct *task,
    int running);
    diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
    index f604dae..73bef78 100644
    --- a/kernel/sched_fair.c
    +++ b/kernel/sched_fair.c
    @@ -1576,9 +1576,6 @@ static const struct sched_class fair_sched_class = {
    .enqueue_task = enqueue_task_fair,
    .dequeue_task = dequeue_task_fair,
    .yield_task = yield_task_fair,
    -#ifdef CONFIG_SMP
    - .select_task_rq = select_task_rq_fair,
    -#endif /* CONFIG_SMP */

    .check_preempt_curr = check_preempt_wakeup,

    @@ -1586,6 +1583,8 @@ static const struct sched_class fair_sched_class = {
    .put_prev_task = put_prev_task_fair,

    #ifdef CONFIG_SMP
    + .select_task_rq = select_task_rq_fair,
    +
    .load_balance = load_balance_fair,
    .move_one_task = move_one_task_fair,
    #endif
    diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
    index dec4cca..8a21a2e 100644
    --- a/kernel/sched_idletask.c
    +++ b/kernel/sched_idletask.c
    @@ -105,9 +105,6 @@ static const struct sched_class idle_sched_class = {

    /* dequeue is not valid, we print a debug message there: */
    .dequeue_task = dequeue_task_idle,
    -#ifdef CONFIG_SMP
    - .select_task_rq = select_task_rq_idle,
    -#endif /* CONFIG_SMP */

    .check_preempt_curr = check_preempt_curr_idle,

    @@ -115,6 +112,8 @@ static const struct sched_class idle_sched_class = {
    .put_prev_task = put_prev_task_idle,

    #ifdef CONFIG_SMP
    + .select_task_rq = select_task_rq_idle,
    +
    .load_balance = load_balance_idle,
    .move_one_task = move_one_task_idle,
    #endif
    diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
    index b446dc8..d9ba9d5 100644
    --- a/kernel/sched_rt.c
    +++ b/kernel/sched_rt.c
    @@ -1504,9 +1504,6 @@ static const struct sched_class rt_sched_class = {
    .enqueue_task = enqueue_task_rt,
    .dequeue_task = dequeue_task_rt,
    .yield_task = yield_task_rt,
    -#ifdef CONFIG_SMP
    - .select_task_rq = select_task_rq_rt,
    -#endif /* CONFIG_SMP */

    .check_preempt_curr = check_preempt_curr_rt,

    @@ -1514,6 +1511,8 @@ static const struct sched_class rt_sched_class = {
    .put_prev_task = put_prev_task_rt,

    #ifdef CONFIG_SMP
    + .select_task_rq = select_task_rq_rt,
    +
    .load_balance = load_balance_rt,
    .move_one_task = move_one_task_rt,
    .set_cpus_allowed = set_cpus_allowed_rt,
    --
    1.5.4.rc3


    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

  3. Re: sched: add CONFIG_SMP consistency


    * Li Zefan wrote:

    > How about the following patch?


    I've applied it in the form below. Peter, does it look good to you too?

    Ingo

    -------------->
    From 667ff69f5a248fde6cf871f2c9c5c5d2aa182e71 Mon Sep 17 00:00:00 2001
    From: Li Zefan
    Date: Wed, 22 Oct 2008 15:25:26 +0800
    Subject: [PATCH] sched: add CONFIG_SMP consistency

    a patch from Henrik Austad did this:

    >> Do not declare select_task_rq as part of sched_class when CONFIG_SMP is
    >> not set.


    Peter observed:

    > While a proper cleanup, could you do it by re-arranging the methods so
    > as to not create an additional ifdef?


    Do not declare select_task_rq and some other methods as part of sched_class
    when CONFIG_SMP is not set.

    Also gather those methods to avoid CONFIG_SMP mess.

    Idea-by: Henrik Austad
    Signed-off-by: Li Zefan
    Acked-by: Peter Zijlstra
    Signed-off-by: Ingo Molnar
    ---
    include/linux/sched.h | 12 +++++++-----
    kernel/sched_fair.c | 5 ++---
    kernel/sched_idletask.c | 5 ++---
    kernel/sched_rt.c | 5 ++---
    4 files changed, 13 insertions(+), 14 deletions(-)

    diff --git a/include/linux/sched.h b/include/linux/sched.h
    index 4f59c8e..c05b45f 100644
    --- a/include/linux/sched.h
    +++ b/include/linux/sched.h
    @@ -897,7 +897,6 @@ struct sched_class {
    void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
    void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
    void (*yield_task) (struct rq *rq);
    - int (*select_task_rq)(struct task_struct *p, int sync);

    void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync);

    @@ -905,6 +904,8 @@ struct sched_class {
    void (*put_prev_task) (struct rq *rq, struct task_struct *p);

    #ifdef CONFIG_SMP
    + int (*select_task_rq)(struct task_struct *p, int sync);
    +
    unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
    struct rq *busiest, unsigned long max_load_move,
    struct sched_domain *sd, enum cpu_idle_type idle,
    @@ -916,16 +917,17 @@ struct sched_class {
    void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
    void (*post_schedule) (struct rq *this_rq);
    void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
    -#endif

    - void (*set_curr_task) (struct rq *rq);
    - void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
    - void (*task_new) (struct rq *rq, struct task_struct *p);
    void (*set_cpus_allowed)(struct task_struct *p,
    const cpumask_t *newmask);

    void (*rq_online)(struct rq *rq);
    void (*rq_offline)(struct rq *rq);
    +#endif
    +
    + void (*set_curr_task) (struct rq *rq);
    + void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
    + void (*task_new) (struct rq *rq, struct task_struct *p);

    void (*switched_from) (struct rq *this_rq, struct task_struct *task,
    int running);
    diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
    index a0aa38b..8de48a5 100644
    --- a/kernel/sched_fair.c
    +++ b/kernel/sched_fair.c
    @@ -1593,9 +1593,6 @@ static const struct sched_class fair_sched_class = {
    .enqueue_task = enqueue_task_fair,
    .dequeue_task = dequeue_task_fair,
    .yield_task = yield_task_fair,
    -#ifdef CONFIG_SMP
    - .select_task_rq = select_task_rq_fair,
    -#endif /* CONFIG_SMP */

    .check_preempt_curr = check_preempt_wakeup,

    @@ -1603,6 +1600,8 @@ static const struct sched_class fair_sched_class = {
    .put_prev_task = put_prev_task_fair,

    #ifdef CONFIG_SMP
    + .select_task_rq = select_task_rq_fair,
    +
    .load_balance = load_balance_fair,
    .move_one_task = move_one_task_fair,
    #endif
    diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
    index dec4cca..8a21a2e 100644
    --- a/kernel/sched_idletask.c
    +++ b/kernel/sched_idletask.c
    @@ -105,9 +105,6 @@ static const struct sched_class idle_sched_class = {

    /* dequeue is not valid, we print a debug message there: */
    .dequeue_task = dequeue_task_idle,
    -#ifdef CONFIG_SMP
    - .select_task_rq = select_task_rq_idle,
    -#endif /* CONFIG_SMP */

    .check_preempt_curr = check_preempt_curr_idle,

    @@ -115,6 +112,8 @@ static const struct sched_class idle_sched_class = {
    .put_prev_task = put_prev_task_idle,

    #ifdef CONFIG_SMP
    + .select_task_rq = select_task_rq_idle,
    +
    .load_balance = load_balance_idle,
    .move_one_task = move_one_task_idle,
    #endif
    diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
    index cdf5740..c9aa5be 100644
    --- a/kernel/sched_rt.c
    +++ b/kernel/sched_rt.c
    @@ -1502,9 +1502,6 @@ static const struct sched_class rt_sched_class = {
    .enqueue_task = enqueue_task_rt,
    .dequeue_task = dequeue_task_rt,
    .yield_task = yield_task_rt,
    -#ifdef CONFIG_SMP
    - .select_task_rq = select_task_rq_rt,
    -#endif /* CONFIG_SMP */

    .check_preempt_curr = check_preempt_curr_rt,

    @@ -1512,6 +1509,8 @@ static const struct sched_class rt_sched_class = {
    .put_prev_task = put_prev_task_rt,

    #ifdef CONFIG_SMP
    + .select_task_rq = select_task_rq_rt,
    +
    .load_balance = load_balance_rt,
    .move_one_task = move_one_task_rt,
    .set_cpus_allowed = set_cpus_allowed_rt,
    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

  4. Re: sched: add CONFIG_SMP consistency

    On Wed, 2008-10-22 at 09:36 +0200, Ingo Molnar wrote:
    > * Li Zefan wrote:
    >
    > > How about the following patch?

    >
    > i've applied it in the form below. Peter, does it look good to you too?


    yeah,. looks good, thanks guys!
    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

  5. Re: sched: add CONFIG_SMP consistency

    On Wednesday 22 October 2008 09:36:40 Ingo Molnar wrote:
    >
    > * Li Zefan wrote:
    >
    > > How about the following patch?

    >
    > i've applied it in the form below. Peter, does it look good to you too?
    >
    > Ingo


    FWIW:
    Acked-by: Henrik Austad

    Not boot-tested, just build-tested.

    >
    > -------------->
    > From 667ff69f5a248fde6cf871f2c9c5c5d2aa182e71 Mon Sep 17 00:00:00 2001
    > From: Li Zefan
    > Date: Wed, 22 Oct 2008 15:25:26 +0800
    > Subject: [PATCH] sched: add CONFIG_SMP consistency
    >
    > a patch from Henrik Austad did this:
    >
    > >> Do not declare select_task_rq as part of sched_class when CONFIG_SMP is
    > >> not set.

    >
    > Peter observed:
    >
    > > While a proper cleanup, could you do it by re-arranging the methods so
    > > as to not create an additional ifdef?

    >
    > Do not declare select_task_rq and some other methods as part of sched_class
    > when CONFIG_SMP is not set.
    >
    > Also gather those methods to avoid CONFIG_SMP mess.
    >
    > Idea-by: Henrik Austad


    can you change this to henrik@austad.us? (sorry for the error in the first patch... :-))

    > Signed-off-by: Li Zefan
    > Acked-by: Peter Zijlstra
    > Signed-off-by: Ingo Molnar
    > ---
    > include/linux/sched.h | 12 +++++++-----
    > kernel/sched_fair.c | 5 ++---
    > kernel/sched_idletask.c | 5 ++---
    > kernel/sched_rt.c | 5 ++---
    > 4 files changed, 13 insertions(+), 14 deletions(-)
    >
    > diff --git a/include/linux/sched.h b/include/linux/sched.h
    > index 4f59c8e..c05b45f 100644
    > --- a/include/linux/sched.h
    > +++ b/include/linux/sched.h
    > @@ -897,7 +897,6 @@ struct sched_class {
    > void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
    > void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
    > void (*yield_task) (struct rq *rq);
    > - int (*select_task_rq)(struct task_struct *p, int sync);
    >
    > void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync);
    >
    > @@ -905,6 +904,8 @@ struct sched_class {
    > void (*put_prev_task) (struct rq *rq, struct task_struct *p);
    >
    > #ifdef CONFIG_SMP
    > + int (*select_task_rq)(struct task_struct *p, int sync);
    > +
    > unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
    > struct rq *busiest, unsigned long max_load_move,
    > struct sched_domain *sd, enum cpu_idle_type idle,
    > @@ -916,16 +917,17 @@ struct sched_class {
    > void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
    > void (*post_schedule) (struct rq *this_rq);
    > void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
    > -#endif
    >
    > - void (*set_curr_task) (struct rq *rq);
    > - void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
    > - void (*task_new) (struct rq *rq, struct task_struct *p);
    > void (*set_cpus_allowed)(struct task_struct *p,
    > const cpumask_t *newmask);
    >
    > void (*rq_online)(struct rq *rq);
    > void (*rq_offline)(struct rq *rq);
    > +#endif
    > +
    > + void (*set_curr_task) (struct rq *rq);
    > + void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
    > + void (*task_new) (struct rq *rq, struct task_struct *p);
    >
    > void (*switched_from) (struct rq *this_rq, struct task_struct *task,
    > int running);
    > diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
    > index a0aa38b..8de48a5 100644
    > --- a/kernel/sched_fair.c
    > +++ b/kernel/sched_fair.c
    > @@ -1593,9 +1593,6 @@ static const struct sched_class fair_sched_class = {
    > .enqueue_task = enqueue_task_fair,
    > .dequeue_task = dequeue_task_fair,
    > .yield_task = yield_task_fair,
    > -#ifdef CONFIG_SMP
    > - .select_task_rq = select_task_rq_fair,
    > -#endif /* CONFIG_SMP */
    >
    > .check_preempt_curr = check_preempt_wakeup,
    >
    > @@ -1603,6 +1600,8 @@ static const struct sched_class fair_sched_class = {
    > .put_prev_task = put_prev_task_fair,
    >
    > #ifdef CONFIG_SMP
    > + .select_task_rq = select_task_rq_fair,
    > +
    > .load_balance = load_balance_fair,
    > .move_one_task = move_one_task_fair,
    > #endif
    > diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
    > index dec4cca..8a21a2e 100644
    > --- a/kernel/sched_idletask.c
    > +++ b/kernel/sched_idletask.c
    > @@ -105,9 +105,6 @@ static const struct sched_class idle_sched_class = {
    >
    > /* dequeue is not valid, we print a debug message there: */
    > .dequeue_task = dequeue_task_idle,
    > -#ifdef CONFIG_SMP
    > - .select_task_rq = select_task_rq_idle,
    > -#endif /* CONFIG_SMP */
    >
    > .check_preempt_curr = check_preempt_curr_idle,
    >
    > @@ -115,6 +112,8 @@ static const struct sched_class idle_sched_class = {
    > .put_prev_task = put_prev_task_idle,
    >
    > #ifdef CONFIG_SMP
    > + .select_task_rq = select_task_rq_idle,
    > +
    > .load_balance = load_balance_idle,
    > .move_one_task = move_one_task_idle,
    > #endif
    > diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
    > index cdf5740..c9aa5be 100644
    > --- a/kernel/sched_rt.c
    > +++ b/kernel/sched_rt.c
    > @@ -1502,9 +1502,6 @@ static const struct sched_class rt_sched_class = {
    > .enqueue_task = enqueue_task_rt,
    > .dequeue_task = dequeue_task_rt,
    > .yield_task = yield_task_rt,
    > -#ifdef CONFIG_SMP
    > - .select_task_rq = select_task_rq_rt,
    > -#endif /* CONFIG_SMP */
    >
    > .check_preempt_curr = check_preempt_curr_rt,
    >
    > @@ -1512,6 +1509,8 @@ static const struct sched_class rt_sched_class = {
    > .put_prev_task = put_prev_task_rt,
    >
    > #ifdef CONFIG_SMP
    > + .select_task_rq = select_task_rq_rt,
    > +
    > .load_balance = load_balance_rt,
    > .move_one_task = move_one_task_rt,
    > .set_cpus_allowed = set_cpus_allowed_rt,
    > --
    > To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    > the body of a message to majordomo@vger.kernel.org
    > More majordomo info at http://vger.kernel.org/majordomo-info.html
    > Please read the FAQ at http://www.tux.org/lkml/
    >
    >



    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

+ Reply to Thread