[RFC PATCH 3/5] kmemtrace: SLAB hooks. - Kernel

This is a discussion on [RFC PATCH 3/5] kmemtrace: SLAB hooks. - Kernel ; This adds hooks for the SLAB allocator, to allow tracing with kmemtrace. Signed-off-by: Eduard - Gabriel Munteanu --- include/linux/slab_def.h | 16 +++++++++++++--- mm/slab.c | 35 +++++++++++++++++++++++++++++------ 2 files changed, 42 insertions(+), 9 deletions(-) diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 39c3a5e..89d0cca 100644 ...

+ Reply to Thread
Results 1 to 10 of 10

Thread: [RFC PATCH 3/5] kmemtrace: SLAB hooks.

  1. [RFC PATCH 3/5] kmemtrace: SLAB hooks.

    This adds hooks for the SLAB allocator, to allow tracing with kmemtrace.

    Signed-off-by: Eduard - Gabriel Munteanu
    ---
    include/linux/slab_def.h | 16 +++++++++++++---
    mm/slab.c | 35 +++++++++++++++++++++++++++++------
    2 files changed, 42 insertions(+), 9 deletions(-)

    diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
    index 39c3a5e..89d0cca 100644
    --- a/include/linux/slab_def.h
    +++ b/include/linux/slab_def.h
    @@ -14,6 +14,7 @@
    #include /* kmalloc_sizes.h needs PAGE_SIZE */
    #include /* kmalloc_sizes.h needs L1_CACHE_BYTES */
    #include
    +#include

    /* Size description struct for general caches. */
    struct cache_sizes {
    @@ -30,6 +31,8 @@ void *__kmalloc(size_t size, gfp_t flags);

    static inline void *kmalloc(size_t size, gfp_t flags)
    {
    + void *ret;
    +
    if (__builtin_constant_p(size)) {
    int i = 0;

    @@ -50,10 +53,17 @@ static inline void *kmalloc(size_t size, gfp_t flags)
    found:
    #ifdef CONFIG_ZONE_DMA
    if (flags & GFP_DMA)
    - return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep,
    - flags);
    + ret = kmem_cache_alloc(malloc_sizes[i].cs_dmacachep,
    + flags | __GFP_NOTRACE);
    + else
    #endif
    - return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags);
    + ret = kmem_cache_alloc(malloc_sizes[i].cs_cachep,
    + flags | __GFP_NOTRACE);
    +
    + kmemtrace_mark_alloc(KMEMTRACE_KIND_KERNEL, _THIS_IP_, ret,
    + size, malloc_sizes[i].cs_size, flags);
    +
    + return ret;
    }
    return __kmalloc(size, flags);
    }
    diff --git a/mm/slab.c b/mm/slab.c
    index 046607f..29f0599 100644
    --- a/mm/slab.c
    +++ b/mm/slab.c
    @@ -111,6 +111,7 @@
    #include
    #include
    #include
    +#include

    #include
    #include
    @@ -3621,7 +3622,12 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
    */
    void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
    {
    - return __cache_alloc(cachep, flags, __builtin_return_address(0));
    + void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
    +
    + kmemtrace_mark_alloc(KMEMTRACE_KIND_CACHE, _RET_IP_, ret,
    + obj_size(cachep), obj_size(cachep), flags);
    +
    + return ret;
    }
    EXPORT_SYMBOL(kmem_cache_alloc);

    @@ -3669,8 +3675,14 @@ out:
    #ifdef CONFIG_NUMA
    void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
    {
    - return __cache_alloc_node(cachep, flags, nodeid,
    - __builtin_return_address(0));
    + void *ret = __cache_alloc_node(cachep, flags, nodeid,
    + __builtin_return_address(0));
    +
    + kmemtrace_mark_alloc_node(KMEMTRACE_KIND_CACHE, _RET_IP_, ret,
    + obj_size(cachep), obj_size(cachep),
    + flags, nodeid);
    +
    + return ret;
    }
    EXPORT_SYMBOL(kmem_cache_alloc_node);

    @@ -3718,6 +3730,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
    void *caller)
    {
    struct kmem_cache *cachep;
    + void *ret;

    /* If you want to save a few bytes .text space: replace
    * __ with kmem_.
    @@ -3726,12 +3739,18 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
    */
    cachep = __find_general_cachep(size, flags);
    if (unlikely(ZERO_OR_NULL_PTR(cachep)))
    - return cachep;
    - return __cache_alloc(cachep, flags, caller);
    + ret = cachep;
    + else {
    + ret = __cache_alloc(cachep, flags, caller);
    + kmemtrace_mark_alloc(KMEMTRACE_KIND_KERNEL, caller, ret,
    + size, cachep->buffer_size, flags);
    + }
    +
    + return ret;
    }


    -#ifdef CONFIG_DEBUG_SLAB
    +#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
    void *__kmalloc(size_t size, gfp_t flags)
    {
    return __do_kmalloc(size, flags, __builtin_return_address(0));
    @@ -3770,6 +3789,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
    debug_check_no_obj_freed(objp, obj_size(cachep));
    __cache_free(cachep, objp);
    local_irq_restore(flags);
    +
    + kmemtrace_mark_free(KMEMTRACE_KIND_CACHE, _RET_IP_, objp);
    }
    EXPORT_SYMBOL(kmem_cache_free);

    @@ -3796,6 +3817,8 @@ void kfree(const void *objp)
    debug_check_no_obj_freed(objp, obj_size(c));
    __cache_free(c, (void *)objp);
    local_irq_restore(flags);
    +
    + kmemtrace_mark_free(KMEMTRACE_KIND_KERNEL, _RET_IP_, objp);
    }
    EXPORT_SYMBOL(kfree);

    --
    1.5.6.1
    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

  2. Re: [RFC PATCH 3/5] kmemtrace: SLAB hooks.

    Hi Eduard-Gabriel,

    On Thu, Jul 10, 2008 at 9:06 PM, Eduard - Gabriel Munteanu
    wrote:
    > This adds hooks for the SLAB allocator, to allow tracing with kmemtrace.
    >
    > Signed-off-by: Eduard - Gabriel Munteanu
    > static inline void *kmalloc(size_t size, gfp_t flags)
    > {
    > + void *ret;
    > +
    > if (__builtin_constant_p(size)) {
    > int i = 0;
    >
    > @@ -50,10 +53,17 @@ static inline void *kmalloc(size_t size, gfp_t flags)
    > found:
    > #ifdef CONFIG_ZONE_DMA
    > if (flags & GFP_DMA)
    > - return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep,
    > - flags);
    > + ret = kmem_cache_alloc(malloc_sizes[i].cs_dmacachep,
    > + flags | __GFP_NOTRACE);
    > + else
    > #endif
    > - return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags);
    > + ret = kmem_cache_alloc(malloc_sizes[i].cs_cachep,
    > + flags | __GFP_NOTRACE);
    > +
    > + kmemtrace_mark_alloc(KMEMTRACE_KIND_KERNEL, _THIS_IP_, ret,
    > + size, malloc_sizes[i].cs_size, flags);
    > +
    > + return ret;


    I think this would be cleaner if you'd simply add a new
    __kmem_cache_alloc() entry point in SLAB that takes the "kind" as an
    argument. That way you wouldn't have to play tricks with GFP flags.
    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

  3. [PATCH] kmemtrace: SLAB hooks.

    This adds hooks for the SLAB allocator, to allow tracing with kmemtrace.

    Signed-off-by: Eduard - Gabriel Munteanu
    ---

    Dropped the __GFP_NOTRACE thing. Also fixed NUMA tracing and some whitespace
    errors.

    What do you think?

    include/linux/slab_def.h | 56 +++++++++++++++++++++++++++++++++++++-----
    mm/slab.c | 61 +++++++++++++++++++++++++++++++++++++++++----
    2 files changed, 104 insertions(+), 13 deletions(-)

    diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
    index 39c3a5e..040fe72 100644
    --- a/include/linux/slab_def.h
    +++ b/include/linux/slab_def.h
    @@ -14,6 +14,7 @@
    #include /* kmalloc_sizes.h needs PAGE_SIZE */
    #include /* kmalloc_sizes.h needs L1_CACHE_BYTES */
    #include
    +#include

    /* Size description struct for general caches. */
    struct cache_sizes {
    @@ -28,8 +29,20 @@ extern struct cache_sizes malloc_sizes[];
    void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
    void *__kmalloc(size_t size, gfp_t flags);

    +#ifdef CONFIG_KMEMTRACE
    +extern void *__kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags);
    +#else
    +static inline void *__kmem_cache_alloc(struct kmem_cache *cachep,
    + gfp_t flags)
    +{
    + return __kmem_cache_alloc(cachep, flags);
    +}
    +#endif
    +
    static inline void *kmalloc(size_t size, gfp_t flags)
    {
    + void *ret;
    +
    if (__builtin_constant_p(size)) {
    int i = 0;

    @@ -50,10 +63,17 @@ static inline void *kmalloc(size_t size, gfp_t flags)
    found:
    #ifdef CONFIG_ZONE_DMA
    if (flags & GFP_DMA)
    - return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep,
    - flags);
    + ret = __kmem_cache_alloc(malloc_sizes[i].cs_dmacachep,
    + flags);
    + else
    #endif
    - return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags);
    + ret = __kmem_cache_alloc(malloc_sizes[i].cs_cachep,
    + flags);
    +
    + kmemtrace_mark_alloc(KMEMTRACE_KIND_KERNEL, _THIS_IP_, ret,
    + size, malloc_sizes[i].cs_size, flags);
    +
    + return ret;
    }
    return __kmalloc(size, flags);
    }
    @@ -62,8 +82,23 @@ found:
    extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
    extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

    +#ifdef CONFIG_KMEMTRACE
    +extern void *__kmem_cache_alloc_node(struct kmem_cache *cachep,
    + gfp_t flags,
    + int nodeid);
    +#else
    +static inline void *__kmem_cache_alloc_node(struct kmem_cache *cachep,
    + gfp_t flags,
    + int nodeid)
    +{
    + return kmem_cache_alloc_node(cachep, flags, nodeid);
    +}
    +#endif
    +
    static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
    {
    + void *ret;
    +
    if (__builtin_constant_p(size)) {
    int i = 0;

    @@ -84,11 +119,18 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
    found:
    #ifdef CONFIG_ZONE_DMA
    if (flags & GFP_DMA)
    - return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep,
    - flags, node);
    + ret = __kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep,
    + flags, node);
    + else
    #endif
    - return kmem_cache_alloc_node(malloc_sizes[i].cs_cachep,
    - flags, node);
    + ret = __kmem_cache_alloc_node(malloc_sizes[i].cs_cachep,
    + flags, node);
    +
    + kmemtrace_mark_alloc_node(KMEMTRACE_KIND_KERNEL, _THIS_IP_,
    + ret, size, malloc_sizes[i].cs_size,
    + flags, node);
    +
    + return ret;
    }
    return __kmalloc_node(size, flags, node);
    }
    diff --git a/mm/slab.c b/mm/slab.c
    index 046607f..f07e022 100644
    --- a/mm/slab.c
    +++ b/mm/slab.c
    @@ -111,6 +111,7 @@
    #include
    #include
    #include
    +#include

    #include
    #include
    @@ -3621,10 +3622,23 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
    */
    void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
    {
    - return __cache_alloc(cachep, flags, __builtin_return_address(0));
    + void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
    +
    + kmemtrace_mark_alloc(KMEMTRACE_KIND_CACHE, _RET_IP_, ret,
    + obj_size(cachep), obj_size(cachep), flags);
    +
    + return ret;
    }
    EXPORT_SYMBOL(kmem_cache_alloc);

    +#ifdef CONFIG_KMEMTRACE
    +void *__kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
    +{
    + return __cache_alloc(cachep, flags, __builtin_return_address(0));
    +}
    +EXPORT_SYMBOL(__kmem_cache_alloc);
    +#endif
    +
    /**
    * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
    * @cachep: the cache we're checking against
    @@ -3669,20 +3683,44 @@ out:
    #ifdef CONFIG_NUMA
    void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
    {
    - return __cache_alloc_node(cachep, flags, nodeid,
    - __builtin_return_address(0));
    + void *ret = __cache_alloc_node(cachep, flags, nodeid,
    + __builtin_return_address(0));
    +
    + kmemtrace_mark_alloc_node(KMEMTRACE_KIND_CACHE, _RET_IP_, ret,
    + obj_size(cachep), obj_size(cachep),
    + flags, nodeid);
    +
    + return ret;
    }
    EXPORT_SYMBOL(kmem_cache_alloc_node);

    +#ifdef CONFIG_KMEMTRACE
    +void *__kmem_cache_alloc_node(struct kmem_cache *cachep,
    + gfp_t flags,
    + int nodeid)
    +{
    + return __cache_alloc_node(cachep, flags, nodeid,
    + __builtin_return_address(0));
    +}
    +EXPORT_SYMBOL(__kmem_cache_alloc_node);
    +#endif
    +
    static __always_inline void *
    __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
    {
    struct kmem_cache *cachep;
    + void *ret;

    cachep = kmem_find_general_cachep(size, flags);
    if (unlikely(ZERO_OR_NULL_PTR(cachep)))
    return cachep;
    - return kmem_cache_alloc_node(cachep, flags, node);
    + ret = __kmem_cache_alloc_node(cachep, flags, node);
    +
    + kmemtrace_mark_alloc_node(KMEMTRACE_KIND_KERNEL,
    + (unsigned long) caller, ret,
    + size, cachep->buffer_size, flags, node);
    +
    + return ret;
    }

    #ifdef CONFIG_DEBUG_SLAB
    @@ -3718,6 +3756,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
    void *caller)
    {
    struct kmem_cache *cachep;
    + void *ret;

    /* If you want to save a few bytes .text space: replace
    * __ with kmem_.
    @@ -3727,11 +3766,17 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
    cachep = __find_general_cachep(size, flags);
    if (unlikely(ZERO_OR_NULL_PTR(cachep)))
    return cachep;
    - return __cache_alloc(cachep, flags, caller);
    + ret = __cache_alloc(cachep, flags, caller);
    +
    + kmemtrace_mark_alloc(KMEMTRACE_KIND_KERNEL,
    + (unsigned long) caller, ret,
    + size, cachep->buffer_size, flags);
    +
    + return ret;
    }


    -#ifdef CONFIG_DEBUG_SLAB
    +#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
    void *__kmalloc(size_t size, gfp_t flags)
    {
    return __do_kmalloc(size, flags, __builtin_return_address(0));
    @@ -3770,6 +3815,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
    debug_check_no_obj_freed(objp, obj_size(cachep));
    __cache_free(cachep, objp);
    local_irq_restore(flags);
    +
    + kmemtrace_mark_free(KMEMTRACE_KIND_CACHE, _RET_IP_, objp);
    }
    EXPORT_SYMBOL(kmem_cache_free);

    @@ -3796,6 +3843,8 @@ void kfree(const void *objp)
    debug_check_no_obj_freed(objp, obj_size(c));
    __cache_free(c, (void *)objp);
    local_irq_restore(flags);
    +
    + kmemtrace_mark_free(KMEMTRACE_KIND_KERNEL, _RET_IP_, objp);
    }
    EXPORT_SYMBOL(kfree);

    --
    1.5.6.1

    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

  4. Re: [PATCH] kmemtrace: SLAB hooks.

    Hi Eduard-Gabriel,

    On Sat, 2008-07-12 at 22:04 +0300, Eduard - Gabriel Munteanu wrote:
    > This adds hooks for the SLAB allocator, to allow tracing with kmemtrace.
    >
    > Signed-off-by: Eduard - Gabriel Munteanu
    > @@ -28,8 +29,20 @@ extern struct cache_sizes malloc_sizes[];
    > void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
    > void *__kmalloc(size_t size, gfp_t flags);
    >
    > +#ifdef CONFIG_KMEMTRACE
    > +extern void *__kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags);
    > +#else
    > +static inline void *__kmem_cache_alloc(struct kmem_cache *cachep,
    > + gfp_t flags)
    > +{
    > + return __kmem_cache_alloc(cachep, flags);


    Looks as if the function calls itself recursively?

    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

  5. Re: [PATCH] kmemtrace: SLAB hooks.

    Pekka Enberg wrote:
    > Hi Eduard-Gabriel,
    >
    > On Sat, 2008-07-12 at 22:04 +0300, Eduard - Gabriel Munteanu wrote:
    >> This adds hooks for the SLAB allocator, to allow tracing with kmemtrace.
    >>
    >> Signed-off-by: Eduard - Gabriel Munteanu
    >> @@ -28,8 +29,20 @@ extern struct cache_sizes malloc_sizes[];
    >> void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
    >> void *__kmalloc(size_t size, gfp_t flags);
    >>
    >> +#ifdef CONFIG_KMEMTRACE
    >> +extern void *__kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags);
    >> +#else
    >> +static inline void *__kmem_cache_alloc(struct kmem_cache *cachep,
    >> + gfp_t flags)
    >> +{
    >> + return __kmem_cache_alloc(cachep, flags);

    >
    > Looks as if the function calls itself recursively?
    >


    Code not tested? Are you sure you configured for slab?

    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

  6. Re: [PATCH] kmemtrace: SLAB hooks.

    On Mon, 14 Jul 2008 11:32:25 -0500
    Christoph Lameter wrote:

    > > Looks as if the function calls itself recursively?
    > >

    >
    > Code not tested? Are you sure you configured for slab?


    This was a stupid typo on my part. I tested, but only with
    CONFIG_KMEMTRACE, which took the 'extern' ifdef branch. I'll resubmit
    in a few minutes.
    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

  7. [RESEND PATCH] kmemtrace: SLAB hooks.

    This adds hooks for the SLAB allocator, to allow tracing with kmemtrace.

    Signed-off-by: Eduard - Gabriel Munteanu
    ---

    Okay, the rationale is this:
    1. When CONFIG_KMEMTRACE is set, __kmem_cache_alloc is defined in mm/slab.c,
    where it does not call tracers.
    2. When CONFIG_KMEMTRACE is _not_ set, __kmem_cache_alloc goes through
    the usual kmem_cache_alloc, which contains tracers, but are no-ops because
    CONFIG_KMEMTRACE is not set.

    Therefore, in any case, __kmem_cache_alloc does not do tracing.

    Hope this makes sense.

    include/linux/slab_def.h | 56 +++++++++++++++++++++++++++++++++++++-----
    mm/slab.c | 61 +++++++++++++++++++++++++++++++++++++++++----
    2 files changed, 104 insertions(+), 13 deletions(-)

    diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
    index 39c3a5e..758803f 100644
    --- a/include/linux/slab_def.h
    +++ b/include/linux/slab_def.h
    @@ -14,6 +14,7 @@
    #include /* kmalloc_sizes.h needs PAGE_SIZE */
    #include /* kmalloc_sizes.h needs L1_CACHE_BYTES */
    #include
    +#include

    /* Size description struct for general caches. */
    struct cache_sizes {
    @@ -28,8 +29,20 @@ extern struct cache_sizes malloc_sizes[];
    void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
    void *__kmalloc(size_t size, gfp_t flags);

    +#ifdef CONFIG_KMEMTRACE
    +extern void *__kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags);
    +#else
    +static inline void *__kmem_cache_alloc(struct kmem_cache *cachep,
    + gfp_t flags)
    +{
    + return kmem_cache_alloc(cachep, flags);
    +}
    +#endif
    +
    static inline void *kmalloc(size_t size, gfp_t flags)
    {
    + void *ret;
    +
    if (__builtin_constant_p(size)) {
    int i = 0;

    @@ -50,10 +63,17 @@ static inline void *kmalloc(size_t size, gfp_t flags)
    found:
    #ifdef CONFIG_ZONE_DMA
    if (flags & GFP_DMA)
    - return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep,
    - flags);
    + ret = __kmem_cache_alloc(malloc_sizes[i].cs_dmacachep,
    + flags);
    + else
    #endif
    - return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags);
    + ret = __kmem_cache_alloc(malloc_sizes[i].cs_cachep,
    + flags);
    +
    + kmemtrace_mark_alloc(KMEMTRACE_KIND_KERNEL, _THIS_IP_, ret,
    + size, malloc_sizes[i].cs_size, flags);
    +
    + return ret;
    }
    return __kmalloc(size, flags);
    }
    @@ -62,8 +82,23 @@ found:
    extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
    extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

    +#ifdef CONFIG_KMEMTRACE
    +extern void *__kmem_cache_alloc_node(struct kmem_cache *cachep,
    + gfp_t flags,
    + int nodeid);
    +#else
    +static inline void *__kmem_cache_alloc_node(struct kmem_cache *cachep,
    + gfp_t flags,
    + int nodeid)
    +{
    + return kmem_cache_alloc_node(cachep, flags, nodeid);
    +}
    +#endif
    +
    static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
    {
    + void *ret;
    +
    if (__builtin_constant_p(size)) {
    int i = 0;

    @@ -84,11 +119,18 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
    found:
    #ifdef CONFIG_ZONE_DMA
    if (flags & GFP_DMA)
    - return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep,
    - flags, node);
    + ret = __kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep,
    + flags, node);
    + else
    #endif
    - return kmem_cache_alloc_node(malloc_sizes[i].cs_cachep,
    - flags, node);
    + ret = __kmem_cache_alloc_node(malloc_sizes[i].cs_cachep,
    + flags, node);
    +
    + kmemtrace_mark_alloc_node(KMEMTRACE_KIND_KERNEL, _THIS_IP_,
    + ret, size, malloc_sizes[i].cs_size,
    + flags, node);
    +
    + return ret;
    }
    return __kmalloc_node(size, flags, node);
    }
    diff --git a/mm/slab.c b/mm/slab.c
    index 046607f..f07e022 100644
    --- a/mm/slab.c
    +++ b/mm/slab.c
    @@ -111,6 +111,7 @@
    #include
    #include
    #include
    +#include

    #include
    #include
    @@ -3621,10 +3622,23 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
    */
    void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
    {
    - return __cache_alloc(cachep, flags, __builtin_return_address(0));
    + void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
    +
    + kmemtrace_mark_alloc(KMEMTRACE_KIND_CACHE, _RET_IP_, ret,
    + obj_size(cachep), obj_size(cachep), flags);
    +
    + return ret;
    }
    EXPORT_SYMBOL(kmem_cache_alloc);

    +#ifdef CONFIG_KMEMTRACE
    +void *__kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
    +{
    + return __cache_alloc(cachep, flags, __builtin_return_address(0));
    +}
    +EXPORT_SYMBOL(__kmem_cache_alloc);
    +#endif
    +
    /**
    * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
    * @cachep: the cache we're checking against
    @@ -3669,20 +3683,44 @@ out:
    #ifdef CONFIG_NUMA
    void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
    {
    - return __cache_alloc_node(cachep, flags, nodeid,
    - __builtin_return_address(0));
    + void *ret = __cache_alloc_node(cachep, flags, nodeid,
    + __builtin_return_address(0));
    +
    + kmemtrace_mark_alloc_node(KMEMTRACE_KIND_CACHE, _RET_IP_, ret,
    + obj_size(cachep), obj_size(cachep),
    + flags, nodeid);
    +
    + return ret;
    }
    EXPORT_SYMBOL(kmem_cache_alloc_node);

    +#ifdef CONFIG_KMEMTRACE
    +void *__kmem_cache_alloc_node(struct kmem_cache *cachep,
    + gfp_t flags,
    + int nodeid)
    +{
    + return __cache_alloc_node(cachep, flags, nodeid,
    + __builtin_return_address(0));
    +}
    +EXPORT_SYMBOL(__kmem_cache_alloc_node);
    +#endif
    +
    static __always_inline void *
    __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
    {
    struct kmem_cache *cachep;
    + void *ret;

    cachep = kmem_find_general_cachep(size, flags);
    if (unlikely(ZERO_OR_NULL_PTR(cachep)))
    return cachep;
    - return kmem_cache_alloc_node(cachep, flags, node);
    + ret = __kmem_cache_alloc_node(cachep, flags, node);
    +
    + kmemtrace_mark_alloc_node(KMEMTRACE_KIND_KERNEL,
    + (unsigned long) caller, ret,
    + size, cachep->buffer_size, flags, node);
    +
    + return ret;
    }

    #ifdef CONFIG_DEBUG_SLAB
    @@ -3718,6 +3756,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
    void *caller)
    {
    struct kmem_cache *cachep;
    + void *ret;

    /* If you want to save a few bytes .text space: replace
    * __ with kmem_.
    @@ -3727,11 +3766,17 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
    cachep = __find_general_cachep(size, flags);
    if (unlikely(ZERO_OR_NULL_PTR(cachep)))
    return cachep;
    - return __cache_alloc(cachep, flags, caller);
    + ret = __cache_alloc(cachep, flags, caller);
    +
    + kmemtrace_mark_alloc(KMEMTRACE_KIND_KERNEL,
    + (unsigned long) caller, ret,
    + size, cachep->buffer_size, flags);
    +
    + return ret;
    }


    -#ifdef CONFIG_DEBUG_SLAB
    +#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
    void *__kmalloc(size_t size, gfp_t flags)
    {
    return __do_kmalloc(size, flags, __builtin_return_address(0));
    @@ -3770,6 +3815,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
    debug_check_no_obj_freed(objp, obj_size(cachep));
    __cache_free(cachep, objp);
    local_irq_restore(flags);
    +
    + kmemtrace_mark_free(KMEMTRACE_KIND_CACHE, _RET_IP_, objp);
    }
    EXPORT_SYMBOL(kmem_cache_free);

    @@ -3796,6 +3843,8 @@ void kfree(const void *objp)
    debug_check_no_obj_freed(objp, obj_size(c));
    __cache_free(c, (void *)objp);
    local_irq_restore(flags);
    +
    + kmemtrace_mark_free(KMEMTRACE_KIND_KERNEL, _RET_IP_, objp);
    }
    EXPORT_SYMBOL(kfree);

    --
    1.5.6.1

    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

  8. Re: [RESEND PATCH] kmemtrace: SLAB hooks.

    Hi Eduard-Gabriel,

    On Mon, 2008-07-14 at 20:42 +0300, Eduard - Gabriel Munteanu wrote:
    > This adds hooks for the SLAB allocator, to allow tracing with
    > kmemtrace.
    >
    > Signed-off-by: Eduard - Gabriel Munteanu
    > @@ -28,8 +29,20 @@ extern struct cache_sizes malloc_sizes[];
    > void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
    > void *__kmalloc(size_t size, gfp_t flags);
    >
    > +#ifdef CONFIG_KMEMTRACE
    > +extern void *__kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags);
    > +#else
    > +static inline void *__kmem_cache_alloc(struct kmem_cache *cachep,
    > + gfp_t flags)
    > +{
    > + return kmem_cache_alloc(cachep, flags);
    > +}
    > +#endif
    > +


    I'm okay with this approach but then you need to do
    s/__kmem_cache_alloc/kmem_cache_alloc_trace/ or similar. In the kernel,
    it's always the *upper* level function that doesn't have the
    underscores.

    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

  9. Re: [RESEND PATCH] kmemtrace: SLAB hooks.

    On Mon, Jul 14, 2008 at 09:19:48PM +0300, Pekka Enberg wrote:
    > Hi Eduard-Gabriel,
    >
    > On Mon, 2008-07-14 at 20:42 +0300, Eduard - Gabriel Munteanu wrote:
    > > This adds hooks for the SLAB allocator, to allow tracing with
    > > kmemtrace.
    > >
    > > Signed-off-by: Eduard - Gabriel Munteanu
    > > @@ -28,8 +29,20 @@ extern struct cache_sizes malloc_sizes[];
    > > void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
    > > void *__kmalloc(size_t size, gfp_t flags);
    > >
    > > +#ifdef CONFIG_KMEMTRACE
    > > +extern void *__kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags);
    > > +#else
    > > +static inline void *__kmem_cache_alloc(struct kmem_cache *cachep,
    > > + gfp_t flags)
    > > +{
    > > + return kmem_cache_alloc(cachep, flags);
    > > +}
    > > +#endif
    > > +

    >
    > I'm okay with this approach but then you need to do
    > s/__kmem_cache_alloc/kmem_cache_alloc_trace/ or similar. In the kernel,
    > it's always the *upper* level function that doesn't have the
    > underscores.


    Hmm, doesn't really make sense:
    1. This should be called kmem_cache_alloc_notrace, not *_trace.
    __kmem_cache_alloc() _disables_ tracing.
    2. __kmem_cache_alloc is not really upper level now, since it's called
    only in kmalloc. So it's an internal function which is not supposed to
    be used by other kernel code.

    Are you sure I should do this?


    Eduard

    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

  10. Re: [RESEND PATCH] kmemtrace: SLAB hooks.

    On Mon, 2008-07-14 at 21:37 +0300, eduard.munteanu@linux360.ro wrote:
    > > I'm okay with this approach but then you need to do
    > > s/__kmem_cache_alloc/kmem_cache_alloc_trace/ or similar. In the kernel,
    > > it's always the *upper* level function that doesn't have the
    > > underscores.

    >
    > Hmm, doesn't really make sense:
    > 1. This should be called kmem_cache_alloc_notrace, not *_trace.
    > __kmem_cache_alloc() _disables_ tracing.


    kmem_cache_alloc_notrace() sounds good to me.

    > 2. __kmem_cache_alloc is not really upper level now, since it's called
    > only in kmalloc. So it's an internal function which is not supposed to
    > be used by other kernel code.
    >
    > Are you sure I should do this?


    Yes.

    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

+ Reply to Thread