author | paul.moore@hp.com <paul.moore@hp.com> | 2006-10-04 11:46:31 -0400
committer | David S. Miller <davem@sunset.davemloft.net> | 2006-10-11 23:59:29 -0700
commit | ffb733c65000ee701294f7b80c4eca2a5f335637 (patch)
tree | edda8e25792fe4a7bf0c619787949291276b9ed7
parent | c25d5180441e344a3368d100c57f0a481c6944f7 (diff)
NetLabel: fix a cache race condition
Testing revealed a problem with the NetLabel cache where a cached entry could
be freed while in use by the LSM layer, causing an oops and other problems.
This patch fixes that problem by introducing a reference counter to the cache
entry so that it is only freed when it is no longer in use.
Signed-off-by: Paul Moore <paul.moore@hp.com>
Signed-off-by: James Morris <jmorris@namei.org>
-rw-r--r-- | include/net/netlabel.h | 62
-rw-r--r-- | net/ipv4/cipso_ipv4.c | 18
-rw-r--r-- | net/netlabel/netlabel_kapi.c | 2
-rw-r--r-- | security/selinux/ss/services.c | 37 |
4 files changed, 79 insertions, 40 deletions
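
To make the fix easier to follow before reading the diff: the patch turns the secattr cache field into a pointer to a reference-counted netlbl_lsm_cache, allocated with a count of one, bumped whenever the CIPSO map cache stores the pointer, and actually freed only when the last holder drops its reference. Below is a minimal userspace sketch of that reference-counting lifecycle; it uses C11 atomics in place of the kernel's atomic_t and hypothetical helper names (cache_alloc, cache_get, cache_put), so it illustrates the pattern rather than reproducing the kernel API.

/*
 * Illustrative userspace model of the refcounted LSM cache blob the patch
 * introduces; C11 atomics stand in for the kernel's atomic_t and the helper
 * names are hypothetical.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct lsm_cache {
	atomic_int refcount;
	void (*free_fn)(void *data);	/* LSM-supplied destructor */
	void *data;
};

static struct lsm_cache *cache_alloc(void)
{
	struct lsm_cache *cache = calloc(1, sizeof(*cache));

	if (cache)
		atomic_init(&cache->refcount, 1);	/* creator holds one reference */
	return cache;
}

static void cache_get(struct lsm_cache *cache)
{
	/* A new holder (e.g. a map-cache entry storing the pointer) takes a ref. */
	atomic_fetch_add(&cache->refcount, 1);
}

static void cache_put(struct lsm_cache *cache)
{
	/* Drop one reference; free only when the last holder lets go. */
	if (atomic_fetch_sub(&cache->refcount, 1) != 1)
		return;
	if (cache->free_fn)
		cache->free_fn(cache->data);
	free(cache);
}

int main(void)
{
	struct lsm_cache *cache = cache_alloc();

	if (cache == NULL)
		return 1;
	cache_get(cache);	/* map cache stores the pointer: refcount 1 -> 2 */
	cache_put(cache);	/* original owner drops its ref: blob survives   */
	cache_put(cache);	/* cache entry is evicted: refcount hits 0, freed */
	printf("cache blob freed exactly once\n");
	return 0;
}

The point that resolves the race is that neither holder frees the LSM data unconditionally any more; whichever side drops the last reference performs the single free.
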
diff --git a/include/net/netlabel.h b/include/net/netlabel.h
index c63a580..113337c 100644
--- a/include/net/netlabel.h
+++ b/include/net/netlabel.h
@@ -34,6 +34,7 @@
 #include <linux/net.h>
 #include <linux/skbuff.h>
 #include <net/netlink.h>
+#include <asm/atomic.h>
 
 /*
  * NetLabel - A management interface for maintaining network packet label
@@ -106,6 +107,7 @@ int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info);
 
 /* LSM security attributes */
 struct netlbl_lsm_cache {
+	atomic_t refcount;
 	void (*free) (const void *data);
 	void *data;
 };
@@ -117,7 +119,7 @@ struct netlbl_lsm_secattr {
 	unsigned char *mls_cat;
 	size_t mls_cat_len;
 
-	struct netlbl_lsm_cache cache;
+	struct netlbl_lsm_cache *cache;
 };
 
 /*
@@ -126,6 +128,43 @@ struct netlbl_lsm_secattr {
 
 
 /**
+ * netlbl_secattr_cache_alloc - Allocate and initialize a secattr cache
+ * @flags: the memory allocation flags
+ *
+ * Description:
+ * Allocate and initialize a netlbl_lsm_cache structure.  Returns a pointer
+ * on success, NULL on failure.
+ *
+ */
+static inline struct netlbl_lsm_cache *netlbl_secattr_cache_alloc(int flags)
+{
+	struct netlbl_lsm_cache *cache;
+
+	cache = kzalloc(sizeof(*cache), flags);
+	if (cache)
+		atomic_set(&cache->refcount, 1);
+	return cache;
+}
+
+/**
+ * netlbl_secattr_cache_free - Frees a netlbl_lsm_cache struct
+ * @cache: the struct to free
+ *
+ * Description:
+ * Frees @secattr including all of the internal buffers.
+ *
+ */
+static inline void netlbl_secattr_cache_free(struct netlbl_lsm_cache *cache)
+{
+	if (!atomic_dec_and_test(&cache->refcount))
+		return;
+
+	if (cache->free)
+		cache->free(cache->data);
+	kfree(cache);
+}
+
+/**
  * netlbl_secattr_init - Initialize a netlbl_lsm_secattr struct
  * @secattr: the struct to initialize
  *
@@ -143,20 +182,16 @@ static inline int netlbl_secattr_init(struct netlbl_lsm_secattr *secattr)
 /**
  * netlbl_secattr_destroy - Clears a netlbl_lsm_secattr struct
  * @secattr: the struct to clear
- * @clear_cache: cache clear flag
 *
 * Description:
 * Destroys the @secattr struct, including freeing all of the internal buffers.
- * If @clear_cache is true then free the cache fields, otherwise leave them
- * intact.  The struct must be reset with a call to netlbl_secattr_init()
- * before reuse.
+ * The struct must be reset with a call to netlbl_secattr_init() before reuse.
 *
 */
-static inline void netlbl_secattr_destroy(struct netlbl_lsm_secattr *secattr,
-					  u32 clear_cache)
+static inline void netlbl_secattr_destroy(struct netlbl_lsm_secattr *secattr)
 {
-	if (clear_cache && secattr->cache.data != NULL && secattr->cache.free)
-		secattr->cache.free(secattr->cache.data);
+	if (secattr->cache)
+		netlbl_secattr_cache_free(secattr->cache);
 	kfree(secattr->domain);
 	kfree(secattr->mls_cat);
 }
@@ -178,17 +213,14 @@ static inline struct netlbl_lsm_secattr *netlbl_secattr_alloc(int flags)
 /**
 * netlbl_secattr_free - Frees a netlbl_lsm_secattr struct
 * @secattr: the struct to free
- * @clear_cache: cache clear flag
 *
 * Description:
- * Frees @secattr including all of the internal buffers.  If @clear_cache is
- * true then free the cache fields, otherwise leave them intact.
+ * Frees @secattr including all of the internal buffers.
 *
 */
-static inline void netlbl_secattr_free(struct netlbl_lsm_secattr *secattr,
-				       u32 clear_cache)
+static inline void netlbl_secattr_free(struct netlbl_lsm_secattr *secattr)
 {
-	netlbl_secattr_destroy(secattr, clear_cache);
+	netlbl_secattr_destroy(secattr);
 	kfree(secattr);
 }
 
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index a8e2e87..bde8cca 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -43,6 +43,7 @@
 #include <net/tcp.h>
 #include <net/netlabel.h>
 #include <net/cipso_ipv4.h>
+#include <asm/atomic.h>
 #include <asm/bug.h>
 
 struct cipso_v4_domhsh_entry {
@@ -79,7 +80,7 @@ struct cipso_v4_map_cache_entry {
 	unsigned char *key;
 	size_t key_len;
 
-	struct netlbl_lsm_cache lsm_data;
+	struct netlbl_lsm_cache *lsm_data;
 
 	u32 activity;
 	struct list_head list;
@@ -188,13 +189,14 @@ static void cipso_v4_doi_domhsh_free(struct rcu_head *entry)
 * @entry: the entry to free
 *
 * Description:
- * This function frees the memory associated with a cache entry.
+ * This function frees the memory associated with a cache entry including the
+ * LSM cache data if there are no longer any users, i.e. reference count == 0.
 *
 */
 static void cipso_v4_cache_entry_free(struct cipso_v4_map_cache_entry *entry)
 {
-	if (entry->lsm_data.free)
-		entry->lsm_data.free(entry->lsm_data.data);
+	if (entry->lsm_data)
+		netlbl_secattr_cache_free(entry->lsm_data);
 	kfree(entry->key);
 	kfree(entry);
 }
@@ -315,8 +317,8 @@ static int cipso_v4_cache_check(const unsigned char *key,
 		    entry->key_len == key_len &&
 		    memcmp(entry->key, key, key_len) == 0) {
 			entry->activity += 1;
-			secattr->cache.free = entry->lsm_data.free;
-			secattr->cache.data = entry->lsm_data.data;
+			atomic_inc(&entry->lsm_data->refcount);
+			secattr->cache = entry->lsm_data;
 			if (prev_entry == NULL) {
 				spin_unlock_bh(&cipso_v4_cache[bkt].lock);
 				return 0;
@@ -383,8 +385,8 @@ int cipso_v4_cache_add(const struct sk_buff *skb,
 	memcpy(entry->key, cipso_ptr, cipso_ptr_len);
 	entry->key_len = cipso_ptr_len;
 	entry->hash = cipso_v4_map_cache_hash(cipso_ptr, cipso_ptr_len);
-	entry->lsm_data.free = secattr->cache.free;
-	entry->lsm_data.data = secattr->cache.data;
+	atomic_inc(&secattr->cache->refcount);
+	entry->lsm_data = secattr->cache;
 
 	bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETBITS - 1);
 	spin_lock_bh(&cipso_v4_cache[bkt].lock);
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index 54fb7de..ff97110 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -200,7 +200,7 @@ void netlbl_cache_invalidate(void)
 int netlbl_cache_add(const struct sk_buff *skb,
 		     const struct netlbl_lsm_secattr *secattr)
 {
-	if (secattr->cache.data == NULL)
+	if (secattr->cache == NULL)
 		return -ENOMSG;
 
 	if (CIPSO_V4_OPTEXIST(skb))
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 0c219a1..bb2d2bc 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -2172,7 +2172,12 @@ struct netlbl_cache {
 */
 static void selinux_netlbl_cache_free(const void *data)
 {
-	struct netlbl_cache *cache = NETLBL_CACHE(data);
+	struct netlbl_cache *cache;
+
+	if (data == NULL)
+		return;
+
+	cache = NETLBL_CACHE(data);
 	switch (cache->type) {
 	case NETLBL_CACHE_T_MLS:
 		ebitmap_destroy(&cache->data.mls_label.level[0].cat);
@@ -2197,17 +2202,20 @@ static void selinux_netlbl_cache_add(struct sk_buff *skb, struct context *ctx)
 	struct netlbl_lsm_secattr secattr;
 
 	netlbl_secattr_init(&secattr);
+	secattr.cache = netlbl_secattr_cache_alloc(GFP_ATOMIC);
+	if (secattr.cache == NULL)
+		goto netlbl_cache_add_return;
 
 	cache = kzalloc(sizeof(*cache), GFP_ATOMIC);
 	if (cache == NULL)
-		goto netlbl_cache_add_failure;
-	secattr.cache.free = selinux_netlbl_cache_free;
-	secattr.cache.data = (void *)cache;
+		goto netlbl_cache_add_return;
+	secattr.cache->free = selinux_netlbl_cache_free;
+	secattr.cache->data = (void *)cache;
 
 	cache->type = NETLBL_CACHE_T_MLS;
 	if (ebitmap_cpy(&cache->data.mls_label.level[0].cat,
 			&ctx->range.level[0].cat) != 0)
-		goto netlbl_cache_add_failure;
+		goto netlbl_cache_add_return;
 	cache->data.mls_label.level[1].cat.highbit =
 		cache->data.mls_label.level[0].cat.highbit;
 	cache->data.mls_label.level[1].cat.node =
@@ -2215,13 +2223,10 @@ static void selinux_netlbl_cache_add(struct sk_buff *skb, struct context *ctx)
 	cache->data.mls_label.level[0].sens = ctx->range.level[0].sens;
 	cache->data.mls_label.level[1].sens = ctx->range.level[0].sens;
 
-	if (netlbl_cache_add(skb, &secattr) != 0)
-		goto netlbl_cache_add_failure;
-
-	return;
+	netlbl_cache_add(skb, &secattr);
 
-netlbl_cache_add_failure:
-	netlbl_secattr_destroy(&secattr, 1);
+netlbl_cache_add_return:
+	netlbl_secattr_destroy(&secattr);
 }
 
 /**
@@ -2263,8 +2268,8 @@ static int selinux_netlbl_secattr_to_sid(struct sk_buff *skb,
 
 	POLICY_RDLOCK;
 
-	if (secattr->cache.data) {
-		cache = NETLBL_CACHE(secattr->cache.data);
+	if (secattr->cache) {
+		cache = NETLBL_CACHE(secattr->cache->data);
 		switch (cache->type) {
 		case NETLBL_CACHE_T_SID:
 			*sid = cache->data.sid;
@@ -2369,7 +2374,7 @@ static int selinux_netlbl_skbuff_getsid(struct sk_buff *skb,
 						  &secattr,
 						  base_sid,
 						  sid);
-	netlbl_secattr_destroy(&secattr, 0);
+	netlbl_secattr_destroy(&secattr);
 
 	return rc;
 }
@@ -2415,7 +2420,7 @@ static int selinux_netlbl_socket_setsid(struct socket *sock, u32 sid)
 	if (rc == 0)
 		sksec->nlbl_state = NLBL_LABELED;
 
-	netlbl_secattr_destroy(&secattr, 0);
+	netlbl_secattr_destroy(&secattr);
 
 netlbl_socket_setsid_return:
 	POLICY_RDUNLOCK;
@@ -2517,7 +2522,7 @@ void selinux_netlbl_sock_graft(struct sock *sk, struct socket *sock)
 					     sksec->sid,
 					     &nlbl_peer_sid) == 0)
 		sksec->peer_sid = nlbl_peer_sid;
-	netlbl_secattr_destroy(&secattr, 0);
+	netlbl_secattr_destroy(&secattr);
 
 	sksec->nlbl_state = NLBL_REQUIRE;
 }