author	Eric Dumazet <eric.dumazet@gmail.com>	2010-06-24 00:54:06 +0000
committer	David S. Miller <davem@davemloft.net>	2010-06-28 23:24:30 -0700
commit	33d91f00c73ba0012bce18c1690cb8313ca7adaa (patch)
tree	71a927098b248685af4dede30d443ef73c344d16
parent	7a9b2d59507d85569b8a456688ef40cf2ac73e48 (diff)
net: u64_stats_fetch_begin_bh() and u64_stats_fetch_retry_bh()
- Must disable preemption in case of 32bit UP in u64_stats_fetch_begin()
  and u64_stats_fetch_retry()

- Add new u64_stats_fetch_begin_bh() and u64_stats_fetch_retry_bh() for
  network usage, disabling BH on 32bit UP only.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	include/linux/u64_stats_sync.h	59
1 file changed, 44 insertions(+), 15 deletions(-)
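As a minimal reader-side sketch of the new helpers (the struct my_stats
layout, field names and my_stats_read() below are hypothetical, not part
of this patch): a reader in code exposed to softirq writers loops until
it observes a snapshot that no writer raced with.

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

/* Hypothetical stats block protected by u64_stats_sync. */
struct my_stats {
	u64			packets;
	u64			bytes;
	struct u64_stats_sync	syncp;
};

/* Reader: retries until the snapshot is consistent.
 * u64_stats_fetch_begin_bh() disables BH on 32bit UP only;
 * on 32bit SMP it uses the seqcount, on 64bit it is a nop. */
static void my_stats_read(const struct my_stats *stats,
			  u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_bh(&stats->syncp);
		*packets = stats->packets;
		*bytes = stats->bytes;
	} while (u64_stats_fetch_retry_bh(&stats->syncp, start));
}

The writer side is sketched after the diff below.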
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index b38e3a5..fa261a0 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -27,6 +27,9 @@
* (On UP, there is no seqcount_t protection, a reader allowing interrupts could
* read partial values)
*
+ * 7) For softirq uses, readers can use u64_stats_fetch_begin_bh() and
+ * u64_stats_fetch_retry_bh() helpers
+ *
* Usage :
*
* Stats producer (writer) should use following template granted it already got
@@ -58,54 +61,80 @@
*/
#include <linux/seqlock.h>
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
struct u64_stats_sync {
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
seqcount_t seq;
+#endif
};
static void inline u64_stats_update_begin(struct u64_stats_sync *syncp)
{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
write_seqcount_begin(&syncp->seq);
+#endif
}
static void inline u64_stats_update_end(struct u64_stats_sync *syncp)
{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
write_seqcount_end(&syncp->seq);
+#endif
}
static unsigned int inline u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
return read_seqcount_begin(&syncp->seq);
+#else
+#if BITS_PER_LONG==32
+ preempt_disable();
+#endif
+ return 0;
+#endif
}
static bool inline u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
unsigned int start)
{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
return read_seqcount_retry(&syncp->seq, start);
-}
-
#else
-struct u64_stats_sync {
-};
-
-static void inline u64_stats_update_begin(struct u64_stats_sync *syncp)
-{
-}
-
-static void inline u64_stats_update_end(struct u64_stats_sync *syncp)
-{
+#if BITS_PER_LONG==32
+ preempt_enable();
+#endif
+ return false;
+#endif
}
-static unsigned int inline u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+/*
+ * In case softirq handlers can update u64 counters, readers can use following helpers
+ * - SMP 32bit arches use seqcount protection, irq safe.
+ * - UP 32bit must disable BH.
+ * - 64bit have no problem atomically reading u64 values, irq safe.
+ */
+static unsigned int inline u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp)
{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ return read_seqcount_begin(&syncp->seq);
+#else
+#if BITS_PER_LONG==32
+ local_bh_disable();
+#endif
return 0;
+#endif
}
-static bool inline u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+static bool inline u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp,
unsigned int start)
{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ return read_seqcount_retry(&syncp->seq, start);
+#else
+#if BITS_PER_LONG==32
+ local_bh_enable();
+#endif
return false;
-}
#endif
+}
#endif /* _LINUX_U64_STATS_SYNC_H */
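For completeness, a writer-side sketch matching the "Stats producer
(writer)" template referenced in the header comment, reusing the
hypothetical struct my_stats above; the writer is assumed to already
have exclusive access to the counters (e.g. a per-cpu block updated
from softirq context).

/* Writer: brackets the 64-bit updates so 32bit SMP readers
 * can detect a concurrent modification via the seqcount;
 * on 64bit both calls compile away to nothing. */
static void my_stats_add(struct my_stats *stats, unsigned int len)
{
	u64_stats_update_begin(&stats->syncp);
	stats->packets++;
	stats->bytes += len;
	u64_stats_update_end(&stats->syncp);
}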