author    Allan Stephens <allan.stephens@windriver.com>    2008-04-15 00:15:50 -0700
committer David S. Miller <davem@davemloft.net>    2008-04-15 00:15:50 -0700
commit    1819b83718dc3fe0aea0a2c3cd48d617e2003606 (patch)
tree      aad3787178db838ac21b8e0dd05b472377cc5a74
parent    7a8036c2b93c8301afce8f75ac099c347bad569d (diff)
[TIPC]: Correct "off by 1" error in socket queue limit enforcement
This patch fixes a bug that allowed TIPC to queue 1 more message than allowed by the socket receive queue threshold limits. The patch also improves the threshold code's logic and naming to help prevent this sort of error from recurring in the future.

Signed-off-by: Allan Stephens <allan.stephens@windriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
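The off-by-one comes down to the comparison operator in the threshold test: with ">" a queue that has already reached its threshold still accepts one more message, while ">=" rejects it at the limit. Below is a minimal user-space sketch of the two checks, not kernel code; the OVERLOAD_LIMIT_BASE value is illustrative and the importance-based scaling of the threshold is omitted.

/*
 * Sketch of the old vs. new rejection test.  Values are illustrative,
 * not taken from the kernel.
 */
#include <stdio.h>

#define OVERLOAD_LIMIT_BASE 5000	/* illustrative value only */

/* Old check: at exactly the threshold, the message is still accepted */
static int old_queue_overloaded(unsigned int queue_size, unsigned int threshold)
{
	return queue_size > threshold;	/* off by 1: equality slips through */
}

/* New check: at exactly the threshold, the message is rejected */
static int new_rx_queue_full(unsigned int queue_size, unsigned int threshold)
{
	return queue_size >= threshold;
}

int main(void)
{
	unsigned int threshold = OVERLOAD_LIMIT_BASE;
	unsigned int queue_size = threshold;	/* queue exactly at the limit */

	printf("old check rejects: %d\n", old_queue_overloaded(queue_size, threshold));
	printf("new check rejects: %d\n", new_rx_queue_full(queue_size, threshold));
	return 0;
}

Run against a queue sitting exactly at the limit, the old check prints 0 (message accepted, queue grows to threshold + 1) and the new check prints 1 (message rejected).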
-rw-r--r-- net/tipc/socket.c | 22
1 file changed, 10 insertions(+), 12 deletions(-)
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index ead22e5..4c83aba 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1079,15 +1079,15 @@ exit:
}
/**
- * queue_overloaded - test if queue overload condition exists
+ * rx_queue_full - determine if receive queue can accept another message
+ * @msg: message to be added to queue
* @queue_size: current size of queue
* @base: nominal maximum size of queue
- * @msg: message to be added to queue
*
- * Returns 1 if queue is currently overloaded, 0 otherwise
+ * Returns 1 if queue is unable to accept message, 0 otherwise
*/
-static int queue_overloaded(u32 queue_size, u32 base, struct tipc_msg *msg)
+static int rx_queue_full(struct tipc_msg *msg, u32 queue_size, u32 base)
{
u32 threshold;
u32 imp = msg_importance(msg);
@@ -1104,7 +1104,7 @@ static int queue_overloaded(u32 queue_size, u32 base, struct tipc_msg *msg)
if (msg_connected(msg))
threshold *= 4;
- return (queue_size > threshold);
+ return (queue_size >= threshold);
}
/**
@@ -1189,16 +1189,14 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
/* Reject message if there isn't room to queue it */
- if (unlikely((u32)atomic_read(&tipc_queue_size) >
- OVERLOAD_LIMIT_BASE)) {
- if (queue_overloaded(atomic_read(&tipc_queue_size),
- OVERLOAD_LIMIT_BASE, msg))
+ recv_q_len = (u32)atomic_read(&tipc_queue_size);
+ if (unlikely(recv_q_len >= OVERLOAD_LIMIT_BASE)) {
+ if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE))
return TIPC_ERR_OVERLOAD;
}
recv_q_len = skb_queue_len(&tsock->sk.sk_receive_queue);
- if (unlikely(recv_q_len > (OVERLOAD_LIMIT_BASE / 2))) {
- if (queue_overloaded(recv_q_len,
- OVERLOAD_LIMIT_BASE / 2, msg))
+ if (unlikely(recv_q_len >= (OVERLOAD_LIMIT_BASE / 2))) {
+ if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE / 2))
return TIPC_ERR_OVERLOAD;
}
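After the change, dispatch() does a cheap length comparison against the nominal base first and only then calls the importance-aware rx_queue_full() test. The following user-space model shows that two-stage gating; the struct, helper names, and base value are illustrative, and the threshold scaling inside rx_queue_full() is reduced to the connected-message multiplier visible in the first hunk above.

/*
 * Model of the two-stage check in dispatch(): a fast comparison against
 * the nominal base, then the scaled threshold test only when the fast
 * check trips.  Illustrative values, not the kernel's.
 */
#include <stdbool.h>
#include <stdio.h>

#define OVERLOAD_LIMIT_BASE 5000	/* illustrative */

struct fake_msg {
	bool connected;			/* stands in for msg_connected(msg) */
};

static int rx_queue_full(const struct fake_msg *msg,
			 unsigned int queue_size, unsigned int base)
{
	unsigned int threshold = base;	/* importance scaling omitted */

	if (msg->connected)
		threshold *= 4;		/* connected messages get 4x headroom */

	return queue_size >= threshold;
}

/* Returns true if the message would be rejected with an overload error */
static bool would_reject(const struct fake_msg *msg, unsigned int recv_q_len)
{
	if (recv_q_len >= OVERLOAD_LIMIT_BASE &&
	    rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE))
		return true;
	return false;
}

int main(void)
{
	struct fake_msg dgram = { .connected = false };
	struct fake_msg conn  = { .connected = true };

	/* At exactly the base limit: datagram rejected, connected still accepted */
	printf("datagram at base:  %d\n", would_reject(&dgram, OVERLOAD_LIMIT_BASE));
	printf("connected at base: %d\n", would_reject(&conn, OVERLOAD_LIMIT_BASE));
	/* At 4x the base, the connected message is rejected as well */
	printf("connected at 4x:   %d\n", would_reject(&conn, 4 * OVERLOAD_LIMIT_BASE));
	return 0;
}

The fast check keeps the common case cheap: the scaled threshold is only computed once the queue length has at least reached the base limit, and the >= comparison ensures the queue can never exceed the effective threshold by one.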