author     Chris Mason <chris.mason@oracle.com>  2010-04-15 20:38:14 (GMT)
committer  Andy Grover <andy.grover@oracle.com>  2010-09-09 01:12:24 (GMT)
commit     9e29db0e3645cafa980e68a9c717a761448389e1 (patch)
tree       4ce87432acbda8e16c27c97f3d1ae5339bb53559 /net/rds
parent     acfcd4d4ec4ed8cb504f96d4fabb7a94029b362b (diff)
download   linux-9e29db0e3645cafa980e68a9c717a761448389e1.tar.xz
RDS: Use a generation counter to avoid rds_send_xmit loop
rds_send_xmit is required to loop around after it releases the lock because someone else could have done a trylock, found someone working on the list, and backed off. But once we drop our lock, it is possible that someone else does come in and make progress on the list. We should detect this and not loop around if another process is actually working on the list.

This patch adds a generation counter that is bumped every time we get the lock and do some send work. If the retry notices someone else has bumped the generation counter, it does not need to loop around and continue working.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
Signed-off-by: Andy Grover <andy.grover@oracle.com>
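To make the retry logic concrete before reading the diff, here is a minimal userspace sketch of the same generation-counter pattern, using C11 atomics and a pthread mutex in place of the kernel primitives. All names here (struct conn, send_xmit, do_send_work, queue_empty, pending) are hypothetical stand-ins for illustration, not the RDS code itself.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical stand-ins for the RDS connection state; only the
 * lock and the generation counter mirror the real structure. */
struct conn {
	pthread_mutex_t send_lock;       /* plays the role of c_send_lock */
	atomic_int      send_generation; /* plays the role of c_send_generation */
	atomic_int      pending;         /* stand-in for c_send_queue */
};

static bool queue_empty(struct conn *conn)
{
	return atomic_load(&conn->pending) == 0;
}

/* "Send" one pending message; return false when no progress was made. */
static bool do_send_work(struct conn *conn)
{
	int old = atomic_load(&conn->pending);

	while (old > 0) {
		if (atomic_compare_exchange_weak(&conn->pending, &old, old - 1))
			return true;
	}
	return false;
}

void send_xmit(struct conn *conn)
{
restart:
	if (pthread_mutex_trylock(&conn->send_lock) != 0)
		return; /* someone else holds the lock and is making progress */

	/* Bump the generation while holding the lock, so a concurrent
	 * caller can later tell that somebody else did send work. */
	int gen = atomic_fetch_add(&conn->send_generation, 1) + 1;

	while (do_send_work(conn))
		;

	pthread_mutex_unlock(&conn->send_lock);

	/* A message may have been queued between our last do_send_work()
	 * and the unlock.  Retry only if nobody has bumped the generation
	 * since we did; otherwise another caller already owns the work. */
	if (!queue_empty(conn) && gen == atomic_load(&conn->send_generation))
		goto restart;
}

In the actual patch below, atomic_inc_return() and atomic_read() play the roles of the C11 atomics, and the generation check lets the raced path avoid re-taking conn->c_lock just to test list_empty(); the smp_mb() keeps the unlock ordered against that test.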
Diffstat (limited to 'net/rds')
-rw-r--r--  net/rds/connection.c |  1 +
-rw-r--r--  net/rds/rds.h        |  1 +
-rw-r--r--  net/rds/send.c       | 11 +++++++----
3 files changed, 9 insertions(+), 4 deletions(-)
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 56aebe4..7e4e9df 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -147,6 +147,7 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
conn->c_next_tx_seq = 1;
spin_lock_init(&conn->c_send_lock);
+ atomic_set(&conn->c_send_generation, 1);
INIT_LIST_HEAD(&conn->c_send_queue);
INIT_LIST_HEAD(&conn->c_retrans);
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 2f19d49..b57cb50 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -92,6 +92,7 @@ struct rds_connection {
struct rds_cong_map *c_fcong;
spinlock_t c_send_lock; /* protect send ring */
+ atomic_t c_send_generation;
struct rds_message *c_xmit_rm;
unsigned long c_xmit_sg;
unsigned int c_xmit_hdr_off;
diff --git a/net/rds/send.c b/net/rds/send.c
index de5693c..663fd60 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -112,6 +112,7 @@ int rds_send_xmit(struct rds_connection *conn)
unsigned int tmp;
struct scatterlist *sg;
int ret = 0;
+ int gen = 0;
LIST_HEAD(to_be_dropped);
restart:
@@ -134,6 +135,8 @@ restart:
if (conn->c_trans->xmit_prepare)
conn->c_trans->xmit_prepare(conn);
+ gen = atomic_inc_return(&conn->c_send_generation);
+
/*
* spin trying to push headers and data down the connection until
* the connection doesn't make forward progress.
@@ -359,13 +362,13 @@ restart:
if (ret == 0) {
/* A simple bit test would be way faster than taking the
* spin lock */
- spin_lock_irqsave(&conn->c_lock, flags);
+ smp_mb();
if (!list_empty(&conn->c_send_queue)) {
rds_stats_inc(s_send_lock_queue_raced);
- spin_unlock_irqrestore(&conn->c_lock, flags);
- goto restart;
+ if (gen == atomic_read(&conn->c_send_generation)) {
+ goto restart;
+ }
}
- spin_unlock_irqrestore(&conn->c_lock, flags);
}
out:
return ret;