path: root/net/rds/send.c
author		Andy Grover <andy.grover@oracle.com>	2010-03-24 00:39:07 (GMT)
committer	Andy Grover <andy.grover@oracle.com>	2010-09-09 01:12:12 (GMT)
commit		049ee3f500954176a87f22e6ee3e98aecb1b8958 (patch)
tree		5dfd8cf3e6d9a7a15e80f6ddee7f4ce7c4aa7a8c	/net/rds/send.c
parent		f17a1a55fb672d7f64be7f2e940ef5669e5efa0a (diff)
download	linux-049ee3f500954176a87f22e6ee3e98aecb1b8958.tar.xz
RDS: Change send lock from a mutex to a spinlock
This change allows us to call rds_send_xmit() from a tasklet, which is
crucial to our new operating model.

* Change c_send_lock to a spinlock
* Update stats fields "sem_" to "_lock"
* Remove unneeded rds_conn_is_sending()

About locking between shutdown and send -- send checks if the connection
is up. Shutdown puts the connection into DISCONNECTING. After this, all
threads entering send will exit immediately. However, a thread could be
*in* send_xmit(), so shutdown acquires the c_send_lock to ensure everyone
is out before proceeding with connection shutdown.

Signed-off-by: Andy Grover <andy.grover@oracle.com>
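
A minimal sketch of the send/shutdown handshake described above, written as a
self-contained userspace analogue rather than the RDS code itself: the
c_send_lock name and the DISCONNECTING state mirror this patch, but the struct
layout, the pthread primitives, and the plain (non-atomic) state stores are
assumptions made to keep the illustration short.

    #include <pthread.h>

    enum conn_state { CONN_UP, CONN_DISCONNECTING, CONN_DOWN };

    struct conn {
            enum conn_state state;              /* real code uses atomic state transitions */
            pthread_spinlock_t c_send_lock;     /* was a mutex before this patch */
    };

    static void conn_init(struct conn *conn)
    {
            conn->state = CONN_UP;
            pthread_spin_init(&conn->c_send_lock, PTHREAD_PROCESS_PRIVATE);
    }

    /*
     * Sender path: exit immediately if the connection is not up, then try to
     * take the send lock without sleeping.  A second concurrent sender simply
     * backs off, so only one task feeds the connection at a time and the
     * function is safe to call from non-sleeping context such as a tasklet.
     */
    static int send_xmit(struct conn *conn)
    {
            if (conn->state != CONN_UP)
                    return -1;

            if (pthread_spin_trylock(&conn->c_send_lock))
                    return -1;      /* another sender is already feeding the queue */

            /* ... walk the send queue and transmit messages here ... */

            pthread_spin_unlock(&conn->c_send_lock);
            return 0;
    }

    /*
     * Shutdown path: mark the connection DISCONNECTING so every thread entering
     * send_xmit() bails out at the state check, then acquire and release
     * c_send_lock to wait out any sender that was already inside send_xmit().
     */
    static void conn_shutdown(struct conn *conn)
    {
            conn->state = CONN_DISCONNECTING;

            pthread_spin_lock(&conn->c_send_lock);
            pthread_spin_unlock(&conn->c_send_lock);

            /* no sender can be in flight any more; tear the connection down */
            conn->state = CONN_DOWN;
    }
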
Diffstat (limited to 'net/rds/send.c')
-rw-r--r--	net/rds/send.c	15
1 file changed, 7 insertions, 8 deletions
diff --git a/net/rds/send.c b/net/rds/send.c
index 8a0647a..d4feec6 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -116,19 +116,18 @@ int rds_send_xmit(struct rds_connection *conn)
int was_empty = 0;
LIST_HEAD(to_be_dropped);
+ if (!rds_conn_up(conn))
+ goto out;
+
/*
* sendmsg calls here after having queued its message on the send
* queue. We only have one task feeding the connection at a time. If
* another thread is already feeding the queue then we back off. This
* avoids blocking the caller and trading per-connection data between
* caches per message.
- *
- * The sem holder will issue a retry if they notice that someone queued
- * a message after they stopped walking the send queue but before they
- * dropped the sem.
*/
- if (!mutex_trylock(&conn->c_send_lock)) {
- rds_stats_inc(s_send_sem_contention);
+ if (!spin_trylock_irqsave(&conn->c_send_lock, flags)) {
+ rds_stats_inc(s_send_lock_contention);
ret = -ENOMEM;
goto out;
}
@@ -346,7 +345,7 @@ int rds_send_xmit(struct rds_connection *conn)
* stop processing the loop when the transport hasn't taken
* responsibility for forward progress.
*/
- mutex_unlock(&conn->c_send_lock);
+ spin_unlock_irqrestore(&conn->c_send_lock, flags);
if (send_quota == 0 && !was_empty) {
/* We exhausted the send quota, but there's work left to
@@ -360,7 +359,7 @@ int rds_send_xmit(struct rds_connection *conn)
* spin lock */
spin_lock_irqsave(&conn->c_lock, flags);
if (!list_empty(&conn->c_send_queue)) {
- rds_stats_inc(s_send_sem_queue_raced);
+ rds_stats_inc(s_send_lock_queue_raced);
ret = -EAGAIN;
}
spin_unlock_irqrestore(&conn->c_lock, flags);
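
The last hunk above guards against a message being queued after the sender
stopped walking the queue but before it dropped c_send_lock. A rough userspace
sketch of that re-check follows; the c_lock mutex and c_send_queue_len counter
are assumed stand-ins, and the condition is simplified compared with the exact
logic in rds_send_xmit().

    #include <errno.h>
    #include <pthread.h>

    struct conn {
            pthread_spinlock_t c_send_lock;     /* serializes the send-queue walk */
            pthread_mutex_t c_lock;             /* protects the send queue itself */
            int c_send_queue_len;               /* stand-in for list_empty(&conn->c_send_queue) */
    };

    /* Called once a sender has finished its queue walk. */
    static int send_xmit_done(struct conn *conn)
    {
            int ret = 0;

            /* done transmitting: let other senders in */
            pthread_spin_unlock(&conn->c_send_lock);

            /*
             * Another thread may have queued a message after this sender stopped
             * walking the queue but before the lock was released above; without
             * a retry that message would sit on the queue with nobody sending it.
             */
            pthread_mutex_lock(&conn->c_lock);
            if (conn->c_send_queue_len != 0)
                    ret = -EAGAIN;              /* caller should reschedule the send */
            pthread_mutex_unlock(&conn->c_lock);

            return ret;
    }
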