From 305a47b17c6efcc0e7b67b0bd41e2c12b7af758b Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Fri, 16 Jan 2009 16:21:12 +0000 Subject: dlm: Change rwlock which is only used in write mode to a spinlock The ls_dirtbl[].lock was an rwlock, but since it was only used in write mode a spinlock will suffice. Signed-off-by: Steven Whitehouse Signed-off-by: David Teigland diff --git a/fs/dlm/dir.c b/fs/dlm/dir.c index 92969f8..858fba1 100644 --- a/fs/dlm/dir.c +++ b/fs/dlm/dir.c @@ -156,7 +156,7 @@ void dlm_dir_remove_entry(struct dlm_ls *ls, int nodeid, char *name, int namelen bucket = dir_hash(ls, name, namelen); - write_lock(&ls->ls_dirtbl[bucket].lock); + spin_lock(&ls->ls_dirtbl[bucket].lock); de = search_bucket(ls, name, namelen, bucket); @@ -173,7 +173,7 @@ void dlm_dir_remove_entry(struct dlm_ls *ls, int nodeid, char *name, int namelen list_del(&de->list); kfree(de); out: - write_unlock(&ls->ls_dirtbl[bucket].lock); + spin_unlock(&ls->ls_dirtbl[bucket].lock); } void dlm_dir_clear(struct dlm_ls *ls) @@ -185,14 +185,14 @@ void dlm_dir_clear(struct dlm_ls *ls) DLM_ASSERT(list_empty(&ls->ls_recover_list), ); for (i = 0; i < ls->ls_dirtbl_size; i++) { - write_lock(&ls->ls_dirtbl[i].lock); + spin_lock(&ls->ls_dirtbl[i].lock); head = &ls->ls_dirtbl[i].list; while (!list_empty(head)) { de = list_entry(head->next, struct dlm_direntry, list); list_del(&de->list); put_free_de(ls, de); } - write_unlock(&ls->ls_dirtbl[i].lock); + spin_unlock(&ls->ls_dirtbl[i].lock); } } @@ -307,17 +307,17 @@ static int get_entry(struct dlm_ls *ls, int nodeid, char *name, bucket = dir_hash(ls, name, namelen); - write_lock(&ls->ls_dirtbl[bucket].lock); + spin_lock(&ls->ls_dirtbl[bucket].lock); de = search_bucket(ls, name, namelen, bucket); if (de) { *r_nodeid = de->master_nodeid; - write_unlock(&ls->ls_dirtbl[bucket].lock); + spin_unlock(&ls->ls_dirtbl[bucket].lock); if (*r_nodeid == nodeid) return -EEXIST; return 0; } - write_unlock(&ls->ls_dirtbl[bucket].lock); + spin_unlock(&ls->ls_dirtbl[bucket].lock); if (namelen > DLM_RESNAME_MAXLEN) return -EINVAL; @@ -330,7 +330,7 @@ static int get_entry(struct dlm_ls *ls, int nodeid, char *name, de->length = namelen; memcpy(de->name, name, namelen); - write_lock(&ls->ls_dirtbl[bucket].lock); + spin_lock(&ls->ls_dirtbl[bucket].lock); tmp = search_bucket(ls, name, namelen, bucket); if (tmp) { kfree(de); @@ -339,7 +339,7 @@ static int get_entry(struct dlm_ls *ls, int nodeid, char *name, list_add_tail(&de->list, &ls->ls_dirtbl[bucket].list); } *r_nodeid = de->master_nodeid; - write_unlock(&ls->ls_dirtbl[bucket].lock); + spin_unlock(&ls->ls_dirtbl[bucket].lock); return 0; } diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h index 076e86f..d01ca0a 100644 --- a/fs/dlm/dlm_internal.h +++ b/fs/dlm/dlm_internal.h @@ -99,7 +99,7 @@ struct dlm_direntry { struct dlm_dirtable { struct list_head list; - rwlock_t lock; + spinlock_t lock; }; struct dlm_rsbtable { diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c index aa32e5f..cd8e2df 100644 --- a/fs/dlm/lockspace.c +++ b/fs/dlm/lockspace.c @@ -487,7 +487,7 @@ static int new_lockspace(char *name, int namelen, void **lockspace, goto out_lkbfree; for (i = 0; i < size; i++) { INIT_LIST_HEAD(&ls->ls_dirtbl[i].list); - rwlock_init(&ls->ls_dirtbl[i].lock); + spin_lock_init(&ls->ls_dirtbl[i].lock); } INIT_LIST_HEAD(&ls->ls_waiters); -- cgit v0.10.2 From 44ad532b3277f0cae55bfe0625d3140cf73af450 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Thu, 22 Jan 2009 13:24:49 -0800 Subject: dlm: use ipv6_addr_copy Signed-off-by: Joe Perches 
Signed-off-by: David Teigland diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index 103a5eb..bf09262 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -53,6 +53,7 @@ #include #include #include +#include #include "dlm_internal.h" #include "lowcomms.h" @@ -250,8 +251,7 @@ static int nodeid_to_addr(int nodeid, struct sockaddr *retaddr) } else { struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &addr; struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) retaddr; - memcpy(&ret6->sin6_addr, &in6->sin6_addr, - sizeof(in6->sin6_addr)); + ipv6_addr_copy(&ret6->sin6_addr, &in6->sin6_addr); } return 0; -- cgit v0.10.2 From 2cf12c0bf261e19d9641d7b8aa220e2651a03289 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Thu, 22 Jan 2009 13:26:47 -0800 Subject: dlm: comment typo fixes Signed-off-by: Joe Perches Signed-off-by: David Teigland diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index bf09262..982314c 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -21,7 +21,7 @@ * * Cluster nodes are referred to by their nodeids. nodeids are * simply 32 bit numbers to the locking module - if they need to - * be expanded for the cluster infrastructure then that is it's + * be expanded for the cluster infrastructure then that is its * responsibility. It is this layer's * responsibility to resolve these into IP address or * whatever it needs for inter-node communication. @@ -36,9 +36,9 @@ * of high load. Also, this way, the sending thread can collect together * messages bound for one node and send them in one block. * - * lowcomms will choose to use wither TCP or SCTP as its transport layer + * lowcomms will choose to use either TCP or SCTP as its transport layer * depending on the configuration variable 'protocol'. This should be set - * to 0 (default) for TCP or 1 for SCTP. It shouldbe configured using a + * to 0 (default) for TCP or 1 for SCTP. It should be configured using a * cluster-wide mechanism as it must be the same on all nodes of the cluster * for the DLM to function. * -- cgit v0.10.2 From 5e9ccc372dc855900c4a75b21286038938e288c7 Mon Sep 17 00:00:00 2001 From: Christine Caulfield Date: Wed, 28 Jan 2009 12:57:40 -0600 Subject: dlm: replace idr with hash table for connections Integer nodeids can be too large for the idr code; use a hash table instead. Signed-off-by: Christine Caulfield Signed-off-by: David Teigland diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index 982314c..609108a 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -2,7 +2,7 @@ ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. -** Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. +** Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved. 
** ** This copyrighted material is made available to anyone wishing to use, ** modify, copy, or redistribute it subject to the terms and conditions @@ -48,7 +48,6 @@ #include #include #include -#include #include #include #include @@ -61,6 +60,7 @@ #include "config.h" #define NEEDED_RMEM (4*1024*1024) +#define CONN_HASH_SIZE 32 struct cbuf { unsigned int base; @@ -115,6 +115,7 @@ struct connection { int retries; #define MAX_CONNECT_RETRIES 3 int sctp_assoc; + struct hlist_node list; struct connection *othercon; struct work_struct rwork; /* Receive workqueue */ struct work_struct swork; /* Send workqueue */ @@ -139,14 +140,37 @@ static int dlm_local_count; static struct workqueue_struct *recv_workqueue; static struct workqueue_struct *send_workqueue; -static DEFINE_IDR(connections_idr); +static struct hlist_head connection_hash[CONN_HASH_SIZE]; static DEFINE_MUTEX(connections_lock); -static int max_nodeid; static struct kmem_cache *con_cache; static void process_recv_sockets(struct work_struct *work); static void process_send_sockets(struct work_struct *work); + +/* This is deliberately very simple because most clusters have simple + sequential nodeids, so we should be able to go straight to a connection + struct in the array */ +static inline int nodeid_hash(int nodeid) +{ + return nodeid & (CONN_HASH_SIZE-1); +} + +static struct connection *__find_con(int nodeid) +{ + int r; + struct hlist_node *h; + struct connection *con; + + r = nodeid_hash(nodeid); + + hlist_for_each_entry(con, h, &connection_hash[r], list) { + if (con->nodeid == nodeid) + return con; + } + return NULL; +} + /* * If 'allocation' is zero then we don't attempt to create a new * connection structure for this node. @@ -155,31 +179,17 @@ static struct connection *__nodeid2con(int nodeid, gfp_t alloc) { struct connection *con = NULL; int r; - int n; - con = idr_find(&connections_idr, nodeid); + con = __find_con(nodeid); if (con || !alloc) return con; - r = idr_pre_get(&connections_idr, alloc); - if (!r) - return NULL; - con = kmem_cache_zalloc(con_cache, alloc); if (!con) return NULL; - r = idr_get_new_above(&connections_idr, con, nodeid, &n); - if (r) { - kmem_cache_free(con_cache, con); - return NULL; - } - - if (n != nodeid) { - idr_remove(&connections_idr, n); - kmem_cache_free(con_cache, con); - return NULL; - } + r = nodeid_hash(nodeid); + hlist_add_head(&con->list, &connection_hash[r]); con->nodeid = nodeid; mutex_init(&con->sock_mutex); @@ -190,19 +200,30 @@ static struct connection *__nodeid2con(int nodeid, gfp_t alloc) /* Setup action pointers for child sockets */ if (con->nodeid) { - struct connection *zerocon = idr_find(&connections_idr, 0); + struct connection *zerocon = __find_con(0); con->connect_action = zerocon->connect_action; if (!con->rx_action) con->rx_action = zerocon->rx_action; } - if (nodeid > max_nodeid) - max_nodeid = nodeid; - return con; } +/* Loop round all connections */ +static void foreach_conn(void (*conn_func)(struct connection *c)) +{ + int i; + struct hlist_node *h, *n; + struct connection *con; + + for (i = 0; i < CONN_HASH_SIZE; i++) { + hlist_for_each_entry_safe(con, h, n, &connection_hash[i], list){ + conn_func(con); + } + } +} + static struct connection *nodeid2con(int nodeid, gfp_t allocation) { struct connection *con; @@ -218,14 +239,17 @@ static struct connection *nodeid2con(int nodeid, gfp_t allocation) static struct connection *assoc2con(int assoc_id) { int i; + struct hlist_node *h; struct connection *con; mutex_lock(&connections_lock); - for (i=0; i<=max_nodeid; i++) { - con = 
__nodeid2con(i, 0); - if (con && con->sctp_assoc == assoc_id) { - mutex_unlock(&connections_lock); - return con; + + for (i = 0 ; i < CONN_HASH_SIZE; i++) { + hlist_for_each_entry(con, h, &connection_hash[i], list) { + if (con && con->sctp_assoc == assoc_id) { + mutex_unlock(&connections_lock); + return con; + } } } mutex_unlock(&connections_lock); @@ -376,25 +400,23 @@ static void sctp_send_shutdown(sctp_assoc_t associd) log_print("send EOF to node failed: %d", ret); } +static void sctp_init_failed_foreach(struct connection *con) +{ + con->sctp_assoc = 0; + if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) { + if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) + queue_work(send_workqueue, &con->swork); + } +} + /* INIT failed but we don't know which node... restart INIT on all pending nodes */ static void sctp_init_failed(void) { - int i; - struct connection *con; - mutex_lock(&connections_lock); - for (i=1; i<=max_nodeid; i++) { - con = __nodeid2con(i, 0); - if (!con) - continue; - con->sctp_assoc = 0; - if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) { - if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) { - queue_work(send_workqueue, &con->swork); - } - } - } + + foreach_conn(sctp_init_failed_foreach); + mutex_unlock(&connections_lock); } @@ -1313,13 +1335,10 @@ out_connect: static void clean_one_writequeue(struct connection *con) { - struct list_head *list; - struct list_head *temp; + struct writequeue_entry *e, *safe; spin_lock(&con->writequeue_lock); - list_for_each_safe(list, temp, &con->writequeue) { - struct writequeue_entry *e = - list_entry(list, struct writequeue_entry, list); + list_for_each_entry_safe(e, safe, &con->writequeue, list) { list_del(&e->list); free_entry(e); } @@ -1369,14 +1388,7 @@ static void process_send_sockets(struct work_struct *work) /* Discard all entries on the write queues */ static void clean_writequeues(void) { - int nodeid; - - for (nodeid = 1; nodeid <= max_nodeid; nodeid++) { - struct connection *con = __nodeid2con(nodeid, 0); - - if (con) - clean_one_writequeue(con); - } + foreach_conn(clean_one_writequeue); } static void work_stop(void) @@ -1406,23 +1418,29 @@ static int work_start(void) return 0; } -void dlm_lowcomms_stop(void) +static void stop_conn(struct connection *con) { - int i; - struct connection *con; + con->flags |= 0x0F; + if (con->sock) + con->sock->sk->sk_user_data = NULL; +} +static void free_conn(struct connection *con) +{ + close_connection(con, true); + if (con->othercon) + kmem_cache_free(con_cache, con->othercon); + hlist_del(&con->list); + kmem_cache_free(con_cache, con); +} + +void dlm_lowcomms_stop(void) +{ /* Set all the flags to prevent any socket activity. 
*/ mutex_lock(&connections_lock); - for (i = 0; i <= max_nodeid; i++) { - con = __nodeid2con(i, 0); - if (con) { - con->flags |= 0x0F; - if (con->sock) - con->sock->sk->sk_user_data = NULL; - } - } + foreach_conn(stop_conn); mutex_unlock(&connections_lock); work_stop(); @@ -1430,25 +1448,20 @@ void dlm_lowcomms_stop(void) mutex_lock(&connections_lock); clean_writequeues(); - for (i = 0; i <= max_nodeid; i++) { - con = __nodeid2con(i, 0); - if (con) { - close_connection(con, true); - if (con->othercon) - kmem_cache_free(con_cache, con->othercon); - kmem_cache_free(con_cache, con); - } - } - max_nodeid = 0; + foreach_conn(free_conn); + mutex_unlock(&connections_lock); kmem_cache_destroy(con_cache); - idr_init(&connections_idr); } int dlm_lowcomms_start(void) { int error = -EINVAL; struct connection *con; + int i; + + for (i = 0; i < CONN_HASH_SIZE; i++) + INIT_HLIST_HEAD(&connection_hash[i]); init_local(); if (!dlm_local_count) { -- cgit v0.10.2 From 43279e5376017c40b4be9af5bc79cbb4ef6f53d7 Mon Sep 17 00:00:00 2001 From: David Teigland Date: Wed, 28 Jan 2009 14:37:54 -0600 Subject: dlm: clear defunct cancel state When a conversion completes successfully and finds that a cancel of the convert is still in progress (which is now a moot point), preemptively clear the state associated with outstanding cancel. That state could cause a subsequent conversion to be ignored. Also, improve the consistency and content of error and debug messages in this area. Signed-off-by: David Teigland diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index 01e7d39..8cb9204 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -835,7 +835,7 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype) lkb->lkb_wait_count++; hold_lkb(lkb); - log_debug(ls, "add overlap %x cur %d new %d count %d flags %x", + log_debug(ls, "addwait %x cur %d overlap %d count %d f %x", lkb->lkb_id, lkb->lkb_wait_type, mstype, lkb->lkb_wait_count, lkb->lkb_flags); goto out; @@ -851,7 +851,7 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype) list_add(&lkb->lkb_wait_reply, &ls->ls_waiters); out: if (error) - log_error(ls, "add_to_waiters %x error %d flags %x %d %d %s", + log_error(ls, "addwait error %x %d flags %x %d %d %s", lkb->lkb_id, error, lkb->lkb_flags, mstype, lkb->lkb_wait_type, lkb->lkb_resource->res_name); mutex_unlock(&ls->ls_waiters_mutex); @@ -863,23 +863,55 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype) request reply on the requestqueue) between dlm_recover_waiters_pre() which set RESEND and dlm_recover_waiters_post() */ -static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype) +static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype, + struct dlm_message *ms) { struct dlm_ls *ls = lkb->lkb_resource->res_ls; int overlap_done = 0; if (is_overlap_unlock(lkb) && (mstype == DLM_MSG_UNLOCK_REPLY)) { + log_debug(ls, "remwait %x unlock_reply overlap", lkb->lkb_id); lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK; overlap_done = 1; goto out_del; } if (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL_REPLY)) { + log_debug(ls, "remwait %x cancel_reply overlap", lkb->lkb_id); lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL; overlap_done = 1; goto out_del; } + /* Cancel state was preemptively cleared by a successful convert, + see next comment, nothing to do. 
*/ + + if ((mstype == DLM_MSG_CANCEL_REPLY) && + (lkb->lkb_wait_type != DLM_MSG_CANCEL)) { + log_debug(ls, "remwait %x cancel_reply wait_type %d", + lkb->lkb_id, lkb->lkb_wait_type); + return -1; + } + + /* Remove for the convert reply, and premptively remove for the + cancel reply. A convert has been granted while there's still + an outstanding cancel on it (the cancel is moot and the result + in the cancel reply should be 0). We preempt the cancel reply + because the app gets the convert result and then can follow up + with another op, like convert. This subsequent op would see the + lingering state of the cancel and fail with -EBUSY. */ + + if ((mstype == DLM_MSG_CONVERT_REPLY) && + (lkb->lkb_wait_type == DLM_MSG_CONVERT) && + is_overlap_cancel(lkb) && ms && !ms->m_result) { + log_debug(ls, "remwait %x convert_reply zap overlap_cancel", + lkb->lkb_id); + lkb->lkb_wait_type = 0; + lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL; + lkb->lkb_wait_count--; + goto out_del; + } + /* N.B. type of reply may not always correspond to type of original msg due to lookup->request optimization, verify others? */ @@ -888,8 +920,8 @@ static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype) goto out_del; } - log_error(ls, "remove_from_waiters lkid %x flags %x types %d %d", - lkb->lkb_id, lkb->lkb_flags, mstype, lkb->lkb_wait_type); + log_error(ls, "remwait error %x reply %d flags %x no wait_type", + lkb->lkb_id, mstype, lkb->lkb_flags); return -1; out_del: @@ -899,7 +931,7 @@ static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype) this would happen */ if (overlap_done && lkb->lkb_wait_type) { - log_error(ls, "remove_from_waiters %x reply %d give up on %d", + log_error(ls, "remwait error %x reply %d wait_type %d overlap", lkb->lkb_id, mstype, lkb->lkb_wait_type); lkb->lkb_wait_count--; lkb->lkb_wait_type = 0; @@ -921,7 +953,7 @@ static int remove_from_waiters(struct dlm_lkb *lkb, int mstype) int error; mutex_lock(&ls->ls_waiters_mutex); - error = _remove_from_waiters(lkb, mstype); + error = _remove_from_waiters(lkb, mstype, NULL); mutex_unlock(&ls->ls_waiters_mutex); return error; } @@ -936,7 +968,7 @@ static int remove_from_waiters_ms(struct dlm_lkb *lkb, struct dlm_message *ms) if (ms != &ls->ls_stub_ms) mutex_lock(&ls->ls_waiters_mutex); - error = _remove_from_waiters(lkb, ms->m_type); + error = _remove_from_waiters(lkb, ms->m_type, ms); if (ms != &ls->ls_stub_ms) mutex_unlock(&ls->ls_waiters_mutex); return error; @@ -2083,6 +2115,11 @@ static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb, lkb->lkb_timeout_cs = args->timeout; rv = 0; out: + if (rv) + log_debug(ls, "validate_lock_args %d %x %x %x %d %d %s", + rv, lkb->lkb_id, lkb->lkb_flags, args->flags, + lkb->lkb_status, lkb->lkb_wait_type, + lkb->lkb_resource->res_name); return rv; } -- cgit v0.10.2 From a536e38125fe5da8ed49690f30c30a8f651cf1f5 Mon Sep 17 00:00:00 2001 From: David Teigland Date: Fri, 27 Feb 2009 15:23:28 -0600 Subject: dlm: ignore cancel on granted lock Return immediately from dlm_unlock(CANCEL) if the lock is granted and not being converted; there's nothing to cancel. 
Signed-off-by: David Teigland diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index 8cb9204..205ec95 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -2186,6 +2186,13 @@ static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args) goto out; } + /* there's nothing to cancel */ + if (lkb->lkb_status == DLM_LKSTS_GRANTED && + !lkb->lkb_wait_type) { + rv = -EBUSY; + goto out; + } + switch (lkb->lkb_wait_type) { case DLM_MSG_LOOKUP: case DLM_MSG_REQUEST: -- cgit v0.10.2 From 1fecb1c4b62881e3689ba2dcf93072ae301b597c Mon Sep 17 00:00:00 2001 From: David Teigland Date: Wed, 4 Mar 2009 11:17:23 -0600 Subject: dlm: fix length calculation in compat code Using offsetof() to calculate name length does not work because it does not produce consistent results with structure packing. This caused memcpy to corrupt memory by copying 4 extra bytes off the end of the buffer on 64 bit kernels with 32 bit userspace (the only case where this 32/64 compat code is used). The fix is to calculate name length directly from the start instead of trying to derive it later using count and offsetof. Signed-off-by: David Teigland diff --git a/fs/dlm/user.c b/fs/dlm/user.c index 065149e..ebce994 100644 --- a/fs/dlm/user.c +++ b/fs/dlm/user.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved. + * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions @@ -84,7 +84,7 @@ struct dlm_lock_result32 { static void compat_input(struct dlm_write_request *kb, struct dlm_write_request32 *kb32, - size_t count) + int namelen) { kb->version[0] = kb32->version[0]; kb->version[1] = kb32->version[1]; @@ -96,8 +96,7 @@ static void compat_input(struct dlm_write_request *kb, kb->cmd == DLM_USER_REMOVE_LOCKSPACE) { kb->i.lspace.flags = kb32->i.lspace.flags; kb->i.lspace.minor = kb32->i.lspace.minor; - memcpy(kb->i.lspace.name, kb32->i.lspace.name, count - - offsetof(struct dlm_write_request32, i.lspace.name)); + memcpy(kb->i.lspace.name, kb32->i.lspace.name, namelen); } else if (kb->cmd == DLM_USER_PURGE) { kb->i.purge.nodeid = kb32->i.purge.nodeid; kb->i.purge.pid = kb32->i.purge.pid; @@ -115,8 +114,7 @@ static void compat_input(struct dlm_write_request *kb, kb->i.lock.bastaddr = (void *)(long)kb32->i.lock.bastaddr; kb->i.lock.lksb = (void *)(long)kb32->i.lock.lksb; memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN); - memcpy(kb->i.lock.name, kb32->i.lock.name, count - - offsetof(struct dlm_write_request32, i.lock.name)); + memcpy(kb->i.lock.name, kb32->i.lock.name, namelen); } } @@ -539,9 +537,16 @@ static ssize_t device_write(struct file *file, const char __user *buf, #ifdef CONFIG_COMPAT if (!kbuf->is64bit) { struct dlm_write_request32 *k32buf; + int namelen = 0; + + if (count > sizeof(struct dlm_write_request32)) + namelen = count - sizeof(struct dlm_write_request32); + k32buf = (struct dlm_write_request32 *)kbuf; - kbuf = kmalloc(count + 1 + (sizeof(struct dlm_write_request) - - sizeof(struct dlm_write_request32)), GFP_KERNEL); + + /* add 1 after namelen so that the name string is terminated */ + kbuf = kzalloc(sizeof(struct dlm_write_request) + namelen + 1, + GFP_KERNEL); if (!kbuf) { kfree(k32buf); return -ENOMEM; @@ -549,7 +554,8 @@ static ssize_t device_write(struct file *file, const char __user *buf, if (proc) set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags); - compat_input(kbuf, k32buf, count + 1); + + compat_input(kbuf, k32buf, namelen); 
kfree(k32buf); } #endif -- cgit v0.10.2 From 1aa292bb1c53500e3ab570b955d03afa97a9404d Mon Sep 17 00:00:00 2001 From: David Moore Date: Tue, 22 Jul 2008 23:23:40 -0700 Subject: firewire: Include iso timestamp in headers when header_size > 4 Previously, when an iso context had header_size > 4, the iso header (len/tag/channel/tcode/sy) was passed to userspace followed by quadlets stripped from the payload. This patch changes the behavior: header_size = 8 now passes the header quadlet followed by the timestamp quadlet. When header_size > 8, quadlets are stripped from the payload. The header_size = 4 case remains identical. Since this alters the semantics of the API, the firewire API version needs to be bumped concurrently with this change. This change also refactors the header copying code slightly to be much easier to read. Signed-off-by: David Moore Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c index 6d19828..bbfd6cd 100644 --- a/drivers/firewire/fw-ohci.c +++ b/drivers/firewire/fw-ohci.c @@ -1765,6 +1765,28 @@ ohci_get_bus_time(struct fw_card *card) return bus_time; } +static void copy_iso_headers(struct iso_context *ctx, void *p) +{ + int i = ctx->header_length; + + if (i + ctx->base.header_size > PAGE_SIZE) + return; + + /* + * The iso header is byteswapped to little endian by + * the controller, but the remaining header quadlets + * are big endian. We want to present all the headers + * as big endian, so we have to swap the first quadlet. + */ + if (ctx->base.header_size > 0) + *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4)); + if (ctx->base.header_size > 4) + *(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p); + if (ctx->base.header_size > 8) + memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8); + ctx->header_length += ctx->base.header_size; +} + static int handle_ir_dualbuffer_packet(struct context *context, struct descriptor *d, struct descriptor *last) @@ -1775,7 +1797,6 @@ static int handle_ir_dualbuffer_packet(struct context *context, __le32 *ir_header; size_t header_length; void *p, *end; - int i; if (db->first_res_count != 0 && db->second_res_count != 0) { if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) { @@ -1788,25 +1809,14 @@ static int handle_ir_dualbuffer_packet(struct context *context, header_length = le16_to_cpu(db->first_req_count) - le16_to_cpu(db->first_res_count); - i = ctx->header_length; p = db + 1; end = p + header_length; - while (p < end && i + ctx->base.header_size <= PAGE_SIZE) { - /* - * The iso header is byteswapped to little endian by - * the controller, but the remaining header quadlets - * are big endian. We want to present all the headers - * as big endian, so we have to swap the first - * quadlet. 
- */ - *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4)); - memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4); - i += ctx->base.header_size; + while (p < end) { + copy_iso_headers(ctx, p); ctx->excess_bytes += (le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff; - p += ctx->base.header_size + 4; + p += max(ctx->base.header_size, (size_t)8); } - ctx->header_length = i; ctx->excess_bytes -= le16_to_cpu(db->second_req_count) - le16_to_cpu(db->second_res_count); @@ -1832,7 +1842,6 @@ static int handle_ir_packet_per_buffer(struct context *context, struct descriptor *pd; __le32 *ir_header; void *p; - int i; for (pd = d; pd <= last; pd++) { if (pd->transfer_status) @@ -1842,21 +1851,8 @@ static int handle_ir_packet_per_buffer(struct context *context, /* Descriptor(s) not done yet, stop iteration */ return 0; - i = ctx->header_length; - p = last + 1; - - if (ctx->base.header_size > 0 && - i + ctx->base.header_size <= PAGE_SIZE) { - /* - * The iso header is byteswapped to little endian by - * the controller, but the remaining header quadlets - * are big endian. We want to present all the headers - * as big endian, so we have to swap the first quadlet. - */ - *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4)); - memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4); - ctx->header_length += ctx->base.header_size; - } + p = last + 1; + copy_iso_headers(ctx, p); if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) { ir_header = (__le32 *) p; @@ -2151,11 +2147,11 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base, z = 2; /* - * The OHCI controller puts the status word in the header - * buffer too, so we need 4 extra bytes per packet. + * The OHCI controller puts the isochronous header and trailer in the + * buffer, so we need at least 8 bytes. */ packet_count = p->header_length / ctx->base.header_size; - header_size = packet_count * (ctx->base.header_size + 4); + header_size = packet_count * max(ctx->base.header_size, (size_t)8); /* Get header size in number of descriptors. */ header_z = DIV_ROUND_UP(header_size, sizeof(*d)); @@ -2173,7 +2169,8 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base, db = (struct db_descriptor *) d; db->control = cpu_to_le16(DESCRIPTOR_STATUS | DESCRIPTOR_BRANCH_ALWAYS); - db->first_size = cpu_to_le16(ctx->base.header_size + 4); + db->first_size = + cpu_to_le16(max(ctx->base.header_size, (size_t)8)); if (p->skip && rest == p->payload_length) { db->control |= cpu_to_le16(DESCRIPTOR_WAIT); db->first_req_count = db->first_size; @@ -2223,11 +2220,11 @@ ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, int page, offset, packet_count, header_size, payload_per_buffer; /* - * The OHCI controller puts the status word in the - * buffer too, so we need 4 extra bytes per packet. + * The OHCI controller puts the isochronous header and trailer in the + * buffer, so we need at least 8 bytes. */ packet_count = p->header_length / ctx->base.header_size; - header_size = ctx->base.header_size + 4; + header_size = max(ctx->base.header_size, (size_t)8); /* Get header size in number of descriptors. */ header_z = DIV_ROUND_UP(header_size, sizeof(*d)); -- cgit v0.10.2 From cf417e5494582453c033d8cac9e1352e74215435 Mon Sep 17 00:00:00 2001 From: Jay Fenlason Date: Fri, 3 Oct 2008 11:19:09 -0400 Subject: firewire: add a client_list_lock This adds a client_list_lock, which only protects the device's client_list, so that future versions of the driver can call code that takes the card->lock while holding the client_list_lock. 
Adding this lock is much simpler than adding __ versions of all the functions that the future version may need. The one ordering issue is to make sure code never takes the client_list_lock with card->lock held. Since client_list_lock is only used in three places, that isn't hard. Signed-off-by: Jay Fenlason Update fill_bus_reset_event() accordingly. Include linux/spinlock.h. Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c index ed03234..40cc973 100644 --- a/drivers/firewire/fw-cdev.c +++ b/drivers/firewire/fw-cdev.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -132,9 +133,9 @@ static int fw_device_op_open(struct inode *inode, struct file *file) file->private_data = client; - spin_lock_irqsave(&device->card->lock, flags); + spin_lock_irqsave(&device->client_list_lock, flags); list_add_tail(&client->link, &device->client_list); - spin_unlock_irqrestore(&device->card->lock, flags); + spin_unlock_irqrestore(&device->client_list_lock, flags); return 0; } @@ -205,12 +206,14 @@ fw_device_op_read(struct file *file, return dequeue_event(client, buffer, count); } -/* caller must hold card->lock so that node pointers can be dereferenced here */ static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event, struct client *client) { struct fw_card *card = client->device->card; + unsigned long flags; + + spin_lock_irqsave(&card->lock, flags); event->closure = client->bus_reset_closure; event->type = FW_CDEV_EVENT_BUS_RESET; @@ -220,22 +223,23 @@ fill_bus_reset_event(struct fw_cdev_event_bus_reset *event, event->bm_node_id = 0; /* FIXME: We don't track the BM. */ event->irm_node_id = card->irm_node->node_id; event->root_node_id = card->root_node->node_id; + + spin_unlock_irqrestore(&card->lock, flags); } static void for_each_client(struct fw_device *device, void (*callback)(struct client *client)) { - struct fw_card *card = device->card; struct client *c; unsigned long flags; - spin_lock_irqsave(&card->lock, flags); + spin_lock_irqsave(&device->client_list_lock, flags); list_for_each_entry(c, &device->client_list, link) callback(c); - spin_unlock_irqrestore(&card->lock, flags); + spin_unlock_irqrestore(&device->client_list_lock, flags); } static void @@ -274,11 +278,11 @@ static int ioctl_get_info(struct client *client, void *buffer) { struct fw_cdev_get_info *get_info = buffer; struct fw_cdev_event_bus_reset bus_reset; - struct fw_card *card = client->device->card; unsigned long ret = 0; client->version = get_info->version; get_info->version = FW_CDEV_VERSION; + get_info->card = client->device->card->index; down_read(&fw_device_rwsem); @@ -300,18 +304,12 @@ static int ioctl_get_info(struct client *client, void *buffer) client->bus_reset_closure = get_info->bus_reset_closure; if (get_info->bus_reset != 0) { void __user *uptr = u64_to_uptr(get_info->bus_reset); - unsigned long flags; - spin_lock_irqsave(&card->lock, flags); fill_bus_reset_event(&bus_reset, client); - spin_unlock_irqrestore(&card->lock, flags); - if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset))) return -EFAULT; } - get_info->card = card->index; - return 0; } @@ -1009,9 +1007,9 @@ static int fw_device_op_release(struct inode *inode, struct file *file) list_for_each_entry_safe(e, next_e, &client->event_list, link) kfree(e); - spin_lock_irqsave(&client->device->card->lock, flags); + spin_lock_irqsave(&client->device->client_list_lock, flags); list_del(&client->link); - spin_unlock_irqrestore(&client->device->card->lock, flags); + 
spin_unlock_irqrestore(&client->device->client_list_lock, flags); fw_device_put(client->device); kfree(client); diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c index bf53acb..ffde1be 100644 --- a/drivers/firewire/fw-device.c +++ b/drivers/firewire/fw-device.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include "fw-transaction.h" @@ -1004,6 +1005,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event) device->node = fw_node_get(node); device->node_id = node->node_id; device->generation = card->generation; + spin_lock_init(&device->client_list_lock); INIT_LIST_HEAD(&device->client_list); /* diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h index 8ef6ec2..008a790 100644 --- a/drivers/firewire/fw-device.h +++ b/drivers/firewire/fw-device.h @@ -23,6 +23,7 @@ #include #include #include +#include #include enum fw_device_state { @@ -64,6 +65,8 @@ struct fw_device { bool cmc; struct fw_card *card; struct device device; + /* to prevent deadlocks, never take this lock with card->lock held */ + spinlock_t client_list_lock; struct list_head client_list; u32 *config_rom; size_t config_rom_length; -- cgit v0.10.2 From d67cfb9613f373d76daa2c8d209629601424ca12 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Sun, 5 Oct 2008 10:37:11 +0200 Subject: firewire: convert client_list_lock to mutex So far it is only taken in non-atomic contexts. Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c index 40cc973..75bbd66 100644 --- a/drivers/firewire/fw-cdev.c +++ b/drivers/firewire/fw-cdev.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -108,7 +109,6 @@ static int fw_device_op_open(struct inode *inode, struct file *file) { struct fw_device *device; struct client *client; - unsigned long flags; device = fw_device_get_by_devt(inode->i_rdev); if (device == NULL) @@ -133,9 +133,9 @@ static int fw_device_op_open(struct inode *inode, struct file *file) file->private_data = client; - spin_lock_irqsave(&device->client_list_lock, flags); + mutex_lock(&device->client_list_mutex); list_add_tail(&client->link, &device->client_list); - spin_unlock_irqrestore(&device->client_list_lock, flags); + mutex_unlock(&device->client_list_mutex); return 0; } @@ -232,14 +232,11 @@ for_each_client(struct fw_device *device, void (*callback)(struct client *client)) { struct client *c; - unsigned long flags; - - spin_lock_irqsave(&device->client_list_lock, flags); + mutex_lock(&device->client_list_mutex); list_for_each_entry(c, &device->client_list, link) callback(c); - - spin_unlock_irqrestore(&device->client_list_lock, flags); + mutex_unlock(&device->client_list_mutex); } static void @@ -247,7 +244,7 @@ queue_bus_reset_event(struct client *client) { struct bus_reset *bus_reset; - bus_reset = kzalloc(sizeof(*bus_reset), GFP_ATOMIC); + bus_reset = kzalloc(sizeof(*bus_reset), GFP_KERNEL); if (bus_reset == NULL) { fw_notify("Out of memory when allocating bus reset event\n"); return; @@ -988,7 +985,6 @@ static int fw_device_op_release(struct inode *inode, struct file *file) struct client *client = file->private_data; struct event *e, *next_e; struct client_resource *r, *next_r; - unsigned long flags; if (client->buffer.pages) fw_iso_buffer_destroy(&client->buffer, client->device->card); @@ -1007,9 +1003,9 @@ static int fw_device_op_release(struct inode *inode, struct file *file) list_for_each_entry_safe(e, next_e, &client->event_list, link) kfree(e); - 
spin_lock_irqsave(&client->device->client_list_lock, flags); + mutex_lock(&client->device->client_list_mutex); list_del(&client->link); - spin_unlock_irqrestore(&client->device->client_list_lock, flags); + mutex_unlock(&client->device->client_list_mutex); fw_device_put(client->device); kfree(client); diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c index ffde1be..2de3dd5 100644 --- a/drivers/firewire/fw-device.c +++ b/drivers/firewire/fw-device.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -1005,7 +1006,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event) device->node = fw_node_get(node); device->node_id = node->node_id; device->generation = card->generation; - spin_lock_init(&device->client_list_lock); + mutex_init(&device->client_list_mutex); INIT_LIST_HEAD(&device->client_list); /* diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h index 008a790..655d7e8 100644 --- a/drivers/firewire/fw-device.h +++ b/drivers/firewire/fw-device.h @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include enum fw_device_state { @@ -65,9 +65,10 @@ struct fw_device { bool cmc; struct fw_card *card; struct device device; - /* to prevent deadlocks, never take this lock with card->lock held */ - spinlock_t client_list_lock; + + struct mutex client_list_mutex; struct list_head client_list; + u32 *config_rom; size_t config_rom_length; int config_rom_retries; -- cgit v0.10.2 From bf8e3355ec8f4e472f9841e94203cd759b45226e Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Fri, 5 Dec 2008 22:43:41 +0100 Subject: firewire: cdev: documentation fixlet Reported-by: Jay Fenlason Signed-off-by: Stefan Richter diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h index 4d078e9..899ef27 100644 --- a/include/linux/firewire-cdev.h +++ b/include/linux/firewire-cdev.h @@ -229,7 +229,7 @@ struct fw_cdev_get_info { * Send a request to the device. This ioctl implements all outgoing requests. * Both quadlet and block request specify the payload as a pointer to the data * in the @data field. Once the transaction completes, the kernel writes an - * &fw_cdev_event_request event back. The @closure field is passed back to + * &fw_cdev_event_response event back. The @closure field is passed back to * user space in the response event. */ struct fw_cdev_send_request { -- cgit v0.10.2 From 1f3125af8ed7410cc0ebcc0acd59bbfc1ae0057a Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Fri, 5 Dec 2008 22:44:42 +0100 Subject: firewire: cdev: tcodes input validation The behaviour of fw-transaction.c::fw_send_request is ill-defined for any other tcodes than read/ write/ lock request tcodes. Therefore prevent requests with wrong tcodes from entering the transaction layer. Maybe fw_send_request should check them itself, but I am not inclined to change it and fw_fill_request from void-valued functions to ones which return error codes and pass those up. Besides, maybe fw_send_request is going to support one more tcode than ioctl_send_request in the future (TCODE_STREAM_DATA). 
Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c index 75bbd66..a320ab4 100644 --- a/drivers/firewire/fw-cdev.c +++ b/drivers/firewire/fw-cdev.c @@ -398,6 +398,7 @@ static int ioctl_send_request(struct client *client, void *buffer) struct fw_device *device = client->device; struct fw_cdev_send_request *request = buffer; struct response *response; + int ret; /* What is the biggest size we'll accept, really? */ if (request->length > 4096) @@ -414,8 +415,26 @@ static int ioctl_send_request(struct client *client, void *buffer) if (request->data && copy_from_user(response->response.data, u64_to_uptr(request->data), request->length)) { - kfree(response); - return -EFAULT; + ret = -EFAULT; + goto err; + } + + switch (request->tcode) { + case TCODE_WRITE_QUADLET_REQUEST: + case TCODE_WRITE_BLOCK_REQUEST: + case TCODE_READ_QUADLET_REQUEST: + case TCODE_READ_BLOCK_REQUEST: + case TCODE_LOCK_MASK_SWAP: + case TCODE_LOCK_COMPARE_SWAP: + case TCODE_LOCK_FETCH_ADD: + case TCODE_LOCK_LITTLE_ADD: + case TCODE_LOCK_BOUNDED_ADD: + case TCODE_LOCK_WRAP_ADD: + case TCODE_LOCK_VENDOR_DEPENDENT: + break; + default: + ret = -EINVAL; + goto err; } response->resource.release = release_transaction; @@ -434,6 +453,10 @@ static int ioctl_send_request(struct client *client, void *buffer) return sizeof(request) + request->length; else return sizeof(request); + err: + kfree(response); + + return ret; } struct address_handler { -- cgit v0.10.2 From 97811e347310766030a648fdf0e407b2c91a39c1 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Sun, 14 Dec 2008 19:19:23 +0100 Subject: firewire: cdev: fix race of fw_device_op_release with bus reset Unlink the client from the fw_device earlier in order to prevent bus reset events being added to client->event_list during shutdown. Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c index a320ab4..4dd66c1 100644 --- a/drivers/firewire/fw-cdev.c +++ b/drivers/firewire/fw-cdev.c @@ -1009,6 +1009,10 @@ static int fw_device_op_release(struct inode *inode, struct file *file) struct event *e, *next_e; struct client_resource *r, *next_r; + mutex_lock(&client->device->client_list_mutex); + list_del(&client->link); + mutex_unlock(&client->device->client_list_mutex); + if (client->buffer.pages) fw_iso_buffer_destroy(&client->buffer, client->device->card); @@ -1026,10 +1030,6 @@ static int fw_device_op_release(struct inode *inode, struct file *file) list_for_each_entry_safe(e, next_e, &client->event_list, link) kfree(e); - mutex_lock(&client->device->client_list_mutex); - list_del(&client->link); - mutex_unlock(&client->device->client_list_mutex); - fw_device_put(client->device); kfree(client); -- cgit v0.10.2 From 45ee3199eb3e4233b755a9bb353a0527a4c58b5f Mon Sep 17 00:00:00 2001 From: Jay Fenlason Date: Sun, 21 Dec 2008 16:47:17 +0100 Subject: firewire: cdev: use an idr rather than a linked list for resources The current code uses a linked list and a counter for storing resources and the corresponding handle numbers. By changing to an idr we can be safe from counter wrap-around giving two resources the same handle. Furthermore, the deallocation ioctls now check whether the resource to be freed is of the intended type. Signed-off-by: Jay Fenlason Some rework by Stefan R: - The idr API documentation says we get an ID within 0...0x7fffffff. Hence we can rest assured that idr handles fit into cdev handles. - Fix some races. Add a client->in_shutdown flag for this purpose. 
- Add allocation retry to add_client_resource(). - It is possible to use idr_for_each() in fw_device_op_release(). - Fix ioctl_send_response() regression. - Small style changes. Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c index 4dd66c1..094aee5 100644 --- a/drivers/firewire/fw-cdev.c +++ b/drivers/firewire/fw-cdev.c @@ -41,10 +41,12 @@ #include "fw-device.h" struct client; +struct client_resource; +typedef void (*client_resource_release_fn_t)(struct client *, + struct client_resource *); struct client_resource { - struct list_head link; - void (*release)(struct client *client, struct client_resource *r); - u32 handle; + client_resource_release_fn_t release; + int handle; }; /* @@ -78,9 +80,10 @@ struct iso_interrupt { struct client { u32 version; struct fw_device *device; + spinlock_t lock; - u32 resource_handle; - struct list_head resource_list; + bool in_shutdown; + struct idr resource_idr; struct list_head event_list; wait_queue_head_t wait; u64 bus_reset_closure; @@ -126,9 +129,9 @@ static int fw_device_op_open(struct inode *inode, struct file *file) } client->device = device; - INIT_LIST_HEAD(&client->event_list); - INIT_LIST_HEAD(&client->resource_list); spin_lock_init(&client->lock); + idr_init(&client->resource_idr); + INIT_LIST_HEAD(&client->event_list); init_waitqueue_head(&client->wait); file->private_data = client; @@ -151,7 +154,10 @@ static void queue_event(struct client *client, struct event *event, event->v[1].size = size1; spin_lock_irqsave(&client->lock, flags); - list_add_tail(&event->link, &client->event_list); + if (client->in_shutdown) + kfree(event); + else + list_add_tail(&event->link, &client->event_list); spin_unlock_irqrestore(&client->lock, flags); wake_up_interruptible(&client->wait); @@ -310,34 +316,49 @@ static int ioctl_get_info(struct client *client, void *buffer) return 0; } -static void -add_client_resource(struct client *client, struct client_resource *resource) +static int +add_client_resource(struct client *client, struct client_resource *resource, + gfp_t gfp_mask) { unsigned long flags; + int ret; + + retry: + if (idr_pre_get(&client->resource_idr, gfp_mask) == 0) + return -ENOMEM; spin_lock_irqsave(&client->lock, flags); - list_add_tail(&resource->link, &client->resource_list); - resource->handle = client->resource_handle++; + if (client->in_shutdown) + ret = -ECANCELED; + else + ret = idr_get_new(&client->resource_idr, resource, + &resource->handle); spin_unlock_irqrestore(&client->lock, flags); + + if (ret == -EAGAIN) + goto retry; + + return ret < 0 ? 
ret : 0; } static int release_client_resource(struct client *client, u32 handle, + client_resource_release_fn_t release, struct client_resource **resource) { struct client_resource *r; unsigned long flags; spin_lock_irqsave(&client->lock, flags); - list_for_each_entry(r, &client->resource_list, link) { - if (r->handle == handle) { - list_del(&r->link); - break; - } - } + if (client->in_shutdown) + r = NULL; + else + r = idr_find(&client->resource_idr, handle); + if (r && r->release == release) + idr_remove(&client->resource_idr, handle); spin_unlock_irqrestore(&client->lock, flags); - if (&r->link == &client->resource_list) + if (!(r && r->release == release)) return -EINVAL; if (resource) @@ -372,7 +393,12 @@ complete_transaction(struct fw_card *card, int rcode, memcpy(r->data, payload, r->length); spin_lock_irqsave(&client->lock, flags); - list_del(&response->resource.link); + /* + * If called while in shutdown, the idr tree must be left untouched. + * The idr handle will be removed later. + */ + if (!client->in_shutdown) + idr_remove(&client->resource_idr, response->resource.handle); spin_unlock_irqrestore(&client->lock, flags); r->type = FW_CDEV_EVENT_RESPONSE; @@ -416,7 +442,7 @@ static int ioctl_send_request(struct client *client, void *buffer) copy_from_user(response->response.data, u64_to_uptr(request->data), request->length)) { ret = -EFAULT; - goto err; + goto failed; } switch (request->tcode) { @@ -434,11 +460,13 @@ static int ioctl_send_request(struct client *client, void *buffer) break; default: ret = -EINVAL; - goto err; + goto failed; } response->resource.release = release_transaction; - add_client_resource(client, &response->resource); + ret = add_client_resource(client, &response->resource, GFP_KERNEL); + if (ret < 0) + goto failed; fw_send_request(device->card, &response->transaction, request->tcode & 0x1f, @@ -453,7 +481,7 @@ static int ioctl_send_request(struct client *client, void *buffer) return sizeof(request) + request->length; else return sizeof(request); - err: + failed: kfree(response); return ret; @@ -500,22 +528,21 @@ handle_request(struct fw_card *card, struct fw_request *r, struct request *request; struct request_event *e; struct client *client = handler->client; + int ret; request = kmalloc(sizeof(*request), GFP_ATOMIC); e = kmalloc(sizeof(*e), GFP_ATOMIC); - if (request == NULL || e == NULL) { - kfree(request); - kfree(e); - fw_send_response(card, r, RCODE_CONFLICT_ERROR); - return; - } + if (request == NULL || e == NULL) + goto failed; request->request = r; request->data = payload; request->length = length; request->resource.release = release_request; - add_client_resource(client, &request->resource); + ret = add_client_resource(client, &request->resource, GFP_ATOMIC); + if (ret < 0) + goto failed; e->request.type = FW_CDEV_EVENT_REQUEST; e->request.tcode = tcode; @@ -526,6 +553,12 @@ handle_request(struct fw_card *card, struct fw_request *r, queue_event(client, &e->event, &e->request, sizeof(e->request), payload, length); + return; + + failed: + kfree(request); + kfree(e); + fw_send_response(card, r, RCODE_CONFLICT_ERROR); } static void @@ -544,6 +577,7 @@ static int ioctl_allocate(struct client *client, void *buffer) struct fw_cdev_allocate *request = buffer; struct address_handler *handler; struct fw_address_region region; + int ret; handler = kmalloc(sizeof(*handler), GFP_KERNEL); if (handler == NULL) @@ -563,7 +597,11 @@ static int ioctl_allocate(struct client *client, void *buffer) } handler->resource.release = release_address_handler; - 
add_client_resource(client, &handler->resource); + ret = add_client_resource(client, &handler->resource, GFP_KERNEL); + if (ret < 0) { + release_address_handler(client, &handler->resource); + return ret; + } request->handle = handler->resource.handle; return 0; @@ -573,7 +611,8 @@ static int ioctl_deallocate(struct client *client, void *buffer) { struct fw_cdev_deallocate *request = buffer; - return release_client_resource(client, request->handle, NULL); + return release_client_resource(client, request->handle, + release_address_handler, NULL); } static int ioctl_send_response(struct client *client, void *buffer) @@ -582,8 +621,10 @@ static int ioctl_send_response(struct client *client, void *buffer) struct client_resource *resource; struct request *r; - if (release_client_resource(client, request->handle, &resource) < 0) + if (release_client_resource(client, request->handle, + release_request, &resource) < 0) return -EINVAL; + r = container_of(resource, struct request, resource); if (request->length < r->length) r->length = request->length; @@ -626,7 +667,7 @@ static int ioctl_add_descriptor(struct client *client, void *buffer) { struct fw_cdev_add_descriptor *request = buffer; struct descriptor *descriptor; - int retval; + int ret; if (request->length > 256) return -EINVAL; @@ -638,8 +679,8 @@ static int ioctl_add_descriptor(struct client *client, void *buffer) if (copy_from_user(descriptor->data, u64_to_uptr(request->data), request->length * 4)) { - kfree(descriptor); - return -EFAULT; + ret = -EFAULT; + goto failed; } descriptor->d.length = request->length; @@ -647,24 +688,31 @@ static int ioctl_add_descriptor(struct client *client, void *buffer) descriptor->d.key = request->key; descriptor->d.data = descriptor->data; - retval = fw_core_add_descriptor(&descriptor->d); - if (retval < 0) { - kfree(descriptor); - return retval; - } + ret = fw_core_add_descriptor(&descriptor->d); + if (ret < 0) + goto failed; descriptor->resource.release = release_descriptor; - add_client_resource(client, &descriptor->resource); + ret = add_client_resource(client, &descriptor->resource, GFP_KERNEL); + if (ret < 0) { + fw_core_remove_descriptor(&descriptor->d); + goto failed; + } request->handle = descriptor->resource.handle; return 0; + failed: + kfree(descriptor); + + return ret; } static int ioctl_remove_descriptor(struct client *client, void *buffer) { struct fw_cdev_remove_descriptor *request = buffer; - return release_client_resource(client, request->handle, NULL); + return release_client_resource(client, request->handle, + release_descriptor, NULL); } static void @@ -1003,11 +1051,21 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma) return retval; } +static int shutdown_resource(int id, void *p, void *data) +{ + struct client_resource *r = p; + struct client *client = data; + + r->release(client, r); + + return 0; +} + static int fw_device_op_release(struct inode *inode, struct file *file) { struct client *client = file->private_data; struct event *e, *next_e; - struct client_resource *r, *next_r; + unsigned long flags; mutex_lock(&client->device->client_list_mutex); list_del(&client->link); @@ -1019,17 +1077,22 @@ static int fw_device_op_release(struct inode *inode, struct file *file) if (client->iso_context) fw_iso_context_destroy(client->iso_context); - list_for_each_entry_safe(r, next_r, &client->resource_list, link) - r->release(client, r); + /* Freeze client->resource_idr and client->event_list */ + spin_lock_irqsave(&client->lock, flags); + client->in_shutdown = 
true; + spin_unlock_irqrestore(&client->lock, flags); - /* - * FIXME: We should wait for the async tasklets to stop - * running before freeing the memory. - */ + idr_for_each(&client->resource_idr, shutdown_resource, client); + idr_remove_all(&client->resource_idr); + idr_destroy(&client->resource_idr); list_for_each_entry_safe(e, next_e, &client->event_list, link) kfree(e); + /* + * FIXME: client should be reference-counted. It's extremely unlikely + * but there may still be transactions being completed at this point. + */ fw_device_put(client->device); kfree(client); -- cgit v0.10.2 From 3e0b5f0d7cb5fef402517e41eebff5a0f0e65a13 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Sun, 14 Dec 2008 19:21:01 +0100 Subject: firewire: cdev: address handler input validation Like before my commit 1415d9189e8c59aa9c77a3bba419dcea062c145f, fw_core_add_address_handler() does not align the address region now. Instead the caller is required to pass valid parameters. Since one of the callers of fw_core_add_address_handler() is the cdev userspace interface, we now check for valid input. If the client is buggy, we give it a hint with -EINVAL. Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c index 094aee5..44af452 100644 --- a/drivers/firewire/fw-cdev.c +++ b/drivers/firewire/fw-cdev.c @@ -591,9 +591,10 @@ static int ioctl_allocate(struct client *client, void *buffer) handler->closure = request->closure; handler->client = client; - if (fw_core_add_address_handler(&handler->handler, ®ion) < 0) { + ret = fw_core_add_address_handler(&handler->handler, ®ion); + if (ret < 0) { kfree(handler); - return -EBUSY; + return ret; } handler->resource.release = release_address_handler; diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c index 699ac04..12a6cdc 100644 --- a/drivers/firewire/fw-transaction.c +++ b/drivers/firewire/fw-transaction.c @@ -449,16 +449,19 @@ const struct fw_address_region fw_unit_space_region = #endif /* 0 */ /** - * Allocate a range of addresses in the node space of the OHCI - * controller. When a request is received that falls within the - * specified address range, the specified callback is invoked. The - * parameters passed to the callback give the details of the - * particular request. + * fw_core_add_address_handler - register for incoming requests + * @handler: callback + * @region: region in the IEEE 1212 node space address range + * + * region->start, ->end, and handler->length have to be quadlet-aligned. + * + * When a request is received that falls within the specified address range, + * the specified callback is invoked. The parameters passed to the callback + * give the details of the particular request. * * Return value: 0 on success, non-zero otherwise. * The start offset of the handler's address region is determined by * fw_core_add_address_handler() and is returned in handler->offset. - * The offset is quadlet-aligned. 
*/ int fw_core_add_address_handler(struct fw_address_handler *handler, @@ -468,17 +471,23 @@ fw_core_add_address_handler(struct fw_address_handler *handler, unsigned long flags; int ret = -EBUSY; + if (region->start & 0xffff000000000003ULL || + region->end & 0xffff000000000003ULL || + region->start >= region->end || + handler->length & 3 || + handler->length == 0) + return -EINVAL; + spin_lock_irqsave(&address_handler_lock, flags); - handler->offset = roundup(region->start, 4); + handler->offset = region->start; while (handler->offset + handler->length <= region->end) { other = lookup_overlapping_address_handler(&address_handler_list, handler->offset, handler->length); if (other != NULL) { - handler->offset = - roundup(other->offset + other->length, 4); + handler->offset += other->length; } else { list_add_tail(&handler->link, &address_handler_list); ret = 0; -- cgit v0.10.2 From 44be21b63e0c551df21253540b7f216f0d18928e Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Sun, 14 Dec 2008 19:21:31 +0100 Subject: firewire: core: remove outdated comment Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c index 12a6cdc..024d1f5 100644 --- a/drivers/firewire/fw-transaction.c +++ b/drivers/firewire/fw-transaction.c @@ -502,12 +502,7 @@ fw_core_add_address_handler(struct fw_address_handler *handler, EXPORT_SYMBOL(fw_core_add_address_handler); /** - * Deallocate a range of addresses allocated with fw_allocate. This - * will call the associated callback one last time with a the special - * tcode TCODE_DEALLOCATE, to let the client destroy the registered - * callback data. For convenience, the callback parameters offset and - * length are set to the start and the length respectively for the - * deallocated region, payload is set to NULL. + * fw_core_remove_address_handler - unregister an address handler */ void fw_core_remove_address_handler(struct fw_address_handler *handler) { -- cgit v0.10.2 From c490a6dec6cc1b7f0eab56b9fbd565129b3dea2e Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Sun, 14 Dec 2008 21:45:14 +0100 Subject: firewire: core: remove obsolete assertions This code never changes. Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c index 024d1f5..058f5ed 100644 --- a/drivers/firewire/fw-transaction.c +++ b/drivers/firewire/fw-transaction.c @@ -955,19 +955,10 @@ static int __init fw_core_init(void) return fw_cdev_major; } - retval = fw_core_add_address_handler(&topology_map, - &topology_map_region); - BUG_ON(retval < 0); - - retval = fw_core_add_address_handler(®isters, - ®isters_region); - BUG_ON(retval < 0); - - /* Add the vendor textual descriptor. */ - retval = fw_core_add_descriptor(&vendor_id_descriptor); - BUG_ON(retval < 0); - retval = fw_core_add_descriptor(&model_id_descriptor); - BUG_ON(retval < 0); + fw_core_add_address_handler(&topology_map, &topology_map_region); + fw_core_add_address_handler(®isters, ®isters_region); + fw_core_add_descriptor(&vendor_id_descriptor); + fw_core_add_descriptor(&model_id_descriptor); return 0; } -- cgit v0.10.2 From 2dbd7d7e2327b0c2cc4e2de903e1cfa19980a504 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Sun, 14 Dec 2008 21:45:45 +0100 Subject: firewire: standardize a variable name "ret" is the new "retval". 
Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c index 44af452..07c32a4 100644 --- a/drivers/firewire/fw-cdev.c +++ b/drivers/firewire/fw-cdev.c @@ -169,13 +169,13 @@ dequeue_event(struct client *client, char __user *buffer, size_t count) unsigned long flags; struct event *event; size_t size, total; - int i, retval; + int i, ret; - retval = wait_event_interruptible(client->wait, - !list_empty(&client->event_list) || - fw_device_is_shutdown(client->device)); - if (retval < 0) - return retval; + ret = wait_event_interruptible(client->wait, + !list_empty(&client->event_list) || + fw_device_is_shutdown(client->device)); + if (ret < 0) + return ret; if (list_empty(&client->event_list) && fw_device_is_shutdown(client->device)) @@ -190,17 +190,17 @@ dequeue_event(struct client *client, char __user *buffer, size_t count) for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) { size = min(event->v[i].size, count - total); if (copy_to_user(buffer + total, event->v[i].data, size)) { - retval = -EFAULT; + ret = -EFAULT; goto out; } total += size; } - retval = total; + ret = total; out: kfree(event); - return retval; + return ret; } static ssize_t @@ -958,7 +958,7 @@ static int dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg) { char buffer[256]; - int retval; + int ret; if (_IOC_TYPE(cmd) != '#' || _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers)) @@ -970,9 +970,9 @@ dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg) return -EFAULT; } - retval = ioctl_handlers[_IOC_NR(cmd)](client, buffer); - if (retval < 0) - return retval; + ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer); + if (ret < 0) + return ret; if (_IOC_DIR(cmd) & _IOC_READ) { if (_IOC_SIZE(cmd) > sizeof(buffer) || @@ -980,7 +980,7 @@ dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg) return -EFAULT; } - return retval; + return ret; } static long @@ -1014,7 +1014,7 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma) struct client *client = file->private_data; enum dma_data_direction direction; unsigned long size; - int page_count, retval; + int page_count, ret; if (fw_device_is_shutdown(client->device)) return -ENODEV; @@ -1040,16 +1040,16 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma) else direction = DMA_FROM_DEVICE; - retval = fw_iso_buffer_init(&client->buffer, client->device->card, - page_count, direction); - if (retval < 0) - return retval; + ret = fw_iso_buffer_init(&client->buffer, client->device->card, + page_count, direction); + if (ret < 0) + return ret; - retval = fw_iso_buffer_map(&client->buffer, vma); - if (retval < 0) + ret = fw_iso_buffer_map(&client->buffer, vma); + if (ret < 0) fw_iso_buffer_destroy(&client->buffer, client->device->card); - return retval; + return ret; } static int shutdown_resource(int id, void *p, void *data) diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c index e14c03d..cb32b20 100644 --- a/drivers/firewire/fw-iso.c +++ b/drivers/firewire/fw-iso.c @@ -32,7 +32,7 @@ int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, int page_count, enum dma_data_direction direction) { - int i, j, retval = -ENOMEM; + int i, j; dma_addr_t address; buffer->page_count = page_count; @@ -69,19 +69,19 @@ fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, kfree(buffer->pages); out: buffer->pages = NULL; - return retval; + return -ENOMEM; } int fw_iso_buffer_map(struct fw_iso_buffer *buffer, 
struct vm_area_struct *vma) { unsigned long uaddr; - int i, retval; + int i, ret; uaddr = vma->vm_start; for (i = 0; i < buffer->page_count; i++) { - retval = vm_insert_page(vma, uaddr, buffer->pages[i]); - if (retval) - return retval; + ret = vm_insert_page(vma, uaddr, buffer->pages[i]); + if (ret) + return ret; uaddr += PAGE_SIZE; } diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c index bbfd6cd..b941ab3 100644 --- a/drivers/firewire/fw-ohci.c +++ b/drivers/firewire/fw-ohci.c @@ -1209,7 +1209,7 @@ static void at_context_transmit(struct context *ctx, struct fw_packet *packet) { unsigned long flags; - int retval; + int ret; spin_lock_irqsave(&ctx->ohci->lock, flags); @@ -1220,10 +1220,10 @@ at_context_transmit(struct context *ctx, struct fw_packet *packet) return; } - retval = at_context_queue_packet(ctx, packet); + ret = at_context_queue_packet(ctx, packet); spin_unlock_irqrestore(&ctx->ohci->lock, flags); - if (retval < 0) + if (ret < 0) packet->callback(packet, &ctx->ohci->card, packet->ack); } @@ -1595,7 +1595,7 @@ ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length) { struct fw_ohci *ohci; unsigned long flags; - int retval = -EBUSY; + int ret = -EBUSY; __be32 *next_config_rom; dma_addr_t uninitialized_var(next_config_rom_bus); @@ -1649,7 +1649,7 @@ ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length) reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus); - retval = 0; + ret = 0; } spin_unlock_irqrestore(&ohci->lock, flags); @@ -1661,13 +1661,13 @@ ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length) * controller could need to access it before the bus reset * takes effect. */ - if (retval == 0) + if (ret == 0) fw_core_initiate_bus_reset(&ohci->card, 1); else dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, next_config_rom, next_config_rom_bus); - return retval; + return ret; } static void ohci_send_request(struct fw_card *card, struct fw_packet *packet) @@ -1689,7 +1689,7 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet) struct fw_ohci *ohci = fw_ohci(card); struct context *ctx = &ohci->at_request_ctx; struct driver_data *driver_data = packet->driver_data; - int retval = -ENOENT; + int ret = -ENOENT; tasklet_disable(&ctx->tasklet); @@ -1704,12 +1704,11 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet) driver_data->packet = NULL; packet->ack = RCODE_CANCELLED; packet->callback(packet, &ohci->card, packet->ack); - retval = 0; - + ret = 0; out: tasklet_enable(&ctx->tasklet); - return retval; + return ret; } static int @@ -1720,7 +1719,7 @@ ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation) #else struct fw_ohci *ohci = fw_ohci(card); unsigned long flags; - int n, retval = 0; + int n, ret = 0; /* * FIXME: Make sure this bitmask is cleared when we clear the busReset @@ -1730,7 +1729,7 @@ ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation) spin_lock_irqsave(&ohci->lock, flags); if (ohci->generation != generation) { - retval = -ESTALE; + ret = -ESTALE; goto out; } @@ -1748,7 +1747,8 @@ ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation) flush_writes(ohci); out: spin_unlock_irqrestore(&ohci->lock, flags); - return retval; + + return ret; #endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */ } @@ -1892,7 +1892,7 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size) descriptor_callback_t callback; u32 *mask, regs; unsigned long flags; - int index, retval = 
-ENOMEM; + int index, ret = -ENOMEM; if (type == FW_ISO_CONTEXT_TRANSMIT) { mask = &ohci->it_context_mask; @@ -1928,8 +1928,8 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size) if (ctx->header == NULL) goto out; - retval = context_init(&ctx->context, ohci, regs, callback); - if (retval < 0) + ret = context_init(&ctx->context, ohci, regs, callback); + if (ret < 0) goto out_with_header; return &ctx->base; @@ -1941,7 +1941,7 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size) *mask |= 1 << index; spin_unlock_irqrestore(&ohci->lock, flags); - return ERR_PTR(retval); + return ERR_PTR(ret); } static int ohci_start_iso(struct fw_iso_context *base, @@ -2291,21 +2291,20 @@ ohci_queue_iso(struct fw_iso_context *base, { struct iso_context *ctx = container_of(base, struct iso_context, base); unsigned long flags; - int retval; + int ret; spin_lock_irqsave(&ctx->context.ohci->lock, flags); if (base->type == FW_ISO_CONTEXT_TRANSMIT) - retval = ohci_queue_iso_transmit(base, packet, buffer, payload); + ret = ohci_queue_iso_transmit(base, packet, buffer, payload); else if (ctx->context.ohci->use_dualbuffer) - retval = ohci_queue_iso_receive_dualbuffer(base, packet, - buffer, payload); + ret = ohci_queue_iso_receive_dualbuffer(base, packet, + buffer, payload); else - retval = ohci_queue_iso_receive_packet_per_buffer(base, packet, - buffer, - payload); + ret = ohci_queue_iso_receive_packet_per_buffer(base, packet, + buffer, payload); spin_unlock_irqrestore(&ctx->context.ohci->lock, flags); - return retval; + return ret; } static const struct fw_card_driver ohci_driver = { diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c index 058f5ed..e17ebc4 100644 --- a/drivers/firewire/fw-transaction.c +++ b/drivers/firewire/fw-transaction.c @@ -943,11 +943,11 @@ static struct fw_descriptor model_id_descriptor = { static int __init fw_core_init(void) { - int retval; + int ret; - retval = bus_register(&fw_bus_type); - if (retval < 0) - return retval; + ret = bus_register(&fw_bus_type); + if (ret < 0) + return ret; fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops); if (fw_cdev_major < 0) { -- cgit v0.10.2 From 53dca51175cc2f66d21aeb1e70146cca65c53dad Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Sun, 14 Dec 2008 21:47:04 +0100 Subject: firewire: remove line breaks before function names type function_name(parameters); is nice to look at but was not used consistently. 
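For clarity, here is a small before/after sketch of the declaration style being converted (the function name below is invented for the example, not taken from the driver):

	/* before: return type on a line of its own */
	static struct foo_context *
	foo_allocate_context(struct foo_card *card, int type);

	/* after: return type and function name on one line, arguments wrapped */
	static struct foo_context *foo_allocate_context(struct foo_card *card,
							int type);
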
Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c index a5dd7a6..27a3aae 100644 --- a/drivers/firewire/fw-card.c +++ b/drivers/firewire/fw-card.c @@ -63,8 +63,7 @@ static int descriptor_count; #define BIB_CMC ((1) << 30) #define BIB_IMC ((1) << 31) -static u32 * -generate_config_rom(struct fw_card *card, size_t *config_rom_length) +static u32 *generate_config_rom(struct fw_card *card, size_t *config_rom_length) { struct fw_descriptor *desc; static u32 config_rom[256]; @@ -128,8 +127,7 @@ generate_config_rom(struct fw_card *card, size_t *config_rom_length) return config_rom; } -static void -update_config_roms(void) +static void update_config_roms(void) { struct fw_card *card; u32 *config_rom; @@ -141,8 +139,7 @@ update_config_roms(void) } } -int -fw_core_add_descriptor(struct fw_descriptor *desc) +int fw_core_add_descriptor(struct fw_descriptor *desc) { size_t i; @@ -171,8 +168,7 @@ fw_core_add_descriptor(struct fw_descriptor *desc) return 0; } -void -fw_core_remove_descriptor(struct fw_descriptor *desc) +void fw_core_remove_descriptor(struct fw_descriptor *desc) { mutex_lock(&card_mutex); @@ -189,8 +185,7 @@ static const char gap_count_table[] = { 63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40 }; -void -fw_schedule_bm_work(struct fw_card *card, unsigned long delay) +void fw_schedule_bm_work(struct fw_card *card, unsigned long delay) { int scheduled; @@ -200,8 +195,7 @@ fw_schedule_bm_work(struct fw_card *card, unsigned long delay) fw_card_put(card); } -static void -fw_card_bm_work(struct work_struct *work) +static void fw_card_bm_work(struct work_struct *work) { struct fw_card *card = container_of(work, struct fw_card, work.work); struct fw_device *root_device; @@ -371,17 +365,16 @@ fw_card_bm_work(struct work_struct *work) fw_card_put(card); } -static void -flush_timer_callback(unsigned long data) +static void flush_timer_callback(unsigned long data) { struct fw_card *card = (struct fw_card *)data; fw_flush_transactions(card); } -void -fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver, - struct device *device) +void fw_card_initialize(struct fw_card *card, + const struct fw_card_driver *driver, + struct device *device) { static atomic_t index = ATOMIC_INIT(-1); @@ -406,9 +399,8 @@ fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver, } EXPORT_SYMBOL(fw_card_initialize); -int -fw_card_add(struct fw_card *card, - u32 max_receive, u32 link_speed, u64 guid) +int fw_card_add(struct fw_card *card, + u32 max_receive, u32 link_speed, u64 guid) { u32 *config_rom; size_t length; @@ -442,23 +434,20 @@ EXPORT_SYMBOL(fw_card_add); * dummy driver just fails all IO. 
*/ -static int -dummy_enable(struct fw_card *card, u32 *config_rom, size_t length) +static int dummy_enable(struct fw_card *card, u32 *config_rom, size_t length) { BUG(); return -1; } -static int -dummy_update_phy_reg(struct fw_card *card, int address, - int clear_bits, int set_bits) +static int dummy_update_phy_reg(struct fw_card *card, int address, + int clear_bits, int set_bits) { return -ENODEV; } -static int -dummy_set_config_rom(struct fw_card *card, - u32 *config_rom, size_t length) +static int dummy_set_config_rom(struct fw_card *card, + u32 *config_rom, size_t length) { /* * We take the card out of card_list before setting the dummy @@ -468,27 +457,23 @@ dummy_set_config_rom(struct fw_card *card, return -1; } -static void -dummy_send_request(struct fw_card *card, struct fw_packet *packet) +static void dummy_send_request(struct fw_card *card, struct fw_packet *packet) { packet->callback(packet, card, -ENODEV); } -static void -dummy_send_response(struct fw_card *card, struct fw_packet *packet) +static void dummy_send_response(struct fw_card *card, struct fw_packet *packet) { packet->callback(packet, card, -ENODEV); } -static int -dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet) +static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet) { return -ENOENT; } -static int -dummy_enable_phys_dma(struct fw_card *card, - int node_id, int generation) +static int dummy_enable_phys_dma(struct fw_card *card, + int node_id, int generation) { return -ENODEV; } @@ -503,16 +488,14 @@ static struct fw_card_driver dummy_driver = { .enable_phys_dma = dummy_enable_phys_dma, }; -void -fw_card_release(struct kref *kref) +void fw_card_release(struct kref *kref) { struct fw_card *card = container_of(kref, struct fw_card, kref); complete(&card->done); } -void -fw_core_remove_card(struct fw_card *card) +void fw_core_remove_card(struct fw_card *card) { card->driver->update_phy_reg(card, 4, PHY_LINK_ACTIVE | PHY_CONTENDER, 0); @@ -536,8 +519,7 @@ fw_core_remove_card(struct fw_card *card) } EXPORT_SYMBOL(fw_core_remove_card); -int -fw_core_initiate_bus_reset(struct fw_card *card, int short_reset) +int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset) { int reg = short_reset ? 5 : 1; int bit = short_reset ? 
PHY_BUS_SHORT_RESET : PHY_BUS_RESET; diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c index 07c32a4..3c075b2 100644 --- a/drivers/firewire/fw-cdev.c +++ b/drivers/firewire/fw-cdev.c @@ -96,14 +96,12 @@ struct client { struct list_head link; }; -static inline void __user * -u64_to_uptr(__u64 value) +static inline void __user *u64_to_uptr(__u64 value) { return (void __user *)(unsigned long)value; } -static inline __u64 -uptr_to_u64(void __user *ptr) +static inline __u64 uptr_to_u64(void __user *ptr) { return (__u64)(unsigned long)ptr; } @@ -163,8 +161,8 @@ static void queue_event(struct client *client, struct event *event, wake_up_interruptible(&client->wait); } -static int -dequeue_event(struct client *client, char __user *buffer, size_t count) +static int dequeue_event(struct client *client, + char __user *buffer, size_t count) { unsigned long flags; struct event *event; @@ -203,18 +201,16 @@ dequeue_event(struct client *client, char __user *buffer, size_t count) return ret; } -static ssize_t -fw_device_op_read(struct file *file, - char __user *buffer, size_t count, loff_t *offset) +static ssize_t fw_device_op_read(struct file *file, char __user *buffer, + size_t count, loff_t *offset) { struct client *client = file->private_data; return dequeue_event(client, buffer, count); } -static void -fill_bus_reset_event(struct fw_cdev_event_bus_reset *event, - struct client *client) +static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event, + struct client *client) { struct fw_card *card = client->device->card; unsigned long flags; @@ -233,9 +229,8 @@ fill_bus_reset_event(struct fw_cdev_event_bus_reset *event, spin_unlock_irqrestore(&card->lock, flags); } -static void -for_each_client(struct fw_device *device, - void (*callback)(struct client *client)) +static void for_each_client(struct fw_device *device, + void (*callback)(struct client *client)) { struct client *c; @@ -245,8 +240,7 @@ for_each_client(struct fw_device *device, mutex_unlock(&device->client_list_mutex); } -static void -queue_bus_reset_event(struct client *client) +static void queue_bus_reset_event(struct client *client) { struct bus_reset *bus_reset; @@ -316,9 +310,8 @@ static int ioctl_get_info(struct client *client, void *buffer) return 0; } -static int -add_client_resource(struct client *client, struct client_resource *resource, - gfp_t gfp_mask) +static int add_client_resource(struct client *client, + struct client_resource *resource, gfp_t gfp_mask) { unsigned long flags; int ret; @@ -341,10 +334,9 @@ add_client_resource(struct client *client, struct client_resource *resource, return ret < 0 ? 
ret : 0; } -static int -release_client_resource(struct client *client, u32 handle, - client_resource_release_fn_t release, - struct client_resource **resource) +static int release_client_resource(struct client *client, u32 handle, + client_resource_release_fn_t release, + struct client_resource **resource) { struct client_resource *r; unsigned long flags; @@ -369,8 +361,8 @@ release_client_resource(struct client *client, u32 handle, return 0; } -static void -release_transaction(struct client *client, struct client_resource *resource) +static void release_transaction(struct client *client, + struct client_resource *resource) { struct response *response = container_of(resource, struct response, resource); @@ -378,9 +370,8 @@ release_transaction(struct client *client, struct client_resource *resource) fw_cancel_transaction(client->device->card, &response->transaction); } -static void -complete_transaction(struct fw_card *card, int rcode, - void *payload, size_t length, void *data) +static void complete_transaction(struct fw_card *card, int rcode, + void *payload, size_t length, void *data) { struct response *response = data; struct client *client = response->client; @@ -506,8 +497,8 @@ struct request_event { struct fw_cdev_event_request request; }; -static void -release_request(struct client *client, struct client_resource *resource) +static void release_request(struct client *client, + struct client_resource *resource) { struct request *request = container_of(resource, struct request, resource); @@ -517,12 +508,11 @@ release_request(struct client *client, struct client_resource *resource) kfree(request); } -static void -handle_request(struct fw_card *card, struct fw_request *r, - int tcode, int destination, int source, - int generation, int speed, - unsigned long long offset, - void *payload, size_t length, void *callback_data) +static void handle_request(struct fw_card *card, struct fw_request *r, + int tcode, int destination, int source, + int generation, int speed, + unsigned long long offset, + void *payload, size_t length, void *callback_data) { struct address_handler *handler = callback_data; struct request *request; @@ -561,9 +551,8 @@ handle_request(struct fw_card *card, struct fw_request *r, fw_send_response(card, r, RCODE_CONFLICT_ERROR); } -static void -release_address_handler(struct client *client, - struct client_resource *resource) +static void release_address_handler(struct client *client, + struct client_resource *resource) { struct address_handler *handler = container_of(resource, struct address_handler, resource); @@ -716,9 +705,8 @@ static int ioctl_remove_descriptor(struct client *client, void *buffer) release_descriptor, NULL); } -static void -iso_callback(struct fw_iso_context *context, u32 cycle, - size_t header_length, void *header, void *data) +static void iso_callback(struct fw_iso_context *context, u32 cycle, + size_t header_length, void *header, void *data) { struct client *client = data; struct iso_interrupt *irq; @@ -954,8 +942,8 @@ static int (* const ioctl_handlers[])(struct client *client, void *buffer) = { ioctl_get_cycle_timer, }; -static int -dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg) +static int dispatch_ioctl(struct client *client, + unsigned int cmd, void __user *arg) { char buffer[256]; int ret; @@ -983,9 +971,8 @@ dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg) return ret; } -static long -fw_device_op_ioctl(struct file *file, - unsigned int cmd, unsigned long arg) +static long 
fw_device_op_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) { struct client *client = file->private_data; @@ -996,9 +983,8 @@ fw_device_op_ioctl(struct file *file, } #ifdef CONFIG_COMPAT -static long -fw_device_op_compat_ioctl(struct file *file, - unsigned int cmd, unsigned long arg) +static long fw_device_op_compat_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) { struct client *client = file->private_data; diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c index 2de3dd5..ac5043c 100644 --- a/drivers/firewire/fw-device.c +++ b/drivers/firewire/fw-device.c @@ -134,8 +134,7 @@ static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size) vendor, model, specifier_id, version); } -static int -fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env) +static int fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env) { struct fw_unit *unit = fw_unit(dev); char modalias[64]; @@ -193,8 +192,8 @@ struct config_rom_attribute { u32 key; }; -static ssize_t -show_immediate(struct device *dev, struct device_attribute *dattr, char *buf) +static ssize_t show_immediate(struct device *dev, + struct device_attribute *dattr, char *buf) { struct config_rom_attribute *attr = container_of(dattr, struct config_rom_attribute, attr); @@ -225,8 +224,8 @@ show_immediate(struct device *dev, struct device_attribute *dattr, char *buf) #define IMMEDIATE_ATTR(name, key) \ { __ATTR(name, S_IRUGO, show_immediate, NULL), key } -static ssize_t -show_text_leaf(struct device *dev, struct device_attribute *dattr, char *buf) +static ssize_t show_text_leaf(struct device *dev, + struct device_attribute *dattr, char *buf) { struct config_rom_attribute *attr = container_of(dattr, struct config_rom_attribute, attr); @@ -295,10 +294,9 @@ static struct config_rom_attribute config_rom_attributes[] = { TEXT_LEAF_ATTR(hardware_version_name, CSR_HARDWARE_VERSION), }; -static void -init_fw_attribute_group(struct device *dev, - struct device_attribute *attrs, - struct fw_attribute_group *group) +static void init_fw_attribute_group(struct device *dev, + struct device_attribute *attrs, + struct fw_attribute_group *group) { struct device_attribute *attr; int i, j; @@ -321,9 +319,8 @@ init_fw_attribute_group(struct device *dev, dev->groups = group->groups; } -static ssize_t -modalias_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t modalias_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct fw_unit *unit = fw_unit(dev); int length; @@ -334,9 +331,8 @@ modalias_show(struct device *dev, return length + 1; } -static ssize_t -rom_index_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t rom_index_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct fw_device *device = fw_device(dev->parent); struct fw_unit *unit = fw_unit(dev); @@ -351,8 +347,8 @@ static struct device_attribute fw_unit_attributes[] = { __ATTR_NULL, }; -static ssize_t -config_rom_show(struct device *dev, struct device_attribute *attr, char *buf) +static ssize_t config_rom_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct fw_device *device = fw_device(dev); size_t length; @@ -365,8 +361,8 @@ config_rom_show(struct device *dev, struct device_attribute *attr, char *buf) return length; } -static ssize_t -guid_show(struct device *dev, struct device_attribute *attr, char *buf) +static ssize_t guid_show(struct device *dev, + struct device_attribute *attr, 
char *buf) { struct fw_device *device = fw_device(dev); int ret; @@ -385,8 +381,8 @@ static struct device_attribute fw_device_attributes[] = { __ATTR_NULL, }; -static int -read_rom(struct fw_device *device, int generation, int index, u32 *data) +static int read_rom(struct fw_device *device, + int generation, int index, u32 *data) { int rcode; diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h index 655d7e8..41483f1 100644 --- a/drivers/firewire/fw-device.h +++ b/drivers/firewire/fw-device.h @@ -180,8 +180,7 @@ struct fw_driver { const struct fw_device_id *id_table; }; -static inline struct fw_driver * -fw_driver(struct device_driver *drv) +static inline struct fw_driver *fw_driver(struct device_driver *drv) { return container_of(drv, struct fw_driver, driver); } diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c index cb32b20..3ff2cfc 100644 --- a/drivers/firewire/fw-iso.c +++ b/drivers/firewire/fw-iso.c @@ -28,9 +28,8 @@ #include "fw-topology.h" #include "fw-device.h" -int -fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, - int page_count, enum dma_data_direction direction) +int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, + int page_count, enum dma_data_direction direction) { int i, j; dma_addr_t address; @@ -105,10 +104,9 @@ void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, buffer->pages = NULL; } -struct fw_iso_context * -fw_iso_context_create(struct fw_card *card, int type, - int channel, int speed, size_t header_size, - fw_iso_callback_t callback, void *callback_data) +struct fw_iso_context *fw_iso_context_create(struct fw_card *card, + int type, int channel, int speed, size_t header_size, + fw_iso_callback_t callback, void *callback_data) { struct fw_iso_context *ctx; @@ -134,25 +132,23 @@ void fw_iso_context_destroy(struct fw_iso_context *ctx) card->driver->free_iso_context(ctx); } -int -fw_iso_context_start(struct fw_iso_context *ctx, int cycle, int sync, int tags) +int fw_iso_context_start(struct fw_iso_context *ctx, + int cycle, int sync, int tags) { return ctx->card->driver->start_iso(ctx, cycle, sync, tags); } -int -fw_iso_context_queue(struct fw_iso_context *ctx, - struct fw_iso_packet *packet, - struct fw_iso_buffer *buffer, - unsigned long payload) +int fw_iso_context_queue(struct fw_iso_context *ctx, + struct fw_iso_packet *packet, + struct fw_iso_buffer *buffer, + unsigned long payload) { struct fw_card *card = ctx->card; return card->driver->queue_iso(ctx, packet, buffer, payload); } -int -fw_iso_context_stop(struct fw_iso_context *ctx) +int fw_iso_context_stop(struct fw_iso_context *ctx) { return ctx->card->driver->stop_iso(ctx); } diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c index b941ab3..4c7cf15 100644 --- a/drivers/firewire/fw-ohci.c +++ b/drivers/firewire/fw-ohci.c @@ -441,9 +441,8 @@ static inline void flush_writes(const struct fw_ohci *ohci) reg_read(ohci, OHCI1394_Version); } -static int -ohci_update_phy_reg(struct fw_card *card, int addr, - int clear_bits, int set_bits) +static int ohci_update_phy_reg(struct fw_card *card, int addr, + int clear_bits, int set_bits) { struct fw_ohci *ohci = fw_ohci(card); u32 val, old; @@ -658,8 +657,8 @@ static void ar_context_tasklet(unsigned long data) } } -static int -ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs) +static int ar_context_init(struct ar_context *ctx, + struct fw_ohci *ohci, u32 regs) { struct ar_buffer ab; @@ -690,8 +689,7 @@ static void ar_context_run(struct ar_context 
*ctx) flush_writes(ctx->ohci); } -static struct descriptor * -find_branch_descriptor(struct descriptor *d, int z) +static struct descriptor *find_branch_descriptor(struct descriptor *d, int z) { int b, key; @@ -751,8 +749,7 @@ static void context_tasklet(unsigned long data) * Allocate a new buffer and add it to the list of free buffers for this * context. Must be called with ohci->lock held. */ -static int -context_add_buffer(struct context *ctx) +static int context_add_buffer(struct context *ctx) { struct descriptor_buffer *desc; dma_addr_t uninitialized_var(bus_addr); @@ -781,9 +778,8 @@ context_add_buffer(struct context *ctx) return 0; } -static int -context_init(struct context *ctx, struct fw_ohci *ohci, - u32 regs, descriptor_callback_t callback) +static int context_init(struct context *ctx, struct fw_ohci *ohci, + u32 regs, descriptor_callback_t callback) { ctx->ohci = ohci; ctx->regs = regs; @@ -814,8 +810,7 @@ context_init(struct context *ctx, struct fw_ohci *ohci, return 0; } -static void -context_release(struct context *ctx) +static void context_release(struct context *ctx) { struct fw_card *card = &ctx->ohci->card; struct descriptor_buffer *desc, *tmp; @@ -827,8 +822,8 @@ context_release(struct context *ctx) } /* Must be called with ohci->lock held */ -static struct descriptor * -context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus) +static struct descriptor *context_get_descriptors(struct context *ctx, + int z, dma_addr_t *d_bus) { struct descriptor *d = NULL; struct descriptor_buffer *desc = ctx->buffer_tail; @@ -912,8 +907,8 @@ struct driver_data { * Must always be called with the ochi->lock held to ensure proper * generation handling and locking around packet queue manipulation. */ -static int -at_context_queue_packet(struct context *ctx, struct fw_packet *packet) +static int at_context_queue_packet(struct context *ctx, + struct fw_packet *packet) { struct fw_ohci *ohci = ctx->ohci; dma_addr_t d_bus, uninitialized_var(payload_bus); @@ -1095,8 +1090,8 @@ static int handle_at_packet(struct context *context, #define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff) #define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff) -static void -handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr) +static void handle_local_rom(struct fw_ohci *ohci, + struct fw_packet *packet, u32 csr) { struct fw_packet response; int tcode, length, i; @@ -1122,8 +1117,8 @@ handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr) fw_core_handle_response(&ohci->card, &response); } -static void -handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr) +static void handle_local_lock(struct fw_ohci *ohci, + struct fw_packet *packet, u32 csr) { struct fw_packet response; int tcode, length, ext_tcode, sel; @@ -1164,8 +1159,7 @@ handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr) fw_core_handle_response(&ohci->card, &response); } -static void -handle_local_request(struct context *ctx, struct fw_packet *packet) +static void handle_local_request(struct context *ctx, struct fw_packet *packet) { u64 offset; u32 csr; @@ -1205,8 +1199,7 @@ handle_local_request(struct context *ctx, struct fw_packet *packet) } } -static void -at_context_transmit(struct context *ctx, struct fw_packet *packet) +static void at_context_transmit(struct context *ctx, struct fw_packet *packet) { unsigned long flags; int ret; @@ -1590,8 +1583,8 @@ static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length) return 0; } -static int 
-ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length) +static int ohci_set_config_rom(struct fw_card *card, + u32 *config_rom, size_t length) { struct fw_ohci *ohci; unsigned long flags; @@ -1711,8 +1704,8 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet) return ret; } -static int -ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation) +static int ohci_enable_phys_dma(struct fw_card *card, + int node_id, int generation) { #ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA return 0; @@ -1752,8 +1745,7 @@ ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation) #endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */ } -static u64 -ohci_get_bus_time(struct fw_card *card) +static u64 ohci_get_bus_time(struct fw_card *card) { struct fw_ohci *ohci = fw_ohci(card); u32 cycle_time; @@ -1884,8 +1876,8 @@ static int handle_it_packet(struct context *context, return 1; } -static struct fw_iso_context * -ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size) +static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card, + int type, size_t header_size) { struct fw_ohci *ohci = fw_ohci(card); struct iso_context *ctx, *list; @@ -2025,11 +2017,10 @@ static void ohci_free_iso_context(struct fw_iso_context *base) spin_unlock_irqrestore(&ohci->lock, flags); } -static int -ohci_queue_iso_transmit(struct fw_iso_context *base, - struct fw_iso_packet *packet, - struct fw_iso_buffer *buffer, - unsigned long payload) +static int ohci_queue_iso_transmit(struct fw_iso_context *base, + struct fw_iso_packet *packet, + struct fw_iso_buffer *buffer, + unsigned long payload) { struct iso_context *ctx = container_of(base, struct iso_context, base); struct descriptor *d, *last, *pd; @@ -2124,11 +2115,10 @@ ohci_queue_iso_transmit(struct fw_iso_context *base, return 0; } -static int -ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base, - struct fw_iso_packet *packet, - struct fw_iso_buffer *buffer, - unsigned long payload) +static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base, + struct fw_iso_packet *packet, + struct fw_iso_buffer *buffer, + unsigned long payload) { struct iso_context *ctx = container_of(base, struct iso_context, base); struct db_descriptor *db = NULL; @@ -2205,11 +2195,10 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base, return 0; } -static int -ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, - struct fw_iso_packet *packet, - struct fw_iso_buffer *buffer, - unsigned long payload) +static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, + struct fw_iso_packet *packet, + struct fw_iso_buffer *buffer, + unsigned long payload) { struct iso_context *ctx = container_of(base, struct iso_context, base); struct descriptor *d = NULL, *pd = NULL; @@ -2283,11 +2272,10 @@ ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, return 0; } -static int -ohci_queue_iso(struct fw_iso_context *base, - struct fw_iso_packet *packet, - struct fw_iso_buffer *buffer, - unsigned long payload) +static int ohci_queue_iso(struct fw_iso_context *base, + struct fw_iso_packet *packet, + struct fw_iso_buffer *buffer, + unsigned long payload) { struct iso_context *ctx = container_of(base, struct iso_context, base); unsigned long flags; @@ -2353,8 +2341,8 @@ static void ohci_pmac_off(struct pci_dev *dev) #define ohci_pmac_off(dev) #endif /* CONFIG_PPC_PMAC */ -static int __devinit -pci_probe(struct pci_dev *dev, const struct 
pci_device_id *ent) +static int __devinit pci_probe(struct pci_dev *dev, + const struct pci_device_id *ent) { struct fw_ohci *ohci; u32 bus_options, max_receive, link_speed, version; diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c index c71c441..2bcf515 100644 --- a/drivers/firewire/fw-sbp2.c +++ b/drivers/firewire/fw-sbp2.c @@ -392,20 +392,18 @@ static const struct { } }; -static void -free_orb(struct kref *kref) +static void free_orb(struct kref *kref) { struct sbp2_orb *orb = container_of(kref, struct sbp2_orb, kref); kfree(orb); } -static void -sbp2_status_write(struct fw_card *card, struct fw_request *request, - int tcode, int destination, int source, - int generation, int speed, - unsigned long long offset, - void *payload, size_t length, void *callback_data) +static void sbp2_status_write(struct fw_card *card, struct fw_request *request, + int tcode, int destination, int source, + int generation, int speed, + unsigned long long offset, + void *payload, size_t length, void *callback_data) { struct sbp2_logical_unit *lu = callback_data; struct sbp2_orb *orb; @@ -451,9 +449,8 @@ sbp2_status_write(struct fw_card *card, struct fw_request *request, fw_send_response(card, request, RCODE_COMPLETE); } -static void -complete_transaction(struct fw_card *card, int rcode, - void *payload, size_t length, void *data) +static void complete_transaction(struct fw_card *card, int rcode, + void *payload, size_t length, void *data) { struct sbp2_orb *orb = data; unsigned long flags; @@ -482,9 +479,8 @@ complete_transaction(struct fw_card *card, int rcode, kref_put(&orb->kref, free_orb); } -static void -sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu, - int node_id, int generation, u64 offset) +static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu, + int node_id, int generation, u64 offset) { struct fw_device *device = fw_device(lu->tgt->unit->device.parent); unsigned long flags; @@ -531,8 +527,8 @@ static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu) return retval; } -static void -complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) +static void complete_management_orb(struct sbp2_orb *base_orb, + struct sbp2_status *status) { struct sbp2_management_orb *orb = container_of(base_orb, struct sbp2_management_orb, base); @@ -542,10 +538,9 @@ complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) complete(&orb->done); } -static int -sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, - int generation, int function, int lun_or_login_id, - void *response) +static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, + int generation, int function, + int lun_or_login_id, void *response) { struct fw_device *device = fw_device(lu->tgt->unit->device.parent); struct sbp2_management_orb *orb; @@ -652,9 +647,8 @@ static void sbp2_agent_reset(struct sbp2_logical_unit *lu) &d, sizeof(d)); } -static void -complete_agent_reset_write_no_wait(struct fw_card *card, int rcode, - void *payload, size_t length, void *data) +static void complete_agent_reset_write_no_wait(struct fw_card *card, + int rcode, void *payload, size_t length, void *data) { kfree(data); } @@ -1299,8 +1293,7 @@ static void sbp2_unmap_scatterlist(struct device *card_device, sizeof(orb->page_table), DMA_TO_DEVICE); } -static unsigned int -sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data) +static unsigned int sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data) { int sam_status; @@ -1337,8 
+1330,8 @@ sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data) } } -static void -complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) +static void complete_command_orb(struct sbp2_orb *base_orb, + struct sbp2_status *status) { struct sbp2_command_orb *orb = container_of(base_orb, struct sbp2_command_orb, base); @@ -1384,9 +1377,8 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) orb->done(orb->cmd); } -static int -sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device, - struct sbp2_logical_unit *lu) +static int sbp2_map_scatterlist(struct sbp2_command_orb *orb, + struct fw_device *device, struct sbp2_logical_unit *lu) { struct scatterlist *sg = scsi_sglist(orb->cmd); int i, n; @@ -1584,9 +1576,8 @@ static int sbp2_scsi_abort(struct scsi_cmnd *cmd) * This is the concatenation of target port identifier and logical unit * identifier as per SAM-2...SAM-4 annex A. */ -static ssize_t -sbp2_sysfs_ieee1394_id_show(struct device *dev, struct device_attribute *attr, - char *buf) +static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); struct sbp2_logical_unit *lu; diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c index 8dd6703..b44131c 100644 --- a/drivers/firewire/fw-topology.c +++ b/drivers/firewire/fw-topology.c @@ -314,9 +314,8 @@ typedef void (*fw_node_callback_t)(struct fw_card * card, struct fw_node * node, struct fw_node * parent); -static void -for_each_fw_node(struct fw_card *card, struct fw_node *root, - fw_node_callback_t callback) +static void for_each_fw_node(struct fw_card *card, struct fw_node *root, + fw_node_callback_t callback) { struct list_head list; struct fw_node *node, *next, *child, *parent; @@ -349,9 +348,8 @@ for_each_fw_node(struct fw_card *card, struct fw_node *root, fw_node_put(node); } -static void -report_lost_node(struct fw_card *card, - struct fw_node *node, struct fw_node *parent) +static void report_lost_node(struct fw_card *card, + struct fw_node *node, struct fw_node *parent) { fw_node_event(card, node, FW_NODE_DESTROYED); fw_node_put(node); @@ -360,9 +358,8 @@ report_lost_node(struct fw_card *card, card->bm_retries = 0; } -static void -report_found_node(struct fw_card *card, - struct fw_node *node, struct fw_node *parent) +static void report_found_node(struct fw_card *card, + struct fw_node *node, struct fw_node *parent) { int b_path = (node->phy_speed == SCODE_BETA); @@ -415,8 +412,7 @@ static void move_tree(struct fw_node *node0, struct fw_node *node1, int port) * found, lost or updated. Update the nodes in the card topology tree * as we go. 
*/ -static void -update_tree(struct fw_card *card, struct fw_node *root) +static void update_tree(struct fw_card *card, struct fw_node *root) { struct list_head list0, list1; struct fw_node *node0, *node1, *next1; @@ -497,8 +493,8 @@ update_tree(struct fw_card *card, struct fw_node *root) } } -static void -update_topology_map(struct fw_card *card, u32 *self_ids, int self_id_count) +static void update_topology_map(struct fw_card *card, + u32 *self_ids, int self_id_count) { int node_count; @@ -510,10 +506,8 @@ update_topology_map(struct fw_card *card, u32 *self_ids, int self_id_count) fw_compute_block_crc(card->topology_map); } -void -fw_core_handle_bus_reset(struct fw_card *card, - int node_id, int generation, - int self_id_count, u32 * self_ids) +void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation, + int self_id_count, u32 *self_ids) { struct fw_node *local_node; unsigned long flags; diff --git a/drivers/firewire/fw-topology.h b/drivers/firewire/fw-topology.h index addb9f8..7e930f8 100644 --- a/drivers/firewire/fw-topology.h +++ b/drivers/firewire/fw-topology.h @@ -51,26 +51,21 @@ struct fw_node { struct fw_node *ports[0]; }; -static inline struct fw_node * -fw_node_get(struct fw_node *node) +static inline struct fw_node *fw_node_get(struct fw_node *node) { atomic_inc(&node->ref_count); return node; } -static inline void -fw_node_put(struct fw_node *node) +static inline void fw_node_put(struct fw_node *node) { if (atomic_dec_and_test(&node->ref_count)) kfree(node); } -void -fw_destroy_nodes(struct fw_card *card); - -int -fw_compute_block_crc(u32 *block); +void fw_destroy_nodes(struct fw_card *card); +int fw_compute_block_crc(u32 *block); #endif /* __fw_topology_h */ diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c index e17ebc4..1537737 100644 --- a/drivers/firewire/fw-transaction.c +++ b/drivers/firewire/fw-transaction.c @@ -64,10 +64,9 @@ #define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23)) #define PHY_IDENTIFIER(id) ((id) << 30) -static int -close_transaction(struct fw_transaction *transaction, - struct fw_card *card, int rcode, - u32 *payload, size_t length) +static int close_transaction(struct fw_transaction *transaction, + struct fw_card *card, int rcode, + u32 *payload, size_t length) { struct fw_transaction *t; unsigned long flags; @@ -94,9 +93,8 @@ close_transaction(struct fw_transaction *transaction, * Only valid for transactions that are potentially pending (ie have * been sent). */ -int -fw_cancel_transaction(struct fw_card *card, - struct fw_transaction *transaction) +int fw_cancel_transaction(struct fw_card *card, + struct fw_transaction *transaction) { /* * Cancel the packet transmission if it's still queued. 
That @@ -116,9 +114,8 @@ fw_cancel_transaction(struct fw_card *card, } EXPORT_SYMBOL(fw_cancel_transaction); -static void -transmit_complete_callback(struct fw_packet *packet, - struct fw_card *card, int status) +static void transmit_complete_callback(struct fw_packet *packet, + struct fw_card *card, int status) { struct fw_transaction *t = container_of(packet, struct fw_transaction, packet); @@ -151,8 +148,7 @@ transmit_complete_callback(struct fw_packet *packet, } } -static void -fw_fill_request(struct fw_packet *packet, int tcode, int tlabel, +static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel, int destination_id, int source_id, int generation, int speed, unsigned long long offset, void *payload, size_t length) { @@ -247,12 +243,10 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel, * @param callback_data pointer to arbitrary data, which will be * passed to the callback */ -void -fw_send_request(struct fw_card *card, struct fw_transaction *t, - int tcode, int destination_id, int generation, int speed, - unsigned long long offset, - void *payload, size_t length, - fw_transaction_callback_t callback, void *callback_data) +void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode, + int destination_id, int generation, int speed, + unsigned long long offset, void *payload, size_t length, + fw_transaction_callback_t callback, void *callback_data) { unsigned long flags; int tlabel; @@ -322,8 +316,8 @@ static void transaction_callback(struct fw_card *card, int rcode, * Returns the RCODE. */ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, - int generation, int speed, unsigned long long offset, - void *data, size_t length) + int generation, int speed, unsigned long long offset, + void *data, size_t length) { struct transaction_callback_data d; struct fw_transaction t; @@ -399,9 +393,8 @@ void fw_flush_transactions(struct fw_card *card) } } -static struct fw_address_handler * -lookup_overlapping_address_handler(struct list_head *list, - unsigned long long offset, size_t length) +static struct fw_address_handler *lookup_overlapping_address_handler( + struct list_head *list, unsigned long long offset, size_t length) { struct fw_address_handler *handler; @@ -414,9 +407,8 @@ lookup_overlapping_address_handler(struct list_head *list, return NULL; } -static struct fw_address_handler * -lookup_enclosing_address_handler(struct list_head *list, - unsigned long long offset, size_t length) +static struct fw_address_handler *lookup_enclosing_address_handler( + struct list_head *list, unsigned long long offset, size_t length) { struct fw_address_handler *handler; @@ -463,9 +455,8 @@ const struct fw_address_region fw_unit_space_region = * The start offset of the handler's address region is determined by * fw_core_add_address_handler() and is returned in handler->offset. 
*/ -int -fw_core_add_address_handler(struct fw_address_handler *handler, - const struct fw_address_region *region) +int fw_core_add_address_handler(struct fw_address_handler *handler, + const struct fw_address_region *region) { struct fw_address_handler *other; unsigned long flags; @@ -522,9 +513,8 @@ struct fw_request { u32 data[0]; }; -static void -free_response_callback(struct fw_packet *packet, - struct fw_card *card, int status) +static void free_response_callback(struct fw_packet *packet, + struct fw_card *card, int status) { struct fw_request *request; @@ -532,9 +522,8 @@ free_response_callback(struct fw_packet *packet, kfree(request); } -void -fw_fill_response(struct fw_packet *response, u32 *request_header, - int rcode, void *payload, size_t length) +void fw_fill_response(struct fw_packet *response, u32 *request_header, + int rcode, void *payload, size_t length) { int tcode, tlabel, extended_tcode, source, destination; @@ -592,8 +581,7 @@ fw_fill_response(struct fw_packet *response, u32 *request_header, } EXPORT_SYMBOL(fw_fill_response); -static struct fw_request * -allocate_request(struct fw_packet *p) +static struct fw_request *allocate_request(struct fw_packet *p) { struct fw_request *request; u32 *data, length; @@ -653,8 +641,8 @@ allocate_request(struct fw_packet *p) return request; } -void -fw_send_response(struct fw_card *card, struct fw_request *request, int rcode) +void fw_send_response(struct fw_card *card, + struct fw_request *request, int rcode) { /* unified transaction or broadcast transaction: don't respond */ if (request->ack != ACK_PENDING || @@ -674,8 +662,7 @@ fw_send_response(struct fw_card *card, struct fw_request *request, int rcode) } EXPORT_SYMBOL(fw_send_response); -void -fw_core_handle_request(struct fw_card *card, struct fw_packet *p) +void fw_core_handle_request(struct fw_card *card, struct fw_packet *p) { struct fw_address_handler *handler; struct fw_request *request; @@ -723,8 +710,7 @@ fw_core_handle_request(struct fw_card *card, struct fw_packet *p) } EXPORT_SYMBOL(fw_core_handle_request); -void -fw_core_handle_response(struct fw_card *card, struct fw_packet *p) +void fw_core_handle_response(struct fw_card *card, struct fw_packet *p) { struct fw_transaction *t; unsigned long flags; @@ -797,12 +783,10 @@ static const struct fw_address_region topology_map_region = { .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP, .end = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, }; -static void -handle_topology_map(struct fw_card *card, struct fw_request *request, - int tcode, int destination, int source, - int generation, int speed, - unsigned long long offset, - void *payload, size_t length, void *callback_data) +static void handle_topology_map(struct fw_card *card, struct fw_request *request, + int tcode, int destination, int source, int generation, + int speed, unsigned long long offset, + void *payload, size_t length, void *callback_data) { int i, start, end; __be32 *map; @@ -836,12 +820,10 @@ static const struct fw_address_region registers_region = { .start = CSR_REGISTER_BASE, .end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, }; -static void -handle_registers(struct fw_card *card, struct fw_request *request, - int tcode, int destination, int source, - int generation, int speed, - unsigned long long offset, - void *payload, size_t length, void *callback_data) +static void handle_registers(struct fw_card *card, struct fw_request *request, + int tcode, int destination, int source, int generation, + int speed, unsigned long long offset, + void *payload, size_t length, void 
*callback_data) { int reg = offset & ~CSR_REGISTER_BASE; unsigned long long bus_time; diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h index 1d78e9c..7112e62 100644 --- a/drivers/firewire/fw-transaction.h +++ b/drivers/firewire/fw-transaction.h @@ -88,8 +88,7 @@ #define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args) #define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args) -static inline void -fw_memcpy_from_be32(void *_dst, void *_src, size_t size) +static inline void fw_memcpy_from_be32(void *_dst, void *_src, size_t size) { u32 *dst = _dst; __be32 *src = _src; @@ -99,8 +98,7 @@ fw_memcpy_from_be32(void *_dst, void *_src, size_t size) dst[i] = be32_to_cpu(src[i]); } -static inline void -fw_memcpy_to_be32(void *_dst, void *_src, size_t size) +static inline void fw_memcpy_to_be32(void *_dst, void *_src, size_t size) { fw_memcpy_from_be32(_dst, _src, size); } @@ -125,8 +123,7 @@ typedef void (*fw_packet_callback_t)(struct fw_packet *packet, struct fw_card *card, int status); typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode, - void *data, - size_t length, + void *data, size_t length, void *callback_data); /* @@ -201,7 +198,6 @@ struct fw_address_handler { struct list_head link; }; - struct fw_address_region { u64 start; u64 end; @@ -315,10 +311,8 @@ struct fw_iso_packet { struct fw_iso_context; typedef void (*fw_iso_callback_t)(struct fw_iso_context *context, - u32 cycle, - size_t header_length, - void *header, - void *data); + u32 cycle, size_t header_length, + void *header, void *data); /* * An iso buffer is just a set of pages mapped for DMA in the @@ -344,36 +338,22 @@ struct fw_iso_context { void *callback_data; }; -int -fw_iso_buffer_init(struct fw_iso_buffer *buffer, - struct fw_card *card, - int page_count, - enum dma_data_direction direction); -int -fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma); -void -fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card); - -struct fw_iso_context * -fw_iso_context_create(struct fw_card *card, int type, - int channel, int speed, size_t header_size, - fw_iso_callback_t callback, void *callback_data); - -void -fw_iso_context_destroy(struct fw_iso_context *ctx); - -int -fw_iso_context_queue(struct fw_iso_context *ctx, - struct fw_iso_packet *packet, - struct fw_iso_buffer *buffer, - unsigned long payload); - -int -fw_iso_context_start(struct fw_iso_context *ctx, - int cycle, int sync, int tags); - -int -fw_iso_context_stop(struct fw_iso_context *ctx); +int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, + int page_count, enum dma_data_direction direction); +int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma); +void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card); + +struct fw_iso_context *fw_iso_context_create(struct fw_card *card, + int type, int channel, int speed, size_t header_size, + fw_iso_callback_t callback, void *callback_data); +int fw_iso_context_queue(struct fw_iso_context *ctx, + struct fw_iso_packet *packet, + struct fw_iso_buffer *buffer, + unsigned long payload); +int fw_iso_context_start(struct fw_iso_context *ctx, + int cycle, int sync, int tags); +int fw_iso_context_stop(struct fw_iso_context *ctx); +void fw_iso_context_destroy(struct fw_iso_context *ctx); struct fw_card_driver { /* @@ -429,24 +409,18 @@ struct fw_card_driver { int (*stop_iso)(struct fw_iso_context *ctx); }; -int 
-fw_core_initiate_bus_reset(struct fw_card *card, int short_reset); +int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset); -void -fw_send_request(struct fw_card *card, struct fw_transaction *t, +void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode, int destination_id, int generation, int speed, unsigned long long offset, void *data, size_t length, fw_transaction_callback_t callback, void *callback_data); - -int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, - int generation, int speed, unsigned long long offset, - void *data, size_t length); - int fw_cancel_transaction(struct fw_card *card, struct fw_transaction *transaction); - void fw_flush_transactions(struct fw_card *card); - +int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, + int generation, int speed, unsigned long long offset, + void *data, size_t length); void fw_send_phy_config(struct fw_card *card, int node_id, int generation, int gap_count); @@ -454,29 +428,18 @@ void fw_send_phy_config(struct fw_card *card, * Called by the topology code to inform the device code of node * activity; found, lost, or updated nodes. */ -void -fw_node_event(struct fw_card *card, struct fw_node *node, int event); +void fw_node_event(struct fw_card *card, struct fw_node *node, int event); /* API used by card level drivers */ -void -fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver, - struct device *device); -int -fw_card_add(struct fw_card *card, - u32 max_receive, u32 link_speed, u64 guid); - -void -fw_core_remove_card(struct fw_card *card); - -void -fw_core_handle_bus_reset(struct fw_card *card, - int node_id, int generation, - int self_id_count, u32 *self_ids); -void -fw_core_handle_request(struct fw_card *card, struct fw_packet *request); - -void -fw_core_handle_response(struct fw_card *card, struct fw_packet *packet); +void fw_card_initialize(struct fw_card *card, + const struct fw_card_driver *driver, struct device *device); +int fw_card_add(struct fw_card *card, + u32 max_receive, u32 link_speed, u64 guid); +void fw_core_remove_card(struct fw_card *card); +void fw_core_handle_bus_reset(struct fw_card *card, int node_id, + int generation, int self_id_count, u32 *self_ids); +void fw_core_handle_request(struct fw_card *card, struct fw_packet *request); +void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet); #endif /* __fw_transaction_h */ -- cgit v0.10.2 From da62df141e3f879445e3daef36bd3a12c90841e2 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Sun, 14 Dec 2008 21:47:36 +0100 Subject: firewire: core: remove unused definitions Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h index 7112e62..0dd96ec 100644 --- a/drivers/firewire/fw-transaction.h +++ b/drivers/firewire/fw-transaction.h @@ -138,12 +138,6 @@ typedef void (*fw_address_callback_t)(struct fw_card *card, void *data, size_t length, void *callback_data); -typedef void (*fw_bus_reset_callback_t)(struct fw_card *handle, - int node_id, int generation, - u32 *self_ids, - int self_id_count, - void *callback_data); - struct fw_packet { int speed; int generation; @@ -184,12 +178,6 @@ struct fw_transaction { void *callback_data; }; -static inline struct fw_packet * -fw_packet(struct list_head *l) -{ - return list_entry(l, struct fw_packet, link); -} - struct fw_address_handler { u64 offset; size_t length; -- cgit v0.10.2 From a459b8ab9c176143fecef8ace4b70d6dbd7a8113 Mon Sep 17 00:00:00 
2001 From: Stefan Richter Date: Sun, 21 Dec 2008 16:49:57 +0100 Subject: firewire: cdev: use list_first_entry Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c index 3c075b2..ead5c7b 100644 --- a/drivers/firewire/fw-cdev.c +++ b/drivers/firewire/fw-cdev.c @@ -180,7 +180,7 @@ static int dequeue_event(struct client *client, return -ENODEV; spin_lock_irqsave(&client->lock, flags); - event = container_of(client->event_list.next, struct event, link); + event = list_first_entry(&client->event_list, struct event, link); list_del(&event->link); spin_unlock_irqrestore(&client->lock, flags); -- cgit v0.10.2 From 4817ed240232e89583b0506c2d8e426739af5da3 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Sun, 21 Dec 2008 16:39:46 +0100 Subject: firewire: prevent creation of multiple IR DMA contexts for the same channel OHCI-1394 1.1 clause 10.4.3 says: "If more than one IR DMA context specifies receives for packets from the same isochronous channel, the context destination for that channel's packets is undefined." Any userspace client and in the future also kernelspace clients can allocate IR DMA contexts for any channel. We don't want them to interfere with each other, hence it is preferable to return -EBUSY if allocation of a second context for a channel is attempted. Notes: - This limitation is OHCI-1394 specific, therefore its proper place of implementation is down in the low-level driver. - Since the ABI simply maps one userspace iso client context to one hardware iso context, this OHCI-1394 limitation alas requires userspace to implement its own multiplexing of iso reception from the same channel and card to multiple clients when needed. - The limitation is independent of channel allocation at the IRM; the latter is really only important for the initiation of iso transmission but not of iso reception. - We don't need to do the same for IT DMA because OHCI-1394 does not have any ties between IT contexts and channels. Only the voluntary channel allocation protocol via the IRM, globally to the FireWire bus, can ensure proper isochronous transmit behaviour anyway. 
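For illustration, not part of this patch: a minimal user-space sketch of how the limitation is expected to surface through the <linux/firewire-cdev.h> character-device ABI. The device path and channel number are arbitrary assumptions and error handling is abbreviated; on an OHCI controller, the second IR context created for an already-claimed channel should fail, typically with EBUSY.

/* Sketch only: open two clients on the same card and try to create two
 * IR DMA contexts for the same isochronous channel. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

static int create_ir_context(int fd, int channel)
{
        struct fw_cdev_create_iso_context create = {
                .type        = FW_CDEV_ISO_CONTEXT_RECEIVE,
                .header_size = 4,          /* one stripped header quadlet per packet */
                .channel     = channel,
        };

        return ioctl(fd, FW_CDEV_IOC_CREATE_ISO_CONTEXT, &create);
}

int main(void)
{
        int a = open("/dev/fw0", O_RDWR);  /* two clients, same card (assumed path) */
        int b = open("/dev/fw0", O_RDWR);

        if (a < 0 || b < 0)
                return 1;
        if (create_ir_context(a, 13) < 0)      /* channel 13 is an arbitrary example */
                perror("first IR context");
        if (create_ir_context(b, 13) < 0)
                perror("second IR context");   /* expected: EBUSY with this patch */
        return 0;
}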
Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c index 3ff2cfc..39f3bac 100644 --- a/drivers/firewire/fw-iso.c +++ b/drivers/firewire/fw-iso.c @@ -110,7 +110,8 @@ struct fw_iso_context *fw_iso_context_create(struct fw_card *card, { struct fw_iso_context *ctx; - ctx = card->driver->allocate_iso_context(card, type, header_size); + ctx = card->driver->allocate_iso_context(card, + type, channel, header_size); if (IS_ERR(ctx)) return ctx; diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c index 4c7cf15..859af71 100644 --- a/drivers/firewire/fw-ohci.c +++ b/drivers/firewire/fw-ohci.c @@ -205,6 +205,7 @@ struct fw_ohci { u32 it_context_mask; struct iso_context *it_context_list; + u64 ir_context_channels; u32 ir_context_mask; struct iso_context *ir_context_list; }; @@ -1877,20 +1878,23 @@ static int handle_it_packet(struct context *context, } static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card, - int type, size_t header_size) + int type, int channel, size_t header_size) { struct fw_ohci *ohci = fw_ohci(card); struct iso_context *ctx, *list; descriptor_callback_t callback; + u64 *channels, dont_care = ~0ULL; u32 *mask, regs; unsigned long flags; int index, ret = -ENOMEM; if (type == FW_ISO_CONTEXT_TRANSMIT) { + channels = &dont_care; mask = &ohci->it_context_mask; list = ohci->it_context_list; callback = handle_it_packet; } else { + channels = &ohci->ir_context_channels; mask = &ohci->ir_context_mask; list = ohci->ir_context_list; if (ohci->use_dualbuffer) @@ -1900,9 +1904,11 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card, } spin_lock_irqsave(&ohci->lock, flags); - index = ffs(*mask) - 1; - if (index >= 0) + index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1; + if (index >= 0) { + *channels &= ~(1ULL << channel); *mask &= ~(1 << index); + } spin_unlock_irqrestore(&ohci->lock, flags); if (index < 0) @@ -2012,6 +2018,7 @@ static void ohci_free_iso_context(struct fw_iso_context *base) } else { index = ctx - ohci->ir_context_list; ohci->ir_context_mask |= 1 << index; + ohci->ir_context_channels |= 1ULL << base->channel; } spin_unlock_irqrestore(&ohci->lock, flags); @@ -2424,6 +2431,7 @@ static int __devinit pci_probe(struct pci_dev *dev, ohci->it_context_list = kzalloc(size, GFP_KERNEL); reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0); + ohci->ir_context_channels = ~0ULL; ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet); reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0); size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask); diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h index 0dd96ec..48e88d5 100644 --- a/drivers/firewire/fw-transaction.h +++ b/drivers/firewire/fw-transaction.h @@ -383,7 +383,7 @@ struct fw_card_driver { struct fw_iso_context * (*allocate_iso_context)(struct fw_card *card, - int type, size_t header_size); + int type, int channel, size_t header_size); void (*free_iso_context)(struct fw_iso_context *ctx); int (*start_iso)(struct fw_iso_context *ctx, -- cgit v0.10.2 From 632321ecd99bf85c982a75f8329b4ecbb95b3a8f Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Fri, 2 Jan 2009 12:47:13 +0100 Subject: firewire: cdev: fix documentation of FW_CDEV_IOC_GET_INFO The FW_CDEV_IOC_GET_INFO ioctl looks at client->device->config_rom, not at the local node's config ROM. We could fix the implementation or the documentation. 
I believe the way how it is currently implemented is more useful than the way how it is currently documented. In fact, libdc1394 uses the ABI already as implemented, not as documented. Hence let's change the documentation. Signed-off-by: Stefan Richter diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h index 899ef27..86c8ff5 100644 --- a/include/linux/firewire-cdev.h +++ b/include/linux/firewire-cdev.h @@ -201,7 +201,7 @@ union fw_cdev_event { * case, @rom_length is updated with the actual length of the * configuration ROM. * @rom: If non-zero, address of a buffer to be filled by a copy of the - * local node's configuration ROM + * device's configuration ROM * @bus_reset: If non-zero, address of a buffer to be filled by a * &struct fw_cdev_event_bus_reset with the current state * of the bus. This does not cause a bus reset to happen. -- cgit v0.10.2 From fb4430367b0bbee2420132faf16c7c762a39c0bb Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Sun, 4 Jan 2009 16:23:29 +0100 Subject: firewire: cdev: reference-count client instances The lifetime of struct client instances must be longer than the lifetime of any client resource. This fixes a possible race between fw_device_op_release and transaction completions. It also prepares for new ioctls for isochronous resource management which will involve delayed processing of client resources. Signed-off-by: Stefan Richter Reviewed-by: David Moore diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c index ead5c7b..81362c1 100644 --- a/drivers/firewire/fw-cdev.c +++ b/drivers/firewire/fw-cdev.c @@ -20,6 +20,7 @@ #include #include +#include #include #include #include @@ -94,8 +95,27 @@ struct client { unsigned long vm_start; struct list_head link; + struct kref kref; }; +static inline void client_get(struct client *client) +{ + kref_get(&client->kref); +} + +static void client_release(struct kref *kref) +{ + struct client *client = container_of(kref, struct client, kref); + + fw_device_put(client->device); + kfree(client); +} + +static void client_put(struct client *client) +{ + kref_put(&client->kref, client_release); +} + static inline void __user *u64_to_uptr(__u64 value) { return (void __user *)(unsigned long)value; @@ -131,6 +151,7 @@ static int fw_device_op_open(struct inode *inode, struct file *file) idr_init(&client->resource_idr); INIT_LIST_HEAD(&client->event_list); init_waitqueue_head(&client->wait); + kref_init(&client->kref); file->private_data = client; @@ -326,6 +347,8 @@ static int add_client_resource(struct client *client, else ret = idr_get_new(&client->resource_idr, resource, &resource->handle); + if (ret >= 0) + client_get(client); spin_unlock_irqrestore(&client->lock, flags); if (ret == -EAGAIN) @@ -358,6 +381,8 @@ static int release_client_resource(struct client *client, u32 handle, else r->release(client, r); + client_put(client); + return 0; } @@ -385,11 +410,21 @@ static void complete_transaction(struct fw_card *card, int rcode, spin_lock_irqsave(&client->lock, flags); /* - * If called while in shutdown, the idr tree must be left untouched. - * The idr handle will be removed later. + * 1. If called while in shutdown, the idr tree must be left untouched. + * The idr handle will be removed and the client reference will be + * dropped later. + * 2. If the call chain was release_client_resource -> + * release_transaction -> complete_transaction (instead of a normal + * conclusion of the transaction), i.e. 
if this resource was already + * unregistered from the idr, the client reference will be dropped + * by release_client_resource and we must not drop it here. */ - if (!client->in_shutdown) + if (!client->in_shutdown && + idr_find(&client->resource_idr, response->resource.handle)) { idr_remove(&client->resource_idr, response->resource.handle); + /* Drop the idr's reference */ + client_put(client); + } spin_unlock_irqrestore(&client->lock, flags); r->type = FW_CDEV_EVENT_RESPONSE; @@ -408,6 +443,9 @@ static void complete_transaction(struct fw_card *card, int rcode, else queue_event(client, &response->event, r, sizeof(*r) + r->length, NULL, 0); + + /* Drop the transaction callback's reference */ + client_put(client); } static int ioctl_send_request(struct client *client, void *buffer) @@ -459,6 +497,9 @@ static int ioctl_send_request(struct client *client, void *buffer) if (ret < 0) goto failed; + /* Get a reference for the transaction callback */ + client_get(client); + fw_send_request(device->card, &response->transaction, request->tcode & 0x1f, device->node->node_id, @@ -1044,6 +1085,7 @@ static int shutdown_resource(int id, void *p, void *data) struct client *client = data; r->release(client, r); + client_put(client); return 0; } @@ -1076,12 +1118,7 @@ static int fw_device_op_release(struct inode *inode, struct file *file) list_for_each_entry_safe(e, next_e, &client->event_list, link) kfree(e); - /* - * FIXME: client should be reference-counted. It's extremely unlikely - * but there may still be transactions being completed at this point. - */ - fw_device_put(client->device); - kfree(client); + client_put(client); return 0; } -- cgit v0.10.2 From 97c18b7fd6df4ae0d32509f292a2eb0d4b26d623 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Sun, 4 Jan 2009 16:23:29 +0100 Subject: firewire: cdev: unify names of struct types and of their instances to indicate that they are specializations of struct event or of struct client_resource, respectively. struct response was both an event and a client_resource; it is now split into struct outbound_transaction_resource and ~_event in order to document more explicitly which types of client resources exist. struct request and struct_request_event are renamed to struct inbound_transaction_resource and ~_event because requests and responses occur in outbound and in inbound transactions. Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c index 81362c1..85e866e 100644 --- a/drivers/firewire/fw-cdev.c +++ b/drivers/firewire/fw-cdev.c @@ -41,43 +41,6 @@ #include "fw-topology.h" #include "fw-device.h" -struct client; -struct client_resource; -typedef void (*client_resource_release_fn_t)(struct client *, - struct client_resource *); -struct client_resource { - client_resource_release_fn_t release; - int handle; -}; - -/* - * dequeue_event() just kfree()'s the event, so the event has to be - * the first field in the struct. 
- */ - -struct event { - struct { void *data; size_t size; } v[2]; - struct list_head link; -}; - -struct bus_reset { - struct event event; - struct fw_cdev_event_bus_reset reset; -}; - -struct response { - struct event event; - struct fw_transaction transaction; - struct client *client; - struct client_resource resource; - struct fw_cdev_event_response response; -}; - -struct iso_interrupt { - struct event event; - struct fw_cdev_event_iso_interrupt interrupt; -}; - struct client { u32 version; struct fw_device *device; @@ -116,6 +79,70 @@ static void client_put(struct client *client) kref_put(&client->kref, client_release); } +struct client_resource; +typedef void (*client_resource_release_fn_t)(struct client *, + struct client_resource *); +struct client_resource { + client_resource_release_fn_t release; + int handle; +}; + +struct address_handler_resource { + struct client_resource resource; + struct fw_address_handler handler; + __u64 closure; + struct client *client; +}; + +struct outbound_transaction_resource { + struct client_resource resource; + struct fw_transaction transaction; +}; + +struct inbound_transaction_resource { + struct client_resource resource; + struct fw_request *request; + void *data; + size_t length; +}; + +struct descriptor_resource { + struct client_resource resource; + struct fw_descriptor descriptor; + u32 data[0]; +}; + +/* + * dequeue_event() just kfree()'s the event, so the event has to be + * the first field in a struct XYZ_event. + */ +struct event { + struct { void *data; size_t size; } v[2]; + struct list_head link; +}; + +struct bus_reset_event { + struct event event; + struct fw_cdev_event_bus_reset reset; +}; + +struct outbound_transaction_event { + struct event event; + struct client *client; + struct outbound_transaction_resource r; + struct fw_cdev_event_response response; +}; + +struct inbound_transaction_event { + struct event event; + struct fw_cdev_event_request request; +}; + +struct iso_interrupt_event { + struct event event; + struct fw_cdev_event_iso_interrupt interrupt; +}; + static inline void __user *u64_to_uptr(__u64 value) { return (void __user *)(unsigned long)value; @@ -263,18 +290,18 @@ static void for_each_client(struct fw_device *device, static void queue_bus_reset_event(struct client *client) { - struct bus_reset *bus_reset; + struct bus_reset_event *e; - bus_reset = kzalloc(sizeof(*bus_reset), GFP_KERNEL); - if (bus_reset == NULL) { + e = kzalloc(sizeof(*e), GFP_KERNEL); + if (e == NULL) { fw_notify("Out of memory when allocating bus reset event\n"); return; } - fill_bus_reset_event(&bus_reset->reset, client); + fill_bus_reset_event(&e->reset, client); - queue_event(client, &bus_reset->event, - &bus_reset->reset, sizeof(bus_reset->reset), NULL, 0); + queue_event(client, &e->event, + &e->reset, sizeof(e->reset), NULL, 0); } void fw_device_cdev_update(struct fw_device *device) @@ -389,24 +416,24 @@ static int release_client_resource(struct client *client, u32 handle, static void release_transaction(struct client *client, struct client_resource *resource) { - struct response *response = - container_of(resource, struct response, resource); + struct outbound_transaction_resource *r = container_of(resource, + struct outbound_transaction_resource, resource); - fw_cancel_transaction(client->device->card, &response->transaction); + fw_cancel_transaction(client->device->card, &r->transaction); } static void complete_transaction(struct fw_card *card, int rcode, void *payload, size_t length, void *data) { - struct response *response = 
data; - struct client *client = response->client; + struct outbound_transaction_event *e = data; + struct fw_cdev_event_response *rsp = &e->response; + struct client *client = e->client; unsigned long flags; - struct fw_cdev_event_response *r = &response->response; - if (length < r->length) - r->length = length; + if (length < rsp->length) + rsp->length = length; if (rcode == RCODE_COMPLETE) - memcpy(r->data, payload, r->length); + memcpy(rsp->data, payload, rsp->length); spin_lock_irqsave(&client->lock, flags); /* @@ -420,28 +447,28 @@ static void complete_transaction(struct fw_card *card, int rcode, * by release_client_resource and we must not drop it here. */ if (!client->in_shutdown && - idr_find(&client->resource_idr, response->resource.handle)) { - idr_remove(&client->resource_idr, response->resource.handle); + idr_find(&client->resource_idr, e->r.resource.handle)) { + idr_remove(&client->resource_idr, e->r.resource.handle); /* Drop the idr's reference */ client_put(client); } spin_unlock_irqrestore(&client->lock, flags); - r->type = FW_CDEV_EVENT_RESPONSE; - r->rcode = rcode; + rsp->type = FW_CDEV_EVENT_RESPONSE; + rsp->rcode = rcode; /* - * In the case that sizeof(*r) doesn't align with the position of the + * In the case that sizeof(*rsp) doesn't align with the position of the * data, and the read is short, preserve an extra copy of the data * to stay compatible with a pre-2.6.27 bug. Since the bug is harmless * for short reads and some apps depended on it, this is both safe * and prudent for compatibility. */ - if (r->length <= sizeof(*r) - offsetof(typeof(*r), data)) - queue_event(client, &response->event, r, sizeof(*r), - r->data, r->length); + if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data)) + queue_event(client, &e->event, rsp, sizeof(*rsp), + rsp->data, rsp->length); else - queue_event(client, &response->event, r, sizeof(*r) + r->length, + queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, NULL, 0); /* Drop the transaction callback's reference */ @@ -452,23 +479,23 @@ static int ioctl_send_request(struct client *client, void *buffer) { struct fw_device *device = client->device; struct fw_cdev_send_request *request = buffer; - struct response *response; + struct outbound_transaction_event *e; int ret; /* What is the biggest size we'll accept, really? 
*/ if (request->length > 4096) return -EINVAL; - response = kmalloc(sizeof(*response) + request->length, GFP_KERNEL); - if (response == NULL) + e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL); + if (e == NULL) return -ENOMEM; - response->client = client; - response->response.length = request->length; - response->response.closure = request->closure; + e->client = client; + e->response.length = request->length; + e->response.closure = request->closure; if (request->data && - copy_from_user(response->response.data, + copy_from_user(e->response.data, u64_to_uptr(request->data), request->length)) { ret = -EFAULT; goto failed; @@ -492,86 +519,66 @@ static int ioctl_send_request(struct client *client, void *buffer) goto failed; } - response->resource.release = release_transaction; - ret = add_client_resource(client, &response->resource, GFP_KERNEL); + e->r.resource.release = release_transaction; + ret = add_client_resource(client, &e->r.resource, GFP_KERNEL); if (ret < 0) goto failed; /* Get a reference for the transaction callback */ client_get(client); - fw_send_request(device->card, &response->transaction, + fw_send_request(device->card, &e->r.transaction, request->tcode & 0x1f, device->node->node_id, request->generation, device->max_speed, request->offset, - response->response.data, request->length, - complete_transaction, response); + e->response.data, request->length, + complete_transaction, e); if (request->data) return sizeof(request) + request->length; else return sizeof(request); failed: - kfree(response); + kfree(e); return ret; } -struct address_handler { - struct fw_address_handler handler; - __u64 closure; - struct client *client; - struct client_resource resource; -}; - -struct request { - struct fw_request *request; - void *data; - size_t length; - struct client_resource resource; -}; - -struct request_event { - struct event event; - struct fw_cdev_event_request request; -}; - static void release_request(struct client *client, struct client_resource *resource) { - struct request *request = - container_of(resource, struct request, resource); + struct inbound_transaction_resource *r = container_of(resource, + struct inbound_transaction_resource, resource); - fw_send_response(client->device->card, request->request, + fw_send_response(client->device->card, r->request, RCODE_CONFLICT_ERROR); - kfree(request); + kfree(r); } -static void handle_request(struct fw_card *card, struct fw_request *r, +static void handle_request(struct fw_card *card, struct fw_request *request, int tcode, int destination, int source, int generation, int speed, unsigned long long offset, void *payload, size_t length, void *callback_data) { - struct address_handler *handler = callback_data; - struct request *request; - struct request_event *e; - struct client *client = handler->client; + struct address_handler_resource *handler = callback_data; + struct inbound_transaction_resource *r; + struct inbound_transaction_event *e; int ret; - request = kmalloc(sizeof(*request), GFP_ATOMIC); + r = kmalloc(sizeof(*r), GFP_ATOMIC); e = kmalloc(sizeof(*e), GFP_ATOMIC); - if (request == NULL || e == NULL) + if (r == NULL || e == NULL) goto failed; - request->request = r; - request->data = payload; - request->length = length; + r->request = request; + r->data = payload; + r->length = length; - request->resource.release = release_request; - ret = add_client_resource(client, &request->resource, GFP_ATOMIC); + r->resource.release = release_request; + ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC); if 
(ret < 0) goto failed; @@ -579,61 +586,61 @@ static void handle_request(struct fw_card *card, struct fw_request *r, e->request.tcode = tcode; e->request.offset = offset; e->request.length = length; - e->request.handle = request->resource.handle; + e->request.handle = r->resource.handle; e->request.closure = handler->closure; - queue_event(client, &e->event, + queue_event(handler->client, &e->event, &e->request, sizeof(e->request), payload, length); return; failed: - kfree(request); + kfree(r); kfree(e); - fw_send_response(card, r, RCODE_CONFLICT_ERROR); + fw_send_response(card, request, RCODE_CONFLICT_ERROR); } static void release_address_handler(struct client *client, struct client_resource *resource) { - struct address_handler *handler = - container_of(resource, struct address_handler, resource); + struct address_handler_resource *r = + container_of(resource, struct address_handler_resource, resource); - fw_core_remove_address_handler(&handler->handler); - kfree(handler); + fw_core_remove_address_handler(&r->handler); + kfree(r); } static int ioctl_allocate(struct client *client, void *buffer) { struct fw_cdev_allocate *request = buffer; - struct address_handler *handler; + struct address_handler_resource *r; struct fw_address_region region; int ret; - handler = kmalloc(sizeof(*handler), GFP_KERNEL); - if (handler == NULL) + r = kmalloc(sizeof(*r), GFP_KERNEL); + if (r == NULL) return -ENOMEM; region.start = request->offset; region.end = request->offset + request->length; - handler->handler.length = request->length; - handler->handler.address_callback = handle_request; - handler->handler.callback_data = handler; - handler->closure = request->closure; - handler->client = client; + r->handler.length = request->length; + r->handler.address_callback = handle_request; + r->handler.callback_data = r; + r->closure = request->closure; + r->client = client; - ret = fw_core_add_address_handler(&handler->handler, ®ion); + ret = fw_core_add_address_handler(&r->handler, ®ion); if (ret < 0) { - kfree(handler); + kfree(r); return ret; } - handler->resource.release = release_address_handler; - ret = add_client_resource(client, &handler->resource, GFP_KERNEL); + r->resource.release = release_address_handler; + ret = add_client_resource(client, &r->resource, GFP_KERNEL); if (ret < 0) { - release_address_handler(client, &handler->resource); + release_address_handler(client, &r->resource); return ret; } - request->handle = handler->resource.handle; + request->handle = r->resource.handle; return 0; } @@ -650,13 +657,14 @@ static int ioctl_send_response(struct client *client, void *buffer) { struct fw_cdev_send_response *request = buffer; struct client_resource *resource; - struct request *r; + struct inbound_transaction_resource *r; if (release_client_resource(client, request->handle, release_request, &resource) < 0) return -EINVAL; - r = container_of(resource, struct request, resource); + r = container_of(resource, struct inbound_transaction_resource, + resource); if (request->length < r->length) r->length = request->length; if (copy_from_user(r->data, u64_to_uptr(request->data), r->length)) @@ -678,62 +686,55 @@ static int ioctl_initiate_bus_reset(struct client *client, void *buffer) return fw_core_initiate_bus_reset(client->device->card, short_reset); } -struct descriptor { - struct fw_descriptor d; - struct client_resource resource; - u32 data[0]; -}; - static void release_descriptor(struct client *client, struct client_resource *resource) { - struct descriptor *descriptor = - container_of(resource, 
struct descriptor, resource); + struct descriptor_resource *r = + container_of(resource, struct descriptor_resource, resource); - fw_core_remove_descriptor(&descriptor->d); - kfree(descriptor); + fw_core_remove_descriptor(&r->descriptor); + kfree(r); } static int ioctl_add_descriptor(struct client *client, void *buffer) { struct fw_cdev_add_descriptor *request = buffer; - struct descriptor *descriptor; + struct descriptor_resource *r; int ret; if (request->length > 256) return -EINVAL; - descriptor = - kmalloc(sizeof(*descriptor) + request->length * 4, GFP_KERNEL); - if (descriptor == NULL) + r = kmalloc(sizeof(*r) + request->length * 4, GFP_KERNEL); + if (r == NULL) return -ENOMEM; - if (copy_from_user(descriptor->data, + if (copy_from_user(r->data, u64_to_uptr(request->data), request->length * 4)) { ret = -EFAULT; goto failed; } - descriptor->d.length = request->length; - descriptor->d.immediate = request->immediate; - descriptor->d.key = request->key; - descriptor->d.data = descriptor->data; + r->descriptor.length = request->length; + r->descriptor.immediate = request->immediate; + r->descriptor.key = request->key; + r->descriptor.data = r->data; - ret = fw_core_add_descriptor(&descriptor->d); + ret = fw_core_add_descriptor(&r->descriptor); if (ret < 0) goto failed; - descriptor->resource.release = release_descriptor; - ret = add_client_resource(client, &descriptor->resource, GFP_KERNEL); + r->resource.release = release_descriptor; + ret = add_client_resource(client, &r->resource, GFP_KERNEL); if (ret < 0) { - fw_core_remove_descriptor(&descriptor->d); + fw_core_remove_descriptor(&r->descriptor); goto failed; } - request->handle = descriptor->resource.handle; + request->handle = r->resource.handle; return 0; failed: - kfree(descriptor); + kfree(r); return ret; } @@ -750,19 +751,19 @@ static void iso_callback(struct fw_iso_context *context, u32 cycle, size_t header_length, void *header, void *data) { struct client *client = data; - struct iso_interrupt *irq; + struct iso_interrupt_event *e; - irq = kzalloc(sizeof(*irq) + header_length, GFP_ATOMIC); - if (irq == NULL) + e = kzalloc(sizeof(*e) + header_length, GFP_ATOMIC); + if (e == NULL) return; - irq->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT; - irq->interrupt.closure = client->iso_closure; - irq->interrupt.cycle = cycle; - irq->interrupt.header_length = header_length; - memcpy(irq->interrupt.header, header, header_length); - queue_event(client, &irq->event, &irq->interrupt, - sizeof(irq->interrupt) + header_length, NULL, 0); + e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT; + e->interrupt.closure = client->iso_closure; + e->interrupt.cycle = cycle; + e->interrupt.header_length = header_length; + memcpy(e->interrupt.header, header, header_length); + queue_event(client, &e->event, &e->interrupt, + sizeof(e->interrupt) + header_length, NULL, 0); } static int ioctl_create_iso_context(struct client *client, void *buffer) -- cgit v0.10.2 From be5bbd6756b44602a3f281af05c2f416fa9bd1c6 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Sun, 4 Jan 2009 16:23:29 +0100 Subject: firewire: cdev: sort includes Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c index 85e866e..4c33b51 100644 --- a/drivers/firewire/fw-cdev.c +++ b/drivers/firewire/fw-cdev.c @@ -18,28 +18,30 @@ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*/ -#include +#include +#include +#include +#include +#include +#include #include #include -#include -#include -#include -#include +#include +#include #include #include #include -#include #include -#include -#include -#include -#include -#include +#include +#include +#include + #include #include -#include "fw-transaction.h" -#include "fw-topology.h" + #include "fw-device.h" +#include "fw-topology.h" +#include "fw-transaction.h" struct client { u32 version; -- cgit v0.10.2 From b769bd17656f991c5588c676376e5ec77d25997a Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Sun, 4 Jan 2009 16:23:29 +0100 Subject: firewire: core: topology header fix Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-topology.h b/drivers/firewire/fw-topology.h index 7e930f8..3c497bb 100644 --- a/drivers/firewire/fw-topology.h +++ b/drivers/firewire/fw-topology.h @@ -19,6 +19,11 @@ #ifndef __fw_topology_h #define __fw_topology_h +#include +#include + +#include + enum { FW_NODE_CREATED, FW_NODE_UPDATED, @@ -64,6 +69,7 @@ static inline void fw_node_put(struct fw_node *node) kfree(node); } +struct fw_card; void fw_destroy_nodes(struct fw_card *card); int fw_compute_block_crc(u32 *block); -- cgit v0.10.2 From b1bda4cdc2037447bd66753bf5ccab66d91b0b59 Mon Sep 17 00:00:00 2001 From: "Jay Fenlason, Stefan Richter" Date: Sun, 4 Jan 2009 16:23:29 +0100 Subject: firewire: cdev: add ioctls for isochronous resource management Based on Date: Tue, 18 Nov 2008 11:41:27 -0500 From: Jay Fenlason Subject: [Patch V4] Add ISO resource management support with several changes to the ABI and implementation. Only the part of the ABI which enables auto-reallocation and auto-deallocation is included here. This implements ioctls for kernel-assisted allocation of isochronous channels and isochronous bandwidth. The benefits are: - The client does not have to have write access to the /dev/fw* device corresponding to the IRM. - The client does not have to perform reallocation after bus resets. - Channel and bandwidth are deallocated by the kernel if the file is closed before the client deallocated the resources. Thus resources are released even if the client crashes. It is anticipated that future in-kernel code (firewire-core IRM code; the firewire port of firedtv), will use the fw-iso.c portions of this code too. Signed-off-by: Stefan Richter Tested-by: David Moore diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c index 4c33b51..a227853 100644 --- a/drivers/firewire/fw-cdev.c +++ b/drivers/firewire/fw-cdev.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -35,6 +36,7 @@ #include #include #include +#include #include #include @@ -114,6 +116,21 @@ struct descriptor_resource { u32 data[0]; }; +struct iso_resource { + struct client_resource resource; + struct client *client; + /* Schedule work and access todo only with client->lock held. */ + struct delayed_work work; + enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,} todo; + int generation; + u64 channels; + s32 bandwidth; + struct iso_resource_event *e_alloc, *e_dealloc; +}; + +static void schedule_iso_resource(struct iso_resource *); +static void release_iso_resource(struct client *, struct client_resource *); + /* * dequeue_event() just kfree()'s the event, so the event has to be * the first field in a struct XYZ_event. 
@@ -145,6 +162,11 @@ struct iso_interrupt_event { struct fw_cdev_event_iso_interrupt interrupt; }; +struct iso_resource_event { + struct event event; + struct fw_cdev_event_iso_resource resource; +}; + static inline void __user *u64_to_uptr(__u64 value) { return (void __user *)(unsigned long)value; @@ -290,6 +312,16 @@ static void for_each_client(struct fw_device *device, mutex_unlock(&device->client_list_mutex); } +static int schedule_reallocations(int id, void *p, void *data) +{ + struct client_resource *r = p; + + if (r->release == release_iso_resource) + schedule_iso_resource(container_of(r, + struct iso_resource, resource)); + return 0; +} + static void queue_bus_reset_event(struct client *client) { struct bus_reset_event *e; @@ -304,6 +336,10 @@ static void queue_bus_reset_event(struct client *client) queue_event(client, &e->event, &e->reset, sizeof(e->reset), NULL, 0); + + spin_lock_irq(&client->lock); + idr_for_each(&client->resource_idr, schedule_reallocations, client); + spin_unlock_irq(&client->lock); } void fw_device_cdev_update(struct fw_device *device) @@ -376,8 +412,12 @@ static int add_client_resource(struct client *client, else ret = idr_get_new(&client->resource_idr, resource, &resource->handle); - if (ret >= 0) + if (ret >= 0) { client_get(client); + if (resource->release == release_iso_resource) + schedule_iso_resource(container_of(resource, + struct iso_resource, resource)); + } spin_unlock_irqrestore(&client->lock, flags); if (ret == -EAGAIN) @@ -970,6 +1010,177 @@ static int ioctl_get_cycle_timer(struct client *client, void *buffer) return 0; } +static void iso_resource_work(struct work_struct *work) +{ + struct iso_resource_event *e; + struct iso_resource *r = + container_of(work, struct iso_resource, work.work); + struct client *client = r->client; + int generation, channel, bandwidth, todo; + bool skip, free, success; + + spin_lock_irq(&client->lock); + generation = client->device->generation; + todo = r->todo; + /* Allow 1000ms grace period for other reallocations. */ + if (todo == ISO_RES_ALLOC && + time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) { + if (schedule_delayed_work(&r->work, DIV_ROUND_UP(HZ, 3))) + client_get(client); + skip = true; + } else { + /* We could be called twice within the same generation. */ + skip = todo == ISO_RES_REALLOC && + r->generation == generation; + } + free = todo == ISO_RES_DEALLOC; + r->generation = generation; + spin_unlock_irq(&client->lock); + + if (skip) + goto out; + + bandwidth = r->bandwidth; + + fw_iso_resource_manage(client->device->card, generation, + r->channels, &channel, &bandwidth, + todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC); + /* + * Is this generation outdated already? As long as this resource sticks + * in the idr, it will be scheduled again for a newer generation or at + * shutdown. + */ + if (channel == -EAGAIN && + (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC)) + goto out; + + success = channel >= 0 || bandwidth > 0; + + spin_lock_irq(&client->lock); + /* + * Transit from allocation to reallocation, except if the client + * requested deallocation in the meantime. + */ + if (r->todo == ISO_RES_ALLOC) + r->todo = ISO_RES_REALLOC; + /* + * Allocation or reallocation failure? Pull this resource out of the + * idr and prepare for deletion, unless the client is shutting down. 
+ */ + if (r->todo == ISO_RES_REALLOC && !success && + !client->in_shutdown && + idr_find(&client->resource_idr, r->resource.handle)) { + idr_remove(&client->resource_idr, r->resource.handle); + client_put(client); + free = true; + } + spin_unlock_irq(&client->lock); + + if (todo == ISO_RES_ALLOC && channel >= 0) + r->channels = 1ULL << (63 - channel); + + if (todo == ISO_RES_REALLOC && success) + goto out; + + if (todo == ISO_RES_ALLOC) { + e = r->e_alloc; + r->e_alloc = NULL; + } else { + e = r->e_dealloc; + r->e_dealloc = NULL; + } + e->resource.handle = r->resource.handle; + e->resource.channel = channel; + e->resource.bandwidth = bandwidth; + + queue_event(client, &e->event, + &e->resource, sizeof(e->resource), NULL, 0); + + if (free) { + cancel_delayed_work(&r->work); + kfree(r->e_alloc); + kfree(r->e_dealloc); + kfree(r); + } + out: + client_put(client); +} + +static void schedule_iso_resource(struct iso_resource *r) +{ + if (schedule_delayed_work(&r->work, 0)) + client_get(r->client); +} + +static void release_iso_resource(struct client *client, + struct client_resource *resource) +{ + struct iso_resource *r = + container_of(resource, struct iso_resource, resource); + + spin_lock_irq(&client->lock); + r->todo = ISO_RES_DEALLOC; + schedule_iso_resource(r); + spin_unlock_irq(&client->lock); +} + +static int ioctl_allocate_iso_resource(struct client *client, void *buffer) +{ + struct fw_cdev_allocate_iso_resource *request = buffer; + struct iso_resource_event *e1, *e2; + struct iso_resource *r; + int ret; + + if ((request->channels == 0 && request->bandwidth == 0) || + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL || + request->bandwidth < 0) + return -EINVAL; + + r = kmalloc(sizeof(*r), GFP_KERNEL); + e1 = kmalloc(sizeof(*e1), GFP_KERNEL); + e2 = kmalloc(sizeof(*e2), GFP_KERNEL); + if (r == NULL || e1 == NULL || e2 == NULL) { + ret = -ENOMEM; + goto fail; + } + + INIT_DELAYED_WORK(&r->work, iso_resource_work); + r->client = client; + r->todo = ISO_RES_ALLOC; + r->generation = -1; + r->channels = request->channels; + r->bandwidth = request->bandwidth; + r->e_alloc = e1; + r->e_dealloc = e2; + + e1->resource.closure = request->closure; + e1->resource.type = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED; + e2->resource.closure = request->closure; + e2->resource.type = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED; + + r->resource.release = release_iso_resource; + ret = add_client_resource(client, &r->resource, GFP_KERNEL); + if (ret < 0) + goto fail; + request->handle = r->resource.handle; + + return 0; + fail: + kfree(r); + kfree(e1); + kfree(e2); + + return ret; +} + +static int ioctl_deallocate_iso_resource(struct client *client, void *buffer) +{ + struct fw_cdev_deallocate *request = buffer; + + return release_client_resource(client, request->handle, + release_iso_resource, NULL); +} + static int (* const ioctl_handlers[])(struct client *client, void *buffer) = { ioctl_get_info, ioctl_send_request, @@ -984,6 +1195,8 @@ static int (* const ioctl_handlers[])(struct client *client, void *buffer) = { ioctl_start_iso, ioctl_stop_iso, ioctl_get_cycle_timer, + ioctl_allocate_iso_resource, + ioctl_deallocate_iso_resource, }; static int dispatch_ioctl(struct client *client, diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c index 39f3bac..a7b57b2 100644 --- a/drivers/firewire/fw-iso.c +++ b/drivers/firewire/fw-iso.c @@ -1,5 +1,7 @@ /* - * Isochronous IO functionality + * Isochronous I/O functionality: + * - Isochronous DMA context management + * - Isochronous bus resource management 
(channels, bandwidth), client side * * Copyright (C) 2006 Kristian Hoegsberg * @@ -18,15 +20,20 @@ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ -#include -#include #include -#include +#include +#include +#include #include +#include +#include -#include "fw-transaction.h" #include "fw-topology.h" -#include "fw-device.h" +#include "fw-transaction.h" + +/* + * Isochronous DMA context management + */ int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, int page_count, enum dma_data_direction direction) @@ -153,3 +160,160 @@ int fw_iso_context_stop(struct fw_iso_context *ctx) { return ctx->card->driver->stop_iso(ctx); } + +/* + * Isochronous bus resource management (channels, bandwidth), client side + */ + +static int manage_bandwidth(struct fw_card *card, int irm_id, int generation, + int bandwidth, bool allocate) +{ + __be32 data[2]; + int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0; + + /* + * On a 1394a IRM with low contention, try < 1 is enough. + * On a 1394-1995 IRM, we need at least try < 2. + * Let's just do try < 5. + */ + for (try = 0; try < 5; try++) { + new = allocate ? old - bandwidth : old + bandwidth; + if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL) + break; + + data[0] = cpu_to_be32(old); + data[1] = cpu_to_be32(new); + switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, + irm_id, generation, SCODE_100, + CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE, + data, sizeof(data))) { + case RCODE_GENERATION: + /* A generation change frees all bandwidth. */ + return allocate ? -EAGAIN : bandwidth; + + case RCODE_COMPLETE: + if (be32_to_cpup(data) == old) + return bandwidth; + + old = be32_to_cpup(data); + /* Fall through. */ + } + } + + return -EIO; +} + +static int manage_channel(struct fw_card *card, int irm_id, int generation, + __be32 channels_mask, u64 offset, bool allocate) +{ + __be32 data[2], c, old = allocate ? cpu_to_be32(~0) : 0; + int i, retry = 5; + + for (i = 0; i < 32; i++) { + c = cpu_to_be32(1 << (31 - i)); + if (!(channels_mask & c)) + continue; + + if (allocate == !(old & c)) + continue; + + data[0] = old; + data[1] = old ^ c; + switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, + irm_id, generation, SCODE_100, + offset, data, sizeof(data))) { + case RCODE_GENERATION: + /* A generation change frees all channels. */ + return allocate ? -EAGAIN : i; + + case RCODE_COMPLETE: + if (data[0] == old) + return i; + + old = data[0]; + + /* Is the IRM 1394a-2000 compliant? */ + if ((data[0] & c) != (data[1] & c)) + continue; + + /* 1394-1995 IRM, fall through to retry. */ + default: + if (retry--) + i--; + } + } + + return -EIO; +} + +static void deallocate_channel(struct fw_card *card, int irm_id, + int generation, int channel) +{ + __be32 mask; + u64 offset; + + mask = channel < 32 ? cpu_to_be32(1 << (31 - channel)) : + cpu_to_be32(1 << (63 - channel)); + offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI : + CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO; + + manage_channel(card, irm_id, generation, mask, offset, false); +} + +/** + * fw_iso_resource_manage - Allocate or deallocate a channel and/or bandwidth + * + * In parameters: card, generation, channels_mask, bandwidth, allocate + * Out parameters: channel, bandwidth + * This function blocks (sleeps) during communication with the IRM. + * Allocates or deallocates at most one channel out of channels_mask. + * + * Returns channel < 0 if no channel was allocated or deallocated. 
+ * Returns bandwidth = 0 if no bandwidth was allocated or deallocated. + * + * If generation is stale, deallocations succeed but allocations fail with + * channel = -EAGAIN. + * + * If channel (de)allocation fails, bandwidth (de)allocation fails too. + * If bandwidth allocation fails, no channel will be allocated either. + * If bandwidth deallocation fails, channel deallocation may still have been + * successful. + */ +void fw_iso_resource_manage(struct fw_card *card, int generation, + u64 channels_mask, int *channel, int *bandwidth, + bool allocate) +{ + __be32 channels_hi = cpu_to_be32(channels_mask >> 32); + __be32 channels_lo = cpu_to_be32(channels_mask); + int irm_id, ret, c = -EINVAL; + + spin_lock_irq(&card->lock); + irm_id = card->irm_node->node_id; + spin_unlock_irq(&card->lock); + + if (channels_hi) + c = manage_channel(card, irm_id, generation, channels_hi, + CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI, allocate); + if (channels_lo && c < 0) { + c = manage_channel(card, irm_id, generation, channels_lo, + CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO, allocate); + if (c >= 0) + c += 32; + } + *channel = c; + + if (channels_mask != 0 && c < 0) + *bandwidth = 0; + + if (*bandwidth == 0) + return; + + ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate); + if (ret < 0) + *bandwidth = 0; + + if (ret < 0 && c >= 0 && allocate) { + deallocate_channel(card, irm_id, generation, c); + *channel = ret; + } +} diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h index 48e88d5..212a102 100644 --- a/drivers/firewire/fw-transaction.h +++ b/drivers/firewire/fw-transaction.h @@ -82,6 +82,7 @@ #define CSR_SPEED_MAP 0x2000 #define CSR_SPEED_MAP_END 0x3000 +#define BANDWIDTH_AVAILABLE_INITIAL 4915 #define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31) #define BROADCAST_CHANNEL_VALID (1 << 30) @@ -343,6 +344,9 @@ int fw_iso_context_start(struct fw_iso_context *ctx, int fw_iso_context_stop(struct fw_iso_context *ctx); void fw_iso_context_destroy(struct fw_iso_context *ctx); +void fw_iso_resource_manage(struct fw_card *card, int generation, + u64 channels_mask, int *channel, int *bandwidth, bool allocate); + struct fw_card_driver { /* * Enable the given card with the given initial config rom. 
diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h index 86c8ff5..25b96dd 100644 --- a/include/linux/firewire-cdev.h +++ b/include/linux/firewire-cdev.h @@ -25,10 +25,12 @@ #include #include -#define FW_CDEV_EVENT_BUS_RESET 0x00 -#define FW_CDEV_EVENT_RESPONSE 0x01 -#define FW_CDEV_EVENT_REQUEST 0x02 -#define FW_CDEV_EVENT_ISO_INTERRUPT 0x03 +#define FW_CDEV_EVENT_BUS_RESET 0x00 +#define FW_CDEV_EVENT_RESPONSE 0x01 +#define FW_CDEV_EVENT_REQUEST 0x02 +#define FW_CDEV_EVENT_ISO_INTERRUPT 0x03 +#define FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED 0x04 +#define FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED 0x05 /** * struct fw_cdev_event_common - Common part of all fw_cdev_event_ types @@ -147,12 +149,46 @@ struct fw_cdev_event_iso_interrupt { }; /** + * struct fw_cdev_event_iso_resource - Iso resources were allocated or freed + * @closure: See &fw_cdev_event_common; + * set by %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE ioctl + * @type: %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or + * %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED + * @handle: Reference by which an allocated resource can be deallocated + * @channel: Isochronous channel which was (de)allocated, if any + * @bandwidth: Bandwidth allocation units which were (de)allocated, if any + * @channels_available: Last known availability of channels + * @bandwidth_available: Last known availability of bandwidth + * + * An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event is sent after an isochronous + * resource was allocated at the IRM. The client has to check @channel and + * @bandwidth for whether the allocation actually succeeded. + * + * @channel is <0 if no channel was allocated. + * @bandwidth is 0 if no bandwidth was allocated. + * + * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event is sent after an isochronous + * resource was deallocated at the IRM. It is also sent when automatic + * reallocation after a bus reset failed. + */ +struct fw_cdev_event_iso_resource { + __u64 closure; + __u32 type; + __u32 handle; + __s32 channel; + __s32 bandwidth; +}; + +/** * union fw_cdev_event - Convenience union of fw_cdev_event_ types * @common: Valid for all types * @bus_reset: Valid if @common.type == %FW_CDEV_EVENT_BUS_RESET * @response: Valid if @common.type == %FW_CDEV_EVENT_RESPONSE * @request: Valid if @common.type == %FW_CDEV_EVENT_REQUEST * @iso_interrupt: Valid if @common.type == %FW_CDEV_EVENT_ISO_INTERRUPT + * @iso_resource: Valid if @common.type == + * %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or + * %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED * * Convenience union for userspace use. Events could be read(2) into an * appropriately aligned char buffer and then cast to this union for further @@ -163,13 +199,15 @@ struct fw_cdev_event_iso_interrupt { * not fit will be discarded so that the next read(2) will return a new event. 
*/ union fw_cdev_event { - struct fw_cdev_event_common common; - struct fw_cdev_event_bus_reset bus_reset; - struct fw_cdev_event_response response; - struct fw_cdev_event_request request; - struct fw_cdev_event_iso_interrupt iso_interrupt; + struct fw_cdev_event_common common; + struct fw_cdev_event_bus_reset bus_reset; + struct fw_cdev_event_response response; + struct fw_cdev_event_request request; + struct fw_cdev_event_iso_interrupt iso_interrupt; + struct fw_cdev_event_iso_resource iso_resource; }; +/* available since kernel version 2.6.22 */ #define FW_CDEV_IOC_GET_INFO _IOWR('#', 0x00, struct fw_cdev_get_info) #define FW_CDEV_IOC_SEND_REQUEST _IOW('#', 0x01, struct fw_cdev_send_request) #define FW_CDEV_IOC_ALLOCATE _IOWR('#', 0x02, struct fw_cdev_allocate) @@ -178,13 +216,18 @@ union fw_cdev_event { #define FW_CDEV_IOC_INITIATE_BUS_RESET _IOW('#', 0x05, struct fw_cdev_initiate_bus_reset) #define FW_CDEV_IOC_ADD_DESCRIPTOR _IOWR('#', 0x06, struct fw_cdev_add_descriptor) #define FW_CDEV_IOC_REMOVE_DESCRIPTOR _IOW('#', 0x07, struct fw_cdev_remove_descriptor) - #define FW_CDEV_IOC_CREATE_ISO_CONTEXT _IOWR('#', 0x08, struct fw_cdev_create_iso_context) #define FW_CDEV_IOC_QUEUE_ISO _IOWR('#', 0x09, struct fw_cdev_queue_iso) #define FW_CDEV_IOC_START_ISO _IOW('#', 0x0a, struct fw_cdev_start_iso) #define FW_CDEV_IOC_STOP_ISO _IOW('#', 0x0b, struct fw_cdev_stop_iso) + +/* available since kernel version 2.6.24 */ #define FW_CDEV_IOC_GET_CYCLE_TIMER _IOR('#', 0x0c, struct fw_cdev_get_cycle_timer) +/* available since kernel version 2.6.30 */ +#define FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE _IOWR('#', 0x0d, struct fw_cdev_allocate_iso_resource) +#define FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE _IOW('#', 0x0e, struct fw_cdev_deallocate) + /* FW_CDEV_VERSION History * * 1 Feb 18, 2007: Initial version. @@ -284,9 +327,9 @@ struct fw_cdev_allocate { }; /** - * struct fw_cdev_deallocate - Free an address range allocation - * @handle: Handle to the address range, as returned by the kernel when the - * range was allocated + * struct fw_cdev_deallocate - Free a CSR address range or isochronous resource + * @handle: Handle to the address range or iso resource, as returned by the + * kernel when the range or resource was allocated */ struct fw_cdev_deallocate { __u32 handle; @@ -479,4 +522,35 @@ struct fw_cdev_get_cycle_timer { __u32 cycle_timer; }; +/** + * struct fw_cdev_allocate_iso_resource - Allocate a channel or bandwidth + * @closure: Passed back to userspace in correponding iso resource events + * @channels: Isochronous channels of which one is to be allocated + * @bandwidth: Isochronous bandwidth units to be allocated + * @handle: Handle to the allocation, written by the kernel + * + * The %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE ioctl initiates allocation of an + * isochronous channel and/or of isochronous bandwidth at the isochronous + * resource manager (IRM). Only one of the channels specified in @channels is + * allocated. An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED is sent after + * communication with the IRM, indicating success or failure in the event data. + * The kernel will automatically reallocate the resources after bus resets. + * Should a reallocation fail, an %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event + * will be sent. The kernel will also automatically deallocate the resources + * when the file descriptor is closed. 
+ * + * @channels is a host-endian bitfield with the most significant bit + * representing channel 0 and the least significant bit representing channel 63: + * 1ULL << (63 - c) + * + * @bandwidth is expressed in bandwidth allocation units, i.e. the time to send + * one quadlet of data (payload or header data) at speed S1600. + */ +struct fw_cdev_allocate_iso_resource { + __u64 closure; + __u64 channels; + __u32 bandwidth; + __u32 handle; +}; + #endif /* _LINUX_FIREWIRE_CDEV_H */ -- cgit v0.10.2 From 1ec3c0269d7196118cc7c403654ca5f19ef4d584 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Sun, 4 Jan 2009 16:23:29 +0100 Subject: firewire: cdev: add ioctls for manual iso resource management This adds ioctls for allocation and deallocation of a channel or/and bandwidth without auto-reallocation and without auto-deallocation. The benefit of these ioctls is that libraw1394-style isochronous resource management can be implemented without write access to the IRM's character device file. Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c index a227853..08fe68d 100644 --- a/drivers/firewire/fw-cdev.c +++ b/drivers/firewire/fw-cdev.c @@ -121,14 +121,15 @@ struct iso_resource { struct client *client; /* Schedule work and access todo only with client->lock held. */ struct delayed_work work; - enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,} todo; + enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC, + ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo; int generation; u64 channels; s32 bandwidth; struct iso_resource_event *e_alloc, *e_dealloc; }; -static void schedule_iso_resource(struct iso_resource *); +static int schedule_iso_resource(struct iso_resource *); static void release_iso_resource(struct client *, struct client_resource *); /* @@ -1033,7 +1034,9 @@ static void iso_resource_work(struct work_struct *work) skip = todo == ISO_RES_REALLOC && r->generation == generation; } - free = todo == ISO_RES_DEALLOC; + free = todo == ISO_RES_DEALLOC || + todo == ISO_RES_ALLOC_ONCE || + todo == ISO_RES_DEALLOC_ONCE; r->generation = generation; spin_unlock_irq(&client->lock); @@ -1044,7 +1047,9 @@ static void iso_resource_work(struct work_struct *work) fw_iso_resource_manage(client->device->card, generation, r->channels, &channel, &bandwidth, - todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC); + todo == ISO_RES_ALLOC || + todo == ISO_RES_REALLOC || + todo == ISO_RES_ALLOC_ONCE); /* * Is this generation outdated already? 
As long as this resource sticks * in the idr, it will be scheduled again for a newer generation or at @@ -1082,7 +1087,7 @@ static void iso_resource_work(struct work_struct *work) if (todo == ISO_RES_REALLOC && success) goto out; - if (todo == ISO_RES_ALLOC) { + if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) { e = r->e_alloc; r->e_alloc = NULL; } else { @@ -1106,10 +1111,17 @@ static void iso_resource_work(struct work_struct *work) client_put(client); } -static void schedule_iso_resource(struct iso_resource *r) +static int schedule_iso_resource(struct iso_resource *r) { - if (schedule_delayed_work(&r->work, 0)) - client_get(r->client); + int scheduled; + + client_get(r->client); + + scheduled = schedule_delayed_work(&r->work, 0); + if (!scheduled) + client_put(r->client); + + return scheduled; } static void release_iso_resource(struct client *client, @@ -1124,9 +1136,9 @@ static void release_iso_resource(struct client *client, spin_unlock_irq(&client->lock); } -static int ioctl_allocate_iso_resource(struct client *client, void *buffer) +static int init_iso_resource(struct client *client, + struct fw_cdev_allocate_iso_resource *request, int todo) { - struct fw_cdev_allocate_iso_resource *request = buffer; struct iso_resource_event *e1, *e2; struct iso_resource *r; int ret; @@ -1146,7 +1158,7 @@ static int ioctl_allocate_iso_resource(struct client *client, void *buffer) INIT_DELAYED_WORK(&r->work, iso_resource_work); r->client = client; - r->todo = ISO_RES_ALLOC; + r->todo = todo; r->generation = -1; r->channels = request->channels; r->bandwidth = request->bandwidth; @@ -1158,8 +1170,14 @@ static int ioctl_allocate_iso_resource(struct client *client, void *buffer) e2->resource.closure = request->closure; e2->resource.type = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED; - r->resource.release = release_iso_resource; - ret = add_client_resource(client, &r->resource, GFP_KERNEL); + if (todo == ISO_RES_ALLOC) { + r->resource.release = release_iso_resource; + ret = add_client_resource(client, &r->resource, GFP_KERNEL); + } else { + r->resource.release = NULL; + r->resource.handle = -1; + ret = schedule_iso_resource(r) ? 
0 : -ENOMEM; + } if (ret < 0) goto fail; request->handle = r->resource.handle; @@ -1173,6 +1191,13 @@ static int ioctl_allocate_iso_resource(struct client *client, void *buffer) return ret; } +static int ioctl_allocate_iso_resource(struct client *client, void *buffer) +{ + struct fw_cdev_allocate_iso_resource *request = buffer; + + return init_iso_resource(client, request, ISO_RES_ALLOC); +} + static int ioctl_deallocate_iso_resource(struct client *client, void *buffer) { struct fw_cdev_deallocate *request = buffer; @@ -1181,6 +1206,20 @@ static int ioctl_deallocate_iso_resource(struct client *client, void *buffer) release_iso_resource, NULL); } +static int ioctl_allocate_iso_resource_once(struct client *client, void *buffer) +{ + struct fw_cdev_allocate_iso_resource *request = buffer; + + return init_iso_resource(client, request, ISO_RES_ALLOC_ONCE); +} + +static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffer) +{ + struct fw_cdev_allocate_iso_resource *request = buffer; + + return init_iso_resource(client, request, ISO_RES_DEALLOC_ONCE); +} + static int (* const ioctl_handlers[])(struct client *client, void *buffer) = { ioctl_get_info, ioctl_send_request, @@ -1197,6 +1236,8 @@ static int (* const ioctl_handlers[])(struct client *client, void *buffer) = { ioctl_get_cycle_timer, ioctl_allocate_iso_resource, ioctl_deallocate_iso_resource, + ioctl_allocate_iso_resource_once, + ioctl_deallocate_iso_resource_once, }; static int dispatch_ioctl(struct client *client, diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h index 25b96dd..08ca838 100644 --- a/include/linux/firewire-cdev.h +++ b/include/linux/firewire-cdev.h @@ -151,7 +151,7 @@ struct fw_cdev_event_iso_interrupt { /** * struct fw_cdev_event_iso_resource - Iso resources were allocated or freed * @closure: See &fw_cdev_event_common; - * set by %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE ioctl + * set by %FW_CDEV_IOC_(DE)ALLOCATE_ISO_RESOURCE(_ONCE) ioctl * @type: %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or * %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED * @handle: Reference by which an allocated resource can be deallocated @@ -164,12 +164,12 @@ struct fw_cdev_event_iso_interrupt { * resource was allocated at the IRM. The client has to check @channel and * @bandwidth for whether the allocation actually succeeded. * - * @channel is <0 if no channel was allocated. - * @bandwidth is 0 if no bandwidth was allocated. - * * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event is sent after an isochronous * resource was deallocated at the IRM. It is also sent when automatic * reallocation after a bus reset failed. + * + * @channel is <0 if no channel was (de)allocated or if reallocation failed. + * @bandwidth is 0 if no bandwidth was (de)allocated or if reallocation failed. 
*/ struct fw_cdev_event_iso_resource { __u64 closure; @@ -225,8 +225,10 @@ union fw_cdev_event { #define FW_CDEV_IOC_GET_CYCLE_TIMER _IOR('#', 0x0c, struct fw_cdev_get_cycle_timer) /* available since kernel version 2.6.30 */ -#define FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE _IOWR('#', 0x0d, struct fw_cdev_allocate_iso_resource) -#define FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE _IOW('#', 0x0e, struct fw_cdev_deallocate) +#define FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE _IOWR('#', 0x0d, struct fw_cdev_allocate_iso_resource) +#define FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE _IOW('#', 0x0e, struct fw_cdev_deallocate) +#define FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE _IOW('#', 0x0f, struct fw_cdev_allocate_iso_resource) +#define FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE_ONCE _IOW('#', 0x10, struct fw_cdev_allocate_iso_resource) /* FW_CDEV_VERSION History * @@ -523,11 +525,12 @@ struct fw_cdev_get_cycle_timer { }; /** - * struct fw_cdev_allocate_iso_resource - Allocate a channel or bandwidth + * struct fw_cdev_allocate_iso_resource - (De)allocate a channel or bandwidth * @closure: Passed back to userspace in correponding iso resource events - * @channels: Isochronous channels of which one is to be allocated - * @bandwidth: Isochronous bandwidth units to be allocated - * @handle: Handle to the allocation, written by the kernel + * @channels: Isochronous channels of which one is to be (de)allocated + * @bandwidth: Isochronous bandwidth units to be (de)allocated + * @handle: Handle to the allocation, written by the kernel (only valid in + * case of %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE ioctls) * * The %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE ioctl initiates allocation of an * isochronous channel and/or of isochronous bandwidth at the isochronous @@ -539,6 +542,25 @@ struct fw_cdev_get_cycle_timer { * will be sent. The kernel will also automatically deallocate the resources * when the file descriptor is closed. * + * The %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE ioctl can be used to initiate + * deallocation of resources which were allocated as described above. + * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation. + * + * The %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE ioctl is a variant of allocation + * without automatic re- or deallocation. + * An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event concludes this operation, + * indicating success or failure in its data. + * + * The %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE_ONCE ioctl works like + * %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE except that resources are freed + * instead of allocated. At most one channel may be specified in this ioctl. + * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation. + * + * To summarize, %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE allocates iso resources + * for the lifetime of the fd or handle. + * In contrast, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE allocates iso resources + * for the duration of a bus generation. + * * @channels is a host-endian bitfield with the most significant bit * representing channel 0 and the least significant bit representing channel 63: * 1ULL << (63 - c) -- cgit v0.10.2 From 33580a3ef5ba3bc0ee1b520df82a24bb37ce28f0 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Sun, 4 Jan 2009 16:23:29 +0100 Subject: firewire: cdev: add ioctl to query maximum transmission speed While the speed of asynchronous transactions is automatically chosen by the kernel, the speed of isochronous streams has to be chosen by the initiating client. 
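As a usage illustration for the allocation ioctls documented above, a one-shot bandwidth reservation from userspace could look roughly like the sketch below. This is not part of any patch in this series; the file descriptor is assumed to be an open /dev/fw* character device, the bandwidth value is an arbitrary placeholder, and the struct and ioctl names are taken from <linux/firewire-cdev.h> as introduced above.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

/* Illustrative sketch only: reserve isochronous bandwidth for the current
 * bus generation.  "fd" is an already opened /dev/fw* descriptor.
 */
static int reserve_bandwidth_once(int fd, __u64 closure)
{
	struct fw_cdev_allocate_iso_resource rq;

	memset(&rq, 0, sizeof(rq));
	rq.closure   = closure;	/* echoed back in the resulting event */
	rq.channels  = 0;	/* no channel candidates, bandwidth only */
	rq.bandwidth = 512;	/* allocation units; placeholder value */

	/* The handle field is not used by the _ONCE variant; the outcome
	 * arrives later as an FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event.
	 */
	return ioctl(fd, FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE, &rq);
}

Requesting bandwidth without channel candidates sidesteps the channel-mask bit ordering, which a later patch in this series revisits.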
In case of 1394a bus topologies, the maximum possible speed could be figured out with some effort by evaluation of the remote node's link speed field in the config ROM, the local node's link speed field, and the PHY speeds and topologic information in the local node's or IRM's topology map CSR. However, this does not work in case of 1394b buses. Hence add an ioctl to export the maximum speed which the kernel already determined. Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c index 08fe68d..05ad2a8 100644 --- a/drivers/firewire/fw-cdev.c +++ b/drivers/firewire/fw-cdev.c @@ -1220,6 +1220,15 @@ static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffe return init_iso_resource(client, request, ISO_RES_DEALLOC_ONCE); } +static int ioctl_get_speed(struct client *client, void *buffer) +{ + struct fw_cdev_get_speed *request = buffer; + + request->max_speed = client->device->max_speed; + + return 0; +} + static int (* const ioctl_handlers[])(struct client *client, void *buffer) = { ioctl_get_info, ioctl_send_request, @@ -1238,6 +1247,7 @@ static int (* const ioctl_handlers[])(struct client *client, void *buffer) = { ioctl_deallocate_iso_resource, ioctl_allocate_iso_resource_once, ioctl_deallocate_iso_resource_once, + ioctl_get_speed, }; static int dispatch_ioctl(struct client *client, diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h index 08ca838..f819c10 100644 --- a/include/linux/firewire-cdev.h +++ b/include/linux/firewire-cdev.h @@ -229,6 +229,7 @@ union fw_cdev_event { #define FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE _IOW('#', 0x0e, struct fw_cdev_deallocate) #define FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE _IOW('#', 0x0f, struct fw_cdev_allocate_iso_resource) #define FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE_ONCE _IOW('#', 0x10, struct fw_cdev_allocate_iso_resource) +#define FW_CDEV_IOC_GET_SPEED _IOR('#', 0x11, struct fw_cdev_get_speed) /* FW_CDEV_VERSION History * @@ -575,4 +576,13 @@ struct fw_cdev_allocate_iso_resource { __u32 handle; }; +/** + * struct fw_cdev_get_speed - Query maximum speed to or from this device + * @max_speed: Speed code; minimum of the device's link speed, the local node's + * link speed, and all PHY port speeds between the two links + */ +struct fw_cdev_get_speed { + __u32 max_speed; +}; + #endif /* _LINUX_FIREWIRE_CDEV_H */ -- cgit v0.10.2 From acfe8333572cad5dc70fce18ac966be0446548d7 Mon Sep 17 00:00:00 2001 From: "Jay Fenlason, Stefan Richter" Date: Sun, 4 Jan 2009 16:23:29 +0100 Subject: firewire: cdev: add ioctl for broadcast write requests Write transactions to the broadcast node ID are a convenient way to trigger functions of multiple nodes at once. IIDC is a protocol which can make use of this if multiple cameras with same command_regs_base are connected at the same bus. Based on Date: Wed, 10 Sep 2008 11:32:16 -0400 From: Jay Fenlason Subject: [patch] SEND_BROADCAST_REQUEST Changes: ioctl_send_request() and ioctl_send_broadcast_request() now share code. Broadcast speed corrected to S100. Check for proper tcode. 
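From userspace, sending such a broadcast write might look roughly like the following sketch. It is illustrative only and not taken from the patch: the register offset and payload are made up, the struct fw_cdev_send_request layout is assumed from <linux/firewire-cdev.h> of this era, and "generation" must be the current bus generation previously obtained through FW_CDEV_IOC_GET_INFO or a bus reset event.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

/* Illustrative sketch: broadcast a block write to a made-up register
 * address.  The kernel addresses it to node 0xffff (LOCAL_BUS | 0x3f)
 * and forces S100, as implemented by ioctl_send_broadcast_request().
 */
static int broadcast_block_write(int fd, __u32 generation)
{
	__u32 payload[2] = { 0x12345678, 0x9abcdef0 };	/* placeholder data */
	struct fw_cdev_send_request rq;

	memset(&rq, 0, sizeof(rq));
	rq.tcode      = 0x1;			/* TCODE_WRITE_BLOCK_REQUEST */
	rq.length     = sizeof(payload);
	rq.offset     = 0xfffff0001000ULL;	/* made-up unit-space address */
	rq.closure    = 0;
	rq.data       = (__u64)(uintptr_t)payload;
	rq.generation = generation;

	return ioctl(fd, FW_CDEV_IOC_SEND_BROADCAST_REQUEST, &rq);
}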
Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c index 05ad2a8..a1637a8 100644 --- a/drivers/firewire/fw-cdev.c +++ b/drivers/firewire/fw-cdev.c @@ -518,10 +518,10 @@ static void complete_transaction(struct fw_card *card, int rcode, client_put(client); } -static int ioctl_send_request(struct client *client, void *buffer) +static int init_request(struct client *client, + struct fw_cdev_send_request *request, + int destination_id, int speed) { - struct fw_device *device = client->device; - struct fw_cdev_send_request *request = buffer; struct outbound_transaction_event *e; int ret; @@ -544,24 +544,6 @@ static int ioctl_send_request(struct client *client, void *buffer) goto failed; } - switch (request->tcode) { - case TCODE_WRITE_QUADLET_REQUEST: - case TCODE_WRITE_BLOCK_REQUEST: - case TCODE_READ_QUADLET_REQUEST: - case TCODE_READ_BLOCK_REQUEST: - case TCODE_LOCK_MASK_SWAP: - case TCODE_LOCK_COMPARE_SWAP: - case TCODE_LOCK_FETCH_ADD: - case TCODE_LOCK_LITTLE_ADD: - case TCODE_LOCK_BOUNDED_ADD: - case TCODE_LOCK_WRAP_ADD: - case TCODE_LOCK_VENDOR_DEPENDENT: - break; - default: - ret = -EINVAL; - goto failed; - } - e->r.resource.release = release_transaction; ret = add_client_resource(client, &e->r.resource, GFP_KERNEL); if (ret < 0) @@ -570,12 +552,9 @@ static int ioctl_send_request(struct client *client, void *buffer) /* Get a reference for the transaction callback */ client_get(client); - fw_send_request(device->card, &e->r.transaction, - request->tcode & 0x1f, - device->node->node_id, - request->generation, - device->max_speed, - request->offset, + fw_send_request(client->device->card, &e->r.transaction, + request->tcode & 0x1f, destination_id, + request->generation, speed, request->offset, e->response.data, request->length, complete_transaction, e); @@ -589,6 +568,31 @@ static int ioctl_send_request(struct client *client, void *buffer) return ret; } +static int ioctl_send_request(struct client *client, void *buffer) +{ + struct fw_cdev_send_request *request = buffer; + + switch (request->tcode) { + case TCODE_WRITE_QUADLET_REQUEST: + case TCODE_WRITE_BLOCK_REQUEST: + case TCODE_READ_QUADLET_REQUEST: + case TCODE_READ_BLOCK_REQUEST: + case TCODE_LOCK_MASK_SWAP: + case TCODE_LOCK_COMPARE_SWAP: + case TCODE_LOCK_FETCH_ADD: + case TCODE_LOCK_LITTLE_ADD: + case TCODE_LOCK_BOUNDED_ADD: + case TCODE_LOCK_WRAP_ADD: + case TCODE_LOCK_VENDOR_DEPENDENT: + break; + default: + return -EINVAL; + } + + return init_request(client, request, client->device->node->node_id, + client->device->max_speed); +} + static void release_request(struct client *client, struct client_resource *resource) { @@ -1229,6 +1233,21 @@ static int ioctl_get_speed(struct client *client, void *buffer) return 0; } +static int ioctl_send_broadcast_request(struct client *client, void *buffer) +{ + struct fw_cdev_send_request *request = buffer; + + switch (request->tcode) { + case TCODE_WRITE_QUADLET_REQUEST: + case TCODE_WRITE_BLOCK_REQUEST: + break; + default: + return -EINVAL; + } + + return init_request(client, request, LOCAL_BUS | 0x3f, SCODE_100); +} + static int (* const ioctl_handlers[])(struct client *client, void *buffer) = { ioctl_get_info, ioctl_send_request, @@ -1248,6 +1267,7 @@ static int (* const ioctl_handlers[])(struct client *client, void *buffer) = { ioctl_allocate_iso_resource_once, ioctl_deallocate_iso_resource_once, ioctl_get_speed, + ioctl_send_broadcast_request, }; static int dispatch_ioctl(struct client *client, diff --git a/include/linux/firewire-cdev.h 
b/include/linux/firewire-cdev.h index f819c10..340a785 100644 --- a/include/linux/firewire-cdev.h +++ b/include/linux/firewire-cdev.h @@ -230,6 +230,7 @@ union fw_cdev_event { #define FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE _IOW('#', 0x0f, struct fw_cdev_allocate_iso_resource) #define FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE_ONCE _IOW('#', 0x10, struct fw_cdev_allocate_iso_resource) #define FW_CDEV_IOC_GET_SPEED _IOR('#', 0x11, struct fw_cdev_get_speed) +#define FW_CDEV_IOC_SEND_BROADCAST_REQUEST _IOW('#', 0x12, struct fw_cdev_send_request) /* FW_CDEV_VERSION History * -- cgit v0.10.2 From 1566f3dc3e5986a16c7bbb3bb95bb691251a8d25 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Sun, 4 Jan 2009 16:23:29 +0100 Subject: firewire: cdev: restrict broadcast write requests to Units Space We don't want random users write to Memory Space (e.g. PCs with physical DMA filters down) or to core CSRs like Reset_Start. This does not protect SBP-2 target CSRs. But properly behaving SBP-2 targets ignore broadcast write requests to these registers, and the maximum damage which can happen with laxer targets is DOS. But there are ways to create DOS situations anyway if there are devices with weak device file permissions (like audio/video devices) present at the same bus as an SBP-2 target. Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c index a1637a8..d48fa1c 100644 --- a/drivers/firewire/fw-cdev.c +++ b/drivers/firewire/fw-cdev.c @@ -1245,6 +1245,10 @@ static int ioctl_send_broadcast_request(struct client *client, void *buffer) return -EINVAL; } + /* Security policy: Only allow accesses to Units Space. */ + if (request->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END) + return -EACCES; + return init_request(client, request, LOCAL_BUS | 0x3f, SCODE_100); } -- cgit v0.10.2 From 5d3fd692a7196a9045fb606f891f5987959b65a0 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Sun, 4 Jan 2009 16:23:29 +0100 Subject: firewire: cdev: extend transaction payload size check Make the size check of ioctl_send_request and ioctl_send_broadcast_request speed dependent. Also change the error return code from -EINVAL to -EIO to distinguish this from other errors concerning the ioctl parameters. Another payload size limit for which we don't check here though is the remote node's Bus_Info_Block.max_rec. Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c index d48fa1c..6b33f15 100644 --- a/drivers/firewire/fw-cdev.c +++ b/drivers/firewire/fw-cdev.c @@ -525,9 +525,8 @@ static int init_request(struct client *client, struct outbound_transaction_event *e; int ret; - /* What is the biggest size we'll accept, really? */ - if (request->length > 4096) - return -EINVAL; + if (request->length > 4096 || request->length > 512 << speed) + return -EIO; e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL); if (e == NULL) -- cgit v0.10.2 From 3ba949868a6dc082b24cba5c3bf3f50de7391433 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Sun, 4 Jan 2009 16:23:29 +0100 Subject: firewire: cdev: replace some spin_lock_irqsave by spin_lock_irq All of these functions are entered with IRQs enabled. Hence the unconditional spin_unlock_irq can be used. 
Function: Caller context: dequeue_event() client process, via read(2) fill_bus_reset_event() fw-device.c update worqueue job release_client_resource() client process, via ioctl(2) fw_device_op_release() client process, via close(2) Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c index 6b33f15..3d816a6 100644 --- a/drivers/firewire/fw-cdev.c +++ b/drivers/firewire/fw-cdev.c @@ -237,7 +237,6 @@ static void queue_event(struct client *client, struct event *event, static int dequeue_event(struct client *client, char __user *buffer, size_t count) { - unsigned long flags; struct event *event; size_t size, total; int i, ret; @@ -252,10 +251,10 @@ static int dequeue_event(struct client *client, fw_device_is_shutdown(client->device)) return -ENODEV; - spin_lock_irqsave(&client->lock, flags); + spin_lock_irq(&client->lock); event = list_first_entry(&client->event_list, struct event, link); list_del(&event->link); - spin_unlock_irqrestore(&client->lock, flags); + spin_unlock_irq(&client->lock); total = 0; for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) { @@ -286,9 +285,8 @@ static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event, struct client *client) { struct fw_card *card = client->device->card; - unsigned long flags; - spin_lock_irqsave(&card->lock, flags); + spin_lock_irq(&card->lock); event->closure = client->bus_reset_closure; event->type = FW_CDEV_EVENT_BUS_RESET; @@ -299,7 +297,7 @@ static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event, event->irm_node_id = card->irm_node->node_id; event->root_node_id = card->root_node->node_id; - spin_unlock_irqrestore(&card->lock, flags); + spin_unlock_irq(&card->lock); } static void for_each_client(struct fw_device *device, @@ -432,16 +430,15 @@ static int release_client_resource(struct client *client, u32 handle, struct client_resource **resource) { struct client_resource *r; - unsigned long flags; - spin_lock_irqsave(&client->lock, flags); + spin_lock_irq(&client->lock); if (client->in_shutdown) r = NULL; else r = idr_find(&client->resource_idr, handle); if (r && r->release == release) idr_remove(&client->resource_idr, handle); - spin_unlock_irqrestore(&client->lock, flags); + spin_unlock_irq(&client->lock); if (!(r && r->release == release)) return -EINVAL; @@ -1384,7 +1381,6 @@ static int fw_device_op_release(struct inode *inode, struct file *file) { struct client *client = file->private_data; struct event *e, *next_e; - unsigned long flags; mutex_lock(&client->device->client_list_mutex); list_del(&client->link); @@ -1397,9 +1393,9 @@ static int fw_device_op_release(struct inode *inode, struct file *file) fw_iso_context_destroy(client->iso_context); /* Freeze client->resource_idr and client->event_list */ - spin_lock_irqsave(&client->lock, flags); + spin_lock_irq(&client->lock); client->in_shutdown = true; - spin_unlock_irqrestore(&client->lock, flags); + spin_unlock_irq(&client->lock); idr_for_each(&client->resource_idr, shutdown_resource, client); idr_remove_all(&client->resource_idr); -- cgit v0.10.2 From 36a755cfc398fc50abc74055d4478c1b067dac55 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Mon, 5 Jan 2009 20:28:10 +0100 Subject: firewire: cdev: shut down iso context before freeing the buffer DMA must be halted before we DMA-unmap and free the DMA buffer. Since we cannot rely on the client to stop the context before it closes the fd, we have to reorder fw_iso_buffer_destroy vs. fw_iso_context_destroy. 
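In other words, the release path must destroy the context before the buffer. Condensed, the resulting order in fw_device_op_release() is the one sketched below; error handling and the remaining shutdown steps are unchanged and omitted here.

	/* 1. Destroy the iso context first; this halts its DMA program. */
	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

	/* 2. Only then DMA-unmap and free the buffer that backed it. */
	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);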
Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c index 3d816a6..b93ad9c 100644 --- a/drivers/firewire/fw-cdev.c +++ b/drivers/firewire/fw-cdev.c @@ -1386,12 +1386,12 @@ static int fw_device_op_release(struct inode *inode, struct file *file) list_del(&client->link); mutex_unlock(&client->device->client_list_mutex); - if (client->buffer.pages) - fw_iso_buffer_destroy(&client->buffer, client->device->card); - if (client->iso_context) fw_iso_context_destroy(client->iso_context); + if (client->buffer.pages) + fw_iso_buffer_destroy(&client->buffer, client->device->card); + /* Freeze client->resource_idr and client->event_list */ spin_lock_irq(&client->lock); client->in_shutdown = true; -- cgit v0.10.2 From 77258da403be4cfce84b6abcdb515ad0bd1f92f1 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Wed, 7 Jan 2009 20:14:53 +0100 Subject: firewire: cdev: increment fw_cdev_version, update documentation Necessary due to Date: Tue, 22 Jul 2008 23:23:40 -0700 From: David Moore Subject: firewire: Include iso timestamp in headers when header_size > 4 Side note: The lack of upwards compatibility sounds worse than it is. All existing client implementations, libraw1394 and libdc1394, set header_size = 4. And since the ABI v1 behaviour does not offer any advantages over the new behaviour, we deliberately do not provide the old behaviour anymore. Also add documentation about the format of fw_cdev_get_cycle_timer which may be used in conjunction with the timestamp of iso packets but has a different format. Signed-off-by: Stefan Richter diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h index 340a785..6ed9127 100644 --- a/include/linux/firewire-cdev.h +++ b/include/linux/firewire-cdev.h @@ -138,7 +138,24 @@ struct fw_cdev_event_request { * This event is sent when the controller has completed an &fw_cdev_iso_packet * with the %FW_CDEV_ISO_INTERRUPT bit set. In the receive case, the headers * stripped of all packets up until and including the interrupt packet are - * returned in the @header field. + * returned in the @header field. The amount of header data per packet is as + * specified at iso context creation by &fw_cdev_create_iso_context.header_size. + * + * In version 1 of this ABI, header data consisted of the 1394 isochronous + * packet header, followed by quadlets from the packet payload if + * &fw_cdev_create_iso_context.header_size > 4. + * + * In version 2 of this ABI, header data consist of the 1394 isochronous + * packet header, followed by a timestamp quadlet if + * &fw_cdev_create_iso_context.header_size > 4, followed by quadlets from the + * packet payload if &fw_cdev_create_iso_context.header_size > 8. + * + * Behaviour of ver. 1 of this ABI is no longer available since ABI ver. 2. + * + * Format of 1394 iso packet header: 16 bits len, 2 bits tag, 6 bits channel, + * 4 bits tcode, 4 bits sy, in big endian byte order. Format of timestamp: + * 16 bits invalid, 3 bits cycleSeconds, 13 bits cycleCount, in big endian byte + * order. */ struct fw_cdev_event_iso_interrupt { __u64 closure; @@ -232,11 +249,13 @@ union fw_cdev_event { #define FW_CDEV_IOC_GET_SPEED _IOR('#', 0x11, struct fw_cdev_get_speed) #define FW_CDEV_IOC_SEND_BROADCAST_REQUEST _IOW('#', 0x12, struct fw_cdev_send_request) -/* FW_CDEV_VERSION History - * - * 1 Feb 18, 2007: Initial version. 
+/* + * FW_CDEV_VERSION History + * 1 (2.6.22) - initial version + * 2 (2.6.30) - changed &fw_cdev_event_iso_interrupt.header if + * &fw_cdev_create_iso_context.header_size is 8 or more */ -#define FW_CDEV_VERSION 1 +#define FW_CDEV_VERSION 2 /** * struct fw_cdev_get_info - General purpose information ioctl @@ -417,6 +436,9 @@ struct fw_cdev_remove_descriptor { * * If a context was successfully created, the kernel writes back a handle to the * context, which must be passed in for subsequent operations on that context. + * + * Note that the effect of a @header_size > 4 depends on + * &fw_cdev_get_info.version, as documented at &fw_cdev_event_iso_interrupt. */ struct fw_cdev_create_iso_context { __u32 type; @@ -520,6 +542,9 @@ struct fw_cdev_stop_iso { * The %FW_CDEV_IOC_GET_CYCLE_TIMER ioctl reads the isochronous cycle timer * and also the system clock. This allows to express the receive time of an * isochronous packet as a system time with microsecond accuracy. + * + * @cycle_timer consists of 7 bits cycleSeconds, 13 bits cycleCount, and + * 12 bits cycleOffset, in host byte order. */ struct fw_cdev_get_cycle_timer { __u64 local_time; -- cgit v0.10.2 From 5d9cb7d276a9c465fef5a771792eac2cf1929f2b Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Thu, 8 Jan 2009 23:07:40 +0100 Subject: firewire: cdev: add ioctls for iso resource management, amendment Some fixes: - Remove stale documentation. - Fix a != vs. == thinko that got in the way of channel management. - Try bandwidth deallocation even if channel deallocation failed. A simplification: - fw_cdev_allocate_iso_resource.channels is now ordered like libdc1394's dc1394_iso_allocate_channel() channels_allowed argument. By the way, I looked closer at cards from NEC, TI, and VIA, and noticed that they all don't implement IEEE 1394a behaviour which is meant to deviate from IEEE 1212's notion of lock compare-swap. This means that we have to do two lock transactions instead of one in many cases where one transaction would already succeed on a fully 1394a compliant IRM. Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c index b93ad9c..257b0c7 100644 --- a/drivers/firewire/fw-cdev.c +++ b/drivers/firewire/fw-cdev.c @@ -1082,7 +1082,7 @@ static void iso_resource_work(struct work_struct *work) spin_unlock_irq(&client->lock); if (todo == ISO_RES_ALLOC && channel >= 0) - r->channels = 1ULL << (63 - channel); + r->channels = 1ULL << channel; if (todo == ISO_RES_REALLOC && success) goto out; diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c index a7b57b2..f511d16 100644 --- a/drivers/firewire/fw-iso.c +++ b/drivers/firewire/fw-iso.c @@ -204,17 +204,19 @@ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation, } static int manage_channel(struct fw_card *card, int irm_id, int generation, - __be32 channels_mask, u64 offset, bool allocate) + u32 channels_mask, u64 offset, bool allocate) { - __be32 data[2], c, old = allocate ? cpu_to_be32(~0) : 0; + __be32 data[2], c, all, old; int i, retry = 5; + old = all = allocate ? cpu_to_be32(~0) : 0; + for (i = 0; i < 32; i++) { - c = cpu_to_be32(1 << (31 - i)); - if (!(channels_mask & c)) + if (!(channels_mask & 1 << i)) continue; - if (allocate == !(old & c)) + c = cpu_to_be32(1 << (31 - i)); + if ((old & c) != (all & c)) continue; data[0] = old; @@ -233,7 +235,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation, old = data[0]; /* Is the IRM 1394a-2000 compliant? 
*/ - if ((data[0] & c) != (data[1] & c)) + if ((data[0] & c) == (data[1] & c)) continue; /* 1394-1995 IRM, fall through to retry. */ @@ -249,11 +251,10 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation, static void deallocate_channel(struct fw_card *card, int irm_id, int generation, int channel) { - __be32 mask; + u32 mask; u64 offset; - mask = channel < 32 ? cpu_to_be32(1 << (31 - channel)) : - cpu_to_be32(1 << (63 - channel)); + mask = channel < 32 ? 1 << channel : 1 << (channel - 32); offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI : CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO; @@ -266,7 +267,12 @@ static void deallocate_channel(struct fw_card *card, int irm_id, * In parameters: card, generation, channels_mask, bandwidth, allocate * Out parameters: channel, bandwidth * This function blocks (sleeps) during communication with the IRM. + * * Allocates or deallocates at most one channel out of channels_mask. + * channels_mask is a bitfield with MSB for channel 63 and LSB for channel 0. + * (Note, the IRM's CHANNELS_AVAILABLE is a big-endian bitfield with MSB for + * channel 0 and LSB for channel 63.) + * Allocates or deallocates as many bandwidth allocation units as specified. * * Returns channel < 0 if no channel was allocated or deallocated. * Returns bandwidth = 0 if no bandwidth was allocated or deallocated. @@ -274,17 +280,17 @@ static void deallocate_channel(struct fw_card *card, int irm_id, * If generation is stale, deallocations succeed but allocations fail with * channel = -EAGAIN. * - * If channel (de)allocation fails, bandwidth (de)allocation fails too. + * If channel allocation fails, no bandwidth will be allocated either. * If bandwidth allocation fails, no channel will be allocated either. - * If bandwidth deallocation fails, channel deallocation may still have been - * successful. + * But deallocations of channel and bandwidth are tried independently + * of each other's success. 
*/ void fw_iso_resource_manage(struct fw_card *card, int generation, u64 channels_mask, int *channel, int *bandwidth, bool allocate) { - __be32 channels_hi = cpu_to_be32(channels_mask >> 32); - __be32 channels_lo = cpu_to_be32(channels_mask); + u32 channels_hi = channels_mask; /* channels 31...0 */ + u32 channels_lo = channels_mask >> 32; /* channels 63...32 */ int irm_id, ret, c = -EINVAL; spin_lock_irq(&card->lock); @@ -302,7 +308,7 @@ void fw_iso_resource_manage(struct fw_card *card, int generation, } *channel = c; - if (channels_mask != 0 && c < 0) + if (allocate && channels_mask != 0 && c < 0) *bandwidth = 0; if (*bandwidth == 0) @@ -312,7 +318,7 @@ void fw_iso_resource_manage(struct fw_card *card, int generation, if (ret < 0) *bandwidth = 0; - if (ret < 0 && c >= 0 && allocate) { + if (allocate && ret < 0 && c >= 0) { deallocate_channel(card, irm_id, generation, c); *channel = ret; } diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h index 6ed9127..2e35379b 100644 --- a/include/linux/firewire-cdev.h +++ b/include/linux/firewire-cdev.h @@ -174,8 +174,6 @@ struct fw_cdev_event_iso_interrupt { * @handle: Reference by which an allocated resource can be deallocated * @channel: Isochronous channel which was (de)allocated, if any * @bandwidth: Bandwidth allocation units which were (de)allocated, if any - * @channels_available: Last known availability of channels - * @bandwidth_available: Last known availability of bandwidth * * An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event is sent after an isochronous * resource was allocated at the IRM. The client has to check @channel and @@ -580,7 +578,7 @@ struct fw_cdev_get_cycle_timer { * * The %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE_ONCE ioctl works like * %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE except that resources are freed - * instead of allocated. At most one channel may be specified in this ioctl. + * instead of allocated. * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation. * * To summarize, %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE allocates iso resources @@ -588,9 +586,9 @@ struct fw_cdev_get_cycle_timer { * In contrast, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE allocates iso resources * for the duration of a bus generation. * - * @channels is a host-endian bitfield with the most significant bit - * representing channel 0 and the least significant bit representing channel 63: - * 1ULL << (63 - c) + * @channels is a host-endian bitfield with the least significant bit + * representing channel 0 and the most significant bit representing channel 63: + * 1ULL << c for each channel c that is a candidate for (de)allocation. * * @bandwidth is expressed in bandwidth allocation units, i.e. the time to send * one quadlet of data (payload or header data) at speed S1600. -- cgit v0.10.2 From 81610b8fbfc027a67707ff567d490819a3d55844 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Sun, 11 Jan 2009 13:44:46 +0100 Subject: firewire: cdev: simplify a schedule_delayed_work wrapper The kernel API documentation says that queue_delayed_work() returns 0 (only) if the work was already queued. The return codes of schedule_delayed_work() are not documented but the same. In init_iso_resource(), the work has never been queued yet, hence we can assume schedule_delayed_work() to be a guaranteed success there. 
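To make the semantics being relied on concrete, here is a minimal self-contained sketch; it is not from the patch, and the function and variable names are invented for illustration.

#include <linux/kernel.h>
#include <linux/workqueue.h>

static void example_fn(struct work_struct *work)
{
}

static DECLARE_DELAYED_WORK(example_dwork, example_fn);

static void illustrate_return_value(void)
{
	int queued;

	/* A freshly initialized, never-queued item cannot already be
	 * pending, so this first call always returns nonzero; that is
	 * the situation in init_iso_resource().
	 */
	queued = schedule_delayed_work(&example_dwork, 0);

	/* 0 is returned only if the item is still pending from an
	 * earlier call, i.e. when repeating this before example_fn()
	 * has run.
	 */
	queued = schedule_delayed_work(&example_dwork, 0);
	(void)queued;
}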
Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c index 257b0c7..214e534 100644 --- a/drivers/firewire/fw-cdev.c +++ b/drivers/firewire/fw-cdev.c @@ -129,7 +129,7 @@ struct iso_resource { struct iso_resource_event *e_alloc, *e_dealloc; }; -static int schedule_iso_resource(struct iso_resource *); +static void schedule_iso_resource(struct iso_resource *); static void release_iso_resource(struct client *, struct client_resource *); /* @@ -1111,17 +1111,11 @@ static void iso_resource_work(struct work_struct *work) client_put(client); } -static int schedule_iso_resource(struct iso_resource *r) +static void schedule_iso_resource(struct iso_resource *r) { - int scheduled; - client_get(r->client); - - scheduled = schedule_delayed_work(&r->work, 0); - if (!scheduled) + if (!schedule_delayed_work(&r->work, 0)) client_put(r->client); - - return scheduled; } static void release_iso_resource(struct client *client, @@ -1173,13 +1167,13 @@ static int init_iso_resource(struct client *client, if (todo == ISO_RES_ALLOC) { r->resource.release = release_iso_resource; ret = add_client_resource(client, &r->resource, GFP_KERNEL); + if (ret < 0) + goto fail; } else { r->resource.release = NULL; r->resource.handle = -1; - ret = schedule_iso_resource(r) ? 0 : -ENOMEM; + schedule_iso_resource(r); } - if (ret < 0) - goto fail; request->handle = r->resource.handle; return 0; -- cgit v0.10.2 From 41f321c2ecf416f9dcf76de989e9059fd699c8c1 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Sat, 17 Jan 2009 22:45:54 +0100 Subject: firewire: core: clean up includes Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c index ac5043c..7e05e99 100644 --- a/drivers/firewire/fw-device.c +++ b/drivers/firewire/fw-device.c @@ -18,24 +18,26 @@ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ -#include -#include -#include -#include -#include +#include #include +#include +#include #include #include -#include +#include +#include #include #include #include #include +#include +#include + #include -#include -#include "fw-transaction.h" -#include "fw-topology.h" + #include "fw-device.h" +#include "fw-topology.h" +#include "fw-transaction.h" void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 * p) { diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h index 41483f1..3085a74 100644 --- a/drivers/firewire/fw-device.h +++ b/drivers/firewire/fw-device.h @@ -19,11 +19,17 @@ #ifndef __fw_device_h #define __fw_device_h +#include #include -#include #include -#include +#include +#include #include +#include +#include +#include +#include + #include enum fw_device_state { @@ -39,6 +45,9 @@ struct fw_attribute_group { struct attribute *attrs[11]; }; +struct fw_node; +struct fw_card; + /* * Note, fw_device.generation always has to be read before fw_device.node_id. * Use SMP memory barriers to ensure this. 
Otherwise requests will be sent -- cgit v0.10.2 From aed808927410d0b1d80378492059f22a46974267 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Sat, 17 Jan 2009 22:45:54 +0100 Subject: firewire: core: move some functions Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c index 7e05e99..7276a0d 100644 --- a/drivers/firewire/fw-device.c +++ b/drivers/firewire/fw-device.c @@ -155,27 +155,6 @@ struct bus_type fw_bus_type = { }; EXPORT_SYMBOL(fw_bus_type); -static void fw_device_release(struct device *dev) -{ - struct fw_device *device = fw_device(dev); - struct fw_card *card = device->card; - unsigned long flags; - - /* - * Take the card lock so we don't set this to NULL while a - * FW_NODE_UPDATED callback is being handled or while the - * bus manager work looks at this node. - */ - spin_lock_irqsave(&card->lock, flags); - device->node->data = NULL; - spin_unlock_irqrestore(&card->lock, flags); - - fw_node_put(device->node); - kfree(device->config_rom); - kfree(device); - fw_card_put(card); -} - int fw_device_enable_phys_dma(struct fw_device *device) { int generation = device->generation; @@ -679,11 +658,53 @@ static void fw_device_shutdown(struct work_struct *work) fw_device_put(device); } +static void fw_device_release(struct device *dev) +{ + struct fw_device *device = fw_device(dev); + struct fw_card *card = device->card; + unsigned long flags; + + /* + * Take the card lock so we don't set this to NULL while a + * FW_NODE_UPDATED callback is being handled or while the + * bus manager work looks at this node. + */ + spin_lock_irqsave(&card->lock, flags); + device->node->data = NULL; + spin_unlock_irqrestore(&card->lock, flags); + + fw_node_put(device->node); + kfree(device->config_rom); + kfree(device); + fw_card_put(card); +} + static struct device_type fw_device_type = { - .release = fw_device_release, + .release = fw_device_release, }; -static void fw_device_update(struct work_struct *work); +static int update_unit(struct device *dev, void *data) +{ + struct fw_unit *unit = fw_unit(dev); + struct fw_driver *driver = (struct fw_driver *)dev->driver; + + if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) { + down(&dev->sem); + driver->update(unit); + up(&dev->sem); + } + + return 0; +} + +static void fw_device_update(struct work_struct *work) +{ + struct fw_device *device = + container_of(work, struct fw_device, work.work); + + fw_device_cdev_update(device); + device_for_each_child(&device->device, NULL, update_unit); +} /* * If a device was pending for deletion because its node went away but its @@ -851,29 +872,6 @@ static void fw_device_init(struct work_struct *work) put_device(&device->device); /* our reference */ } -static int update_unit(struct device *dev, void *data) -{ - struct fw_unit *unit = fw_unit(dev); - struct fw_driver *driver = (struct fw_driver *)dev->driver; - - if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) { - down(&dev->sem); - driver->update(unit); - up(&dev->sem); - } - - return 0; -} - -static void fw_device_update(struct work_struct *work) -{ - struct fw_device *device = - container_of(work, struct fw_device, work.work); - - fw_device_cdev_update(device); - device_for_each_child(&device->device, NULL, update_unit); -} - enum { REREAD_BIB_ERROR, REREAD_BIB_GONE, -- cgit v0.10.2 From d01b01787680a1156ff6a554e40baa460bb88efb Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Sat, 17 Jan 2009 22:45:54 +0100 Subject: firewire: core: remove condition which is always false 
reread_bus_info_block() only gets to see devices whose config_rom_length is at least 6 (ROM header, bus info block, root directory header). Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c index 7276a0d..df789d3 100644 --- a/drivers/firewire/fw-device.c +++ b/drivers/firewire/fw-device.c @@ -892,7 +892,7 @@ static int reread_bus_info_block(struct fw_device *device, int generation) if (i == 0 && q == 0) return REREAD_BIB_GONE; - if (i > device->config_rom_length || q != device->config_rom[i]) + if (q != device->config_rom[i]) return REREAD_BIB_CHANGED; } -- cgit v0.10.2 From e1eff7a393d4a4e3ad1cf65fcba899146840bfd2 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Tue, 3 Feb 2009 17:55:19 +0100 Subject: firewire: normalize a variable name Standardize on if (err) handle_error; and if (ret < 0) handle_error; Don't call a variable err if we store values in it which mean success. Also, offset some return statements by a blank line since this is how we do it in drivers/firewire. Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c index 27a3aae..08a7e18 100644 --- a/drivers/firewire/fw-card.c +++ b/drivers/firewire/fw-card.c @@ -404,7 +404,7 @@ int fw_card_add(struct fw_card *card, { u32 *config_rom; size_t length; - int err; + int ret; card->max_receive = max_receive; card->link_speed = link_speed; @@ -415,13 +415,14 @@ int fw_card_add(struct fw_card *card, list_add_tail(&card->link, &card_list); mutex_unlock(&card_mutex); - err = card->driver->enable(card, config_rom, length); - if (err < 0) { + ret = card->driver->enable(card, config_rom, length); + if (ret < 0) { mutex_lock(&card_mutex); list_del(&card->link); mutex_unlock(&card_mutex); } - return err; + + return ret; } EXPORT_SYMBOL(fw_card_add); diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c index df789d3..633e44d 100644 --- a/drivers/firewire/fw-device.c +++ b/drivers/firewire/fw-device.c @@ -761,7 +761,7 @@ static void fw_device_init(struct work_struct *work) struct fw_device *device = container_of(work, struct fw_device, work.work); struct device *revived_dev; - int minor, err; + int minor, ret; /* * All failure paths here set node->data to NULL, so that we @@ -797,12 +797,12 @@ static void fw_device_init(struct work_struct *work) fw_device_get(device); down_write(&fw_device_rwsem); - err = idr_pre_get(&fw_device_idr, GFP_KERNEL) ?
idr_get_new(&fw_device_idr, device, &minor) : -ENOMEM; up_write(&fw_device_rwsem); - if (err < 0) + if (ret < 0) goto error; device->device.bus = &fw_bus_type; diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c index f511d16..2baf100 100644 --- a/drivers/firewire/fw-iso.c +++ b/drivers/firewire/fw-iso.c @@ -75,19 +75,21 @@ int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, kfree(buffer->pages); out: buffer->pages = NULL; + return -ENOMEM; } int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma) { unsigned long uaddr; - int i, ret; + int i, err; uaddr = vma->vm_start; for (i = 0; i < buffer->page_count; i++) { - ret = vm_insert_page(vma, uaddr, buffer->pages[i]); - if (ret) - return ret; + err = vm_insert_page(vma, uaddr, buffer->pages[i]); + if (err) + return err; + uaddr += PAGE_SIZE; } diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c index 859af71..c922783 100644 --- a/drivers/firewire/fw-ohci.c +++ b/drivers/firewire/fw-ohci.c @@ -2459,11 +2459,12 @@ static int __devinit pci_probe(struct pci_dev *dev, reg_read(ohci, OHCI1394_GUIDLo); err = fw_card_add(&ohci->card, max_receive, link_speed, guid); - if (err < 0) + if (err) goto fail_self_id; fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n", dev_name(&dev->dev), version >> 16, version & 0xff); + return 0; fail_self_id: -- cgit v0.10.2 From ba27e1f7bf220799cd3d7503f82bda71b8ebe8c5 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Thu, 5 Mar 2009 19:07:00 +0100 Subject: firewire: core: normalize a function argument name It's called "payload" rather than "data" almost everywhere in fw-transaction.c. Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c index 1537737..76938fe 100644 --- a/drivers/firewire/fw-transaction.c +++ b/drivers/firewire/fw-transaction.c @@ -317,15 +317,15 @@ static void transaction_callback(struct fw_card *card, int rcode, */ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, int generation, int speed, unsigned long long offset, - void *data, size_t length) + void *payload, size_t length) { struct transaction_callback_data d; struct fw_transaction t; init_completion(&d.done); - d.payload = data; + d.payload = payload; fw_send_request(card, &t, tcode, destination_id, generation, speed, - offset, data, length, transaction_callback, &d); + offset, payload, length, transaction_callback, &d); wait_for_completion(&d.done); return d.rcode; diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h index 212a102..35d0a4b 100644 --- a/drivers/firewire/fw-transaction.h +++ b/drivers/firewire/fw-transaction.h @@ -405,14 +405,14 @@ int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset); void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode, int destination_id, int generation, int speed, - unsigned long long offset, void *data, size_t length, + unsigned long long offset, void *payload, size_t length, fw_transaction_callback_t callback, void *callback_data); int fw_cancel_transaction(struct fw_card *card, struct fw_transaction *transaction); void fw_flush_transactions(struct fw_card *card); int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, int generation, int speed, unsigned long long offset, - void *data, size_t length); + void *payload, size_t length); void fw_send_phy_config(struct fw_card *card, int node_id, int generation, int gap_count); -- cgit v0.10.2 From 
f8c2287c65f8f72000102fc058232669e4540bc4 Mon Sep 17 00:00:00 2001 From: Jay Fenlason Date: Thu, 5 Mar 2009 19:08:40 +0100 Subject: firewire: implement asynchronous stream transmission Allow userspace and other firewire drivers (fw-ipv4 I'm looking at you!) to send Asynchronous Transmit Streams as described in 7.8.3 of release 1.1 of the 1394 Open Host Controller Interface Specification. Signed-off-by: Jay Fenlason Signed-off-by: Stefan Richter (tweaks) diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c index 214e534..539dae5 100644 --- a/drivers/firewire/fw-cdev.c +++ b/drivers/firewire/fw-cdev.c @@ -1242,6 +1242,38 @@ static int ioctl_send_broadcast_request(struct client *client, void *buffer) return init_request(client, request, LOCAL_BUS | 0x3f, SCODE_100); } +struct stream_packet { + struct fw_packet packet; + u8 data[0]; +}; + +static void send_stream_packet_done(struct fw_packet *packet, + struct fw_card *card, int status) +{ + kfree(container_of(packet, struct stream_packet, packet)); +} + +static int ioctl_send_stream_packet(struct client *client, void *buffer) +{ + struct fw_cdev_send_stream_packet *request = buffer; + struct stream_packet *p; + + p = kmalloc(sizeof(*p) + request->size, GFP_KERNEL); + if (p == NULL) + return -ENOMEM; + + if (request->data && + copy_from_user(p->data, u64_to_uptr(request->data), request->size)) { + kfree(p); + return -EFAULT; + } + fw_send_stream_packet(client->device->card, &p->packet, + request->generation, request->speed, + request->channel, request->sy, request->tag, + p->data, request->size, send_stream_packet_done); + return 0; +} + static int (* const ioctl_handlers[])(struct client *client, void *buffer) = { ioctl_get_info, ioctl_send_request, @@ -1262,6 +1294,7 @@ static int (* const ioctl_handlers[])(struct client *client, void *buffer) = { ioctl_deallocate_iso_resource_once, ioctl_get_speed, ioctl_send_broadcast_request, + ioctl_send_stream_packet, }; static int dispatch_ioctl(struct client *client, diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c index c922783..1180d0b 100644 --- a/drivers/firewire/fw-ohci.c +++ b/drivers/firewire/fw-ohci.c @@ -936,7 +936,9 @@ static int at_context_queue_packet(struct context *ctx, */ header = (__le32 *) &d[1]; - if (packet->header_length > 8) { + switch (packet->header_length) { + case 16: + case 12: header[0] = cpu_to_le32((packet->header[0] & 0xffff) | (packet->speed << 16)); header[1] = cpu_to_le32((packet->header[1] & 0xffff) | @@ -950,12 +952,27 @@ static int at_context_queue_packet(struct context *ctx, header[3] = (__force __le32) packet->header[3]; d[0].req_count = cpu_to_le16(packet->header_length); - } else { + break; + + case 8: header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) | (packet->speed << 16)); header[1] = cpu_to_le32(packet->header[0]); header[2] = cpu_to_le32(packet->header[1]); d[0].req_count = cpu_to_le16(12); + break; + + case 4: + header[0] = cpu_to_le32((packet->header[0] & 0xffff) | + (packet->speed << 16)); + header[1] = cpu_to_le32(packet->header[0] & 0xffff0000); + d[0].req_count = cpu_to_le16(8); + break; + + default: + /* BUG(); */ + packet->ack = RCODE_SEND_ERROR; + return -1; } driver_data = (struct driver_data *) &d[3]; diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c index 76938fe..e3da589 100644 --- a/drivers/firewire/fw-transaction.c +++ b/drivers/firewire/fw-transaction.c @@ -37,6 +37,10 @@ #include "fw-topology.h" #include "fw-device.h" +#define HEADER_TAG(tag) ((tag) << 14) +#define 
HEADER_CHANNEL(ch) ((ch) << 8) +#define HEADER_SY(sy) ((sy) << 0) + #define HEADER_PRI(pri) ((pri) << 0) #define HEADER_TCODE(tcode) ((tcode) << 4) #define HEADER_RETRY(retry) ((retry) << 8) @@ -293,6 +297,27 @@ void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode, } EXPORT_SYMBOL(fw_send_request); +void fw_send_stream_packet(struct fw_card *card, struct fw_packet *p, + int generation, int speed, int channel, int sy, int tag, + void *payload, size_t length, fw_packet_callback_t callback) +{ + p->callback = callback; + p->header[0] = + HEADER_DATA_LENGTH(length) + | HEADER_TAG(tag) + | HEADER_CHANNEL(channel) + | HEADER_TCODE(TCODE_STREAM_DATA) + | HEADER_SY(sy); + p->header_length = 4; + p->payload = payload; + p->payload_length = length; + p->speed = speed; + p->generation = generation; + p->ack = 0; + + card->driver->send_request(card, p); +} + struct transaction_callback_data { struct completion done; void *payload; diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h index 35d0a4b..eed2e29 100644 --- a/drivers/firewire/fw-transaction.h +++ b/drivers/firewire/fw-transaction.h @@ -407,6 +407,10 @@ void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode, int destination_id, int generation, int speed, unsigned long long offset, void *payload, size_t length, fw_transaction_callback_t callback, void *callback_data); +void fw_send_stream_packet(struct fw_card *card, struct fw_packet *p, + int generation, int speed, int channel, int sy, int tag, + void *payload, size_t length, fw_packet_callback_t callback); + int fw_cancel_transaction(struct fw_card *card, struct fw_transaction *transaction); void fw_flush_transactions(struct fw_card *card); diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h index 2e35379b..4dfc84d 100644 --- a/include/linux/firewire-cdev.h +++ b/include/linux/firewire-cdev.h @@ -246,6 +246,7 @@ union fw_cdev_event { #define FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE_ONCE _IOW('#', 0x10, struct fw_cdev_allocate_iso_resource) #define FW_CDEV_IOC_GET_SPEED _IOR('#', 0x11, struct fw_cdev_get_speed) #define FW_CDEV_IOC_SEND_BROADCAST_REQUEST _IOW('#', 0x12, struct fw_cdev_send_request) +#define FW_CDEV_IOC_SEND_STREAM_PACKET _IOW('#', 0x13, struct fw_cdev_send_stream_packet) /* * FW_CDEV_VERSION History @@ -609,4 +610,30 @@ struct fw_cdev_get_speed { __u32 max_speed; }; +/** + * struct fw_cdev_send_stream_packet - send an asynchronous stream packet + * @generation: Bus generation where the packet is valid + * @speed: Speed code to send the packet at + * @channel: Channel to send the packet on + * @sy: Four-bit sy code for the packet + * @tag: Two-bit tag field to use for the packet + * @size: Size of the packet's data payload + * @data: Userspace pointer to the payload + * + * The %FW_CDEV_IOC_SEND_STREAM_PACKET ioctl sends an asynchronous stream packet + * to every device (that is listening to the specified channel) on the + * firewire bus. It is the applications's job to ensure + * that the intended device(s) will be able to receive the packet at the chosen + * transmit speed. 
+ */ +struct fw_cdev_send_stream_packet { + __u32 generation; + __u32 speed; + __u32 channel; + __u32 sy; + __u32 tag; + __u32 size; + __u64 data; +}; + #endif /* _LINUX_FIREWIRE_CDEV_H */ -- cgit v0.10.2 From 6104ee92d62ea3638b67494fcf061cb4b9b9d518 Mon Sep 17 00:00:00 2001 From: Jay Fenlason Date: Mon, 23 Feb 2009 15:59:34 -0500 Subject: firewire: broadcast channel support This patch adds the ISO broadcast channel support that is required of a 1394a IRM. In specific, if the local device is the IRM, it allocates ISO channel 31 and sets the broadcast channel register of all devices on the local bus to BROADCAST_CHANNEL_INITIAL | BROADCAST_CHANNEL_VALID to indicate that channel 31 can be used for broadcast messages. One minor complication is that on startup the local device may become IRM before all the devices on the bus have been enumerated by the stack. Therefore we have to keep a "the local device is IRM" flag and possibly set the broadcast channel register of new devices at enumeration time. Signed-off-by: Jay Fenlason Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c index 08a7e18..f2b363e 100644 --- a/drivers/firewire/fw-card.c +++ b/drivers/firewire/fw-card.c @@ -181,6 +181,147 @@ void fw_core_remove_descriptor(struct fw_descriptor *desc) mutex_unlock(&card_mutex); } +/* ------------------------------------------------------------------ */ +/* Code to handle 1394a broadcast channel */ + +#define THIRTY_TWO_CHANNELS (0xFFFFFFFFU) +#define IRM_RETRIES 2 + +/* + * The abi is set by device_for_each_child(), even though we have no use + * for data, nor do we have a meaningful return value. + */ +int fw_irm_set_broadcast_channel_register(struct device *dev, void *data) +{ + struct fw_device *d; + int rcode; + int node_id; + int max_speed; + int retries; + int generation; + __be32 regval; + struct fw_card *card; + + d = fw_device(dev); + /* FIXME: do we need locking here?
*/ + generation = d->generation; + smp_rmb(); /* Ensure generation is at least as old as node_id */ + node_id = d->node_id; + max_speed = d->max_speed; + retries = IRM_RETRIES; + card = d->card; +tryagain_r: + rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST, + node_id, generation, max_speed, + CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL, + ®val, 4); + switch (rcode) { + case RCODE_BUSY: + if (retries--) + goto tryagain_r; + fw_notify("node %x read broadcast channel busy\n", + node_id); + return 0; + + default: + fw_notify("node %x read broadcast channel failed %x\n", + node_id, rcode); + return 0; + + case RCODE_COMPLETE: + /* + * Paranoid reporting of nonstandard broadcast channel + * contents goes here + */ + if (regval != cpu_to_be32(BROADCAST_CHANNEL_INITIAL)) + return 0; + break; + } + retries = IRM_RETRIES; + regval = cpu_to_be32(BROADCAST_CHANNEL_INITIAL | + BROADCAST_CHANNEL_VALID); +tryagain_w: + rcode = fw_run_transaction(card, + TCODE_WRITE_QUADLET_REQUEST, node_id, + generation, max_speed, + CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL, + ®val, 4); + switch (rcode) { + case RCODE_BUSY: + if (retries--) + goto tryagain_w; + fw_notify("node %x write broadcast channel busy\n", + node_id); + return 0; + + default: + fw_notify("node %x write broadcast channel failed %x\n", + node_id, rcode); + return 0; + + case RCODE_COMPLETE: + return 0; + } + return 0; +} + +static void +irm_allocate_broadcast(struct fw_device *irm_dev, struct device *locald) +{ + u32 generation; + u32 node_id; + u32 max_speed; + u32 retries; + __be32 old_data; + __be32 lock_data[2]; + int rcode; + + /* + * The device we are updating is the IRM, so we must do + * some extra work. + */ + retries = IRM_RETRIES; + generation = irm_dev->generation; + /* FIXME: do we need locking here? 
*/ + smp_rmb(); + node_id = irm_dev->node_id; + max_speed = irm_dev->max_speed; + + lock_data[0] = cpu_to_be32(THIRTY_TWO_CHANNELS); + lock_data[1] = cpu_to_be32(THIRTY_TWO_CHANNELS & ~1); +tryagain: + old_data = lock_data[0]; + rcode = fw_run_transaction(irm_dev->card, TCODE_LOCK_COMPARE_SWAP, + node_id, generation, max_speed, + CSR_REGISTER_BASE+CSR_CHANNELS_AVAILABLE_HI, + &lock_data[0], 8); + switch (rcode) { + case RCODE_BUSY: + if (retries--) + goto tryagain; + /* fallthrough */ + default: + fw_error("node %x: allocate broadcast channel failed (%x)\n", + node_id, rcode); + return; + + case RCODE_COMPLETE: + if (lock_data[0] == old_data) + break; + if (retries--) { + lock_data[1] = cpu_to_be32(be32_to_cpu(lock_data[0])&~1); + goto tryagain; + } + fw_error("node %x: allocate broadcast channel failed: too many" + " retries\n", node_id); + return; + } + irm_dev->card->is_irm = true; + device_for_each_child(locald, NULL, fw_irm_set_broadcast_channel_register); +} +/* ------------------------------------------------------------------ */ + + static const char gap_count_table[] = { 63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40 }; @@ -198,8 +339,8 @@ void fw_schedule_bm_work(struct fw_card *card, unsigned long delay) static void fw_card_bm_work(struct work_struct *work) { struct fw_card *card = container_of(work, struct fw_card, work.work); - struct fw_device *root_device; - struct fw_node *root_node, *local_node; + struct fw_device *root_device, *irm_device, *local_device; + struct fw_node *root_node, *local_node, *irm_node; unsigned long flags; int root_id, new_root_id, irm_id, gap_count, generation, grace, rcode; bool do_reset = false; @@ -208,8 +349,10 @@ static void fw_card_bm_work(struct work_struct *work) __be32 lock_data[2]; spin_lock_irqsave(&card->lock, flags); + card->is_irm = false; local_node = card->local_node; root_node = card->root_node; + irm_node = card->irm_node; if (local_node == NULL) { spin_unlock_irqrestore(&card->lock, flags); @@ -217,6 +360,7 @@ static void fw_card_bm_work(struct work_struct *work) } fw_node_get(local_node); fw_node_get(root_node); + fw_node_get(irm_node); generation = card->generation; root_device = root_node->data; @@ -225,7 +369,8 @@ static void fw_card_bm_work(struct work_struct *work) root_device_is_cmc = root_device && root_device->cmc; root_id = root_node->node_id; grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10)); - + irm_device = irm_node->data; + local_device = local_node->data; if (is_next_generation(generation, card->bm_generation) || (card->bm_generation != generation && grace)) { /* @@ -240,8 +385,8 @@ static void fw_card_bm_work(struct work_struct *work) * next generation. */ - irm_id = card->irm_node->node_id; - if (!card->irm_node->link_on) { + irm_id = irm_node->node_id; + if (!irm_node->link_on) { new_root_id = local_node->node_id; fw_notify("IRM has link off, making local node (%02x) root.\n", new_root_id); @@ -263,9 +408,15 @@ static void fw_card_bm_work(struct work_struct *work) goto out; if (rcode == RCODE_COMPLETE && - lock_data[0] != cpu_to_be32(0x3f)) + lock_data[0] != cpu_to_be32(0x3f)) { /* Somebody else is BM, let them do the work. 
*/ + if (irm_id == local_node->node_id) { + /* But we are IRM, so do irm-y things */ + irm_allocate_broadcast(irm_device, + card->device); + } goto out; + } spin_lock_irqsave(&card->lock, flags); @@ -357,10 +508,19 @@ static void fw_card_bm_work(struct work_struct *work) card->index, new_root_id, gap_count); fw_send_phy_config(card, new_root_id, generation, gap_count); fw_core_initiate_bus_reset(card, 1); + } else if (irm_node->node_id == local_node->node_id) { + /* + * We are IRM, so do irm-y things. + * There's no reason to do this if we're doing a reset. . . + * We'll be back. + */ + irm_allocate_broadcast(irm_device, card->device); } + out: fw_node_put(root_node); fw_node_put(local_node); + fw_node_put(irm_node); out_put_card: fw_card_put(card); } diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c index 633e44d..a40444e 100644 --- a/drivers/firewire/fw-device.c +++ b/drivers/firewire/fw-device.c @@ -849,6 +849,9 @@ static void fw_device_init(struct work_struct *work) device->config_rom[3], device->config_rom[4], 1 << device->max_speed); device->config_rom_retries = 0; + if (device->card->is_irm) + fw_irm_set_broadcast_channel_register(&device->device, + NULL); } /* diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h index eed2e29..f90f09c 100644 --- a/drivers/firewire/fw-transaction.h +++ b/drivers/firewire/fw-transaction.h @@ -230,6 +230,11 @@ struct fw_card { u8 color; /* must be u8 to match the definition in struct fw_node */ int gap_count; bool beta_repeaters_present; + /* + * Set if the local device is the IRM and the broadcast channel + * was allocated. + */ + bool is_irm; int index; @@ -438,4 +443,7 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, void fw_core_handle_request(struct fw_card *card, struct fw_packet *request); void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet); +extern int fw_irm_set_broadcast_channel_register(struct device *dev, + void *data); + #endif /* __fw_transaction_h */ -- cgit v0.10.2 From c8a25900f35e575938c791507894c036c0f2ca7d Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Tue, 10 Mar 2009 20:59:16 +0100 Subject: firewire: cdev: amendment to "add ioctl to query maximum transmission speed" The as yet unreleased FW_CDEV_IOC_GET_SPEED ioctl puts only a single integer into the parameter buffer. We can use ioctl()'s return value instead. (Also: Some whitespace change in firewire-cdev.h.) Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c index 539dae5..2784f91 100644 --- a/drivers/firewire/fw-cdev.c +++ b/drivers/firewire/fw-cdev.c @@ -1214,13 +1214,14 @@ static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffe return init_iso_resource(client, request, ISO_RES_DEALLOC_ONCE); } +/* + * Returns a speed code: Maximum speed to or from this device, + * limited by the device's link speed, the local node's link speed, + * and all PHY port speeds between the two links. 
+ */ static int ioctl_get_speed(struct client *client, void *buffer) { - struct fw_cdev_get_speed *request = buffer; - - request->max_speed = client->device->max_speed; - - return 0; + return client->device->max_speed; } static int ioctl_send_broadcast_request(struct client *client, void *buffer) diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h index 4dfc84d..de40357 100644 --- a/include/linux/firewire-cdev.h +++ b/include/linux/firewire-cdev.h @@ -223,28 +223,28 @@ union fw_cdev_event { }; /* available since kernel version 2.6.22 */ -#define FW_CDEV_IOC_GET_INFO _IOWR('#', 0x00, struct fw_cdev_get_info) -#define FW_CDEV_IOC_SEND_REQUEST _IOW('#', 0x01, struct fw_cdev_send_request) -#define FW_CDEV_IOC_ALLOCATE _IOWR('#', 0x02, struct fw_cdev_allocate) -#define FW_CDEV_IOC_DEALLOCATE _IOW('#', 0x03, struct fw_cdev_deallocate) -#define FW_CDEV_IOC_SEND_RESPONSE _IOW('#', 0x04, struct fw_cdev_send_response) -#define FW_CDEV_IOC_INITIATE_BUS_RESET _IOW('#', 0x05, struct fw_cdev_initiate_bus_reset) -#define FW_CDEV_IOC_ADD_DESCRIPTOR _IOWR('#', 0x06, struct fw_cdev_add_descriptor) -#define FW_CDEV_IOC_REMOVE_DESCRIPTOR _IOW('#', 0x07, struct fw_cdev_remove_descriptor) -#define FW_CDEV_IOC_CREATE_ISO_CONTEXT _IOWR('#', 0x08, struct fw_cdev_create_iso_context) -#define FW_CDEV_IOC_QUEUE_ISO _IOWR('#', 0x09, struct fw_cdev_queue_iso) -#define FW_CDEV_IOC_START_ISO _IOW('#', 0x0a, struct fw_cdev_start_iso) -#define FW_CDEV_IOC_STOP_ISO _IOW('#', 0x0b, struct fw_cdev_stop_iso) +#define FW_CDEV_IOC_GET_INFO _IOWR('#', 0x00, struct fw_cdev_get_info) +#define FW_CDEV_IOC_SEND_REQUEST _IOW('#', 0x01, struct fw_cdev_send_request) +#define FW_CDEV_IOC_ALLOCATE _IOWR('#', 0x02, struct fw_cdev_allocate) +#define FW_CDEV_IOC_DEALLOCATE _IOW('#', 0x03, struct fw_cdev_deallocate) +#define FW_CDEV_IOC_SEND_RESPONSE _IOW('#', 0x04, struct fw_cdev_send_response) +#define FW_CDEV_IOC_INITIATE_BUS_RESET _IOW('#', 0x05, struct fw_cdev_initiate_bus_reset) +#define FW_CDEV_IOC_ADD_DESCRIPTOR _IOWR('#', 0x06, struct fw_cdev_add_descriptor) +#define FW_CDEV_IOC_REMOVE_DESCRIPTOR _IOW('#', 0x07, struct fw_cdev_remove_descriptor) +#define FW_CDEV_IOC_CREATE_ISO_CONTEXT _IOWR('#', 0x08, struct fw_cdev_create_iso_context) +#define FW_CDEV_IOC_QUEUE_ISO _IOWR('#', 0x09, struct fw_cdev_queue_iso) +#define FW_CDEV_IOC_START_ISO _IOW('#', 0x0a, struct fw_cdev_start_iso) +#define FW_CDEV_IOC_STOP_ISO _IOW('#', 0x0b, struct fw_cdev_stop_iso) /* available since kernel version 2.6.24 */ -#define FW_CDEV_IOC_GET_CYCLE_TIMER _IOR('#', 0x0c, struct fw_cdev_get_cycle_timer) +#define FW_CDEV_IOC_GET_CYCLE_TIMER _IOR('#', 0x0c, struct fw_cdev_get_cycle_timer) /* available since kernel version 2.6.30 */ #define FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE _IOWR('#', 0x0d, struct fw_cdev_allocate_iso_resource) #define FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE _IOW('#', 0x0e, struct fw_cdev_deallocate) #define FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE _IOW('#', 0x0f, struct fw_cdev_allocate_iso_resource) #define FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE_ONCE _IOW('#', 0x10, struct fw_cdev_allocate_iso_resource) -#define FW_CDEV_IOC_GET_SPEED _IOR('#', 0x11, struct fw_cdev_get_speed) +#define FW_CDEV_IOC_GET_SPEED _IO('#', 0x11) /* returns speed code */ #define FW_CDEV_IOC_SEND_BROADCAST_REQUEST _IOW('#', 0x12, struct fw_cdev_send_request) #define FW_CDEV_IOC_SEND_STREAM_PACKET _IOW('#', 0x13, struct fw_cdev_send_stream_packet) @@ -602,15 +602,6 @@ struct fw_cdev_allocate_iso_resource { }; /** - * struct fw_cdev_get_speed - Query maximum 
speed to or from this device - * @max_speed: Speed code; minimum of the device's link speed, the local node's - * link speed, and all PHY port speeds between the two links - */ -struct fw_cdev_get_speed { - __u32 max_speed; -}; - -/** * struct fw_cdev_send_stream_packet - send an asynchronous stream packet * @generation: Bus generation where the packet is valid * @speed: Speed code to send the packet at -- cgit v0.10.2 From de487da8ca5839d057e1f4b57ee3f387e180b800 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Tue, 10 Mar 2009 21:00:23 +0100 Subject: firewire: cdev: secure add_descriptor ioctl The access permissions and ownership or ACL of /dev/fw* character device files will typically be set based on the device type of the respective nodes, as obtained by firewire-core from descriptors in the device's configuration ROM. An example policy is to deny write permission by default but grant write permission to files of AV/C video and audio devices and IIDC video devices. The FW_CDEV_IOC_ADD_DESCRIPTOR ioctl could be used to partly subvert such a policy: Find a device file with relaxed permissions, use the ioctl to add a descriptor with AV/C marker to the local node's ROM, thus gain access to the local node's character device file. (This is only possible if there are udev scripts installed which actively relax permissions for known device types and if there is a device of such a type connected.) Accessibility of the local node's device file is relevant to host security if the host contains two or more IEEE 1394 link layer controllers which are plugged into a single bus. Therefore change the ABI to deny FW_CDEV_IOC_ADD_DESCRIPTOR if the file belongs to a remote node. (This change has no impact on known implementers of the ABI: None of them uses the ioctl yet.) Also clarify the documentation: The ioctl affects all local nodes, not just one local node. Cc: stable@kernel.org Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c index 2784f91..160cb27 100644 --- a/drivers/firewire/fw-cdev.c +++ b/drivers/firewire/fw-cdev.c @@ -742,9 +742,17 @@ static void release_descriptor(struct client *client, static int ioctl_add_descriptor(struct client *client, void *buffer) { struct fw_cdev_add_descriptor *request = buffer; + struct fw_card *card = client->device->card; struct descriptor_resource *r; int ret; + /* Access policy: Allow this ioctl only on local nodes' device files. */ + spin_lock_irq(&card->lock); + ret = client->device->node_id != card->local_node->node_id; + spin_unlock_irq(&card->lock); + if (ret) + return -ENOSYS; + if (request->length > 256) return -EINVAL; diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h index de40357..25bc827 100644 --- a/include/linux/firewire-cdev.h +++ b/include/linux/firewire-cdev.h @@ -394,6 +394,9 @@ struct fw_cdev_initiate_bus_reset { * If successful, the kernel adds the descriptor and writes back a handle to the * kernel-side object to be used for later removal of the descriptor block and * immediate key. + * + * This ioctl affects the configuration ROMs of all local nodes. + * The ioctl only succeeds on device files which represent a local node. */ struct fw_cdev_add_descriptor { __u32 immediate; @@ -409,7 +412,7 @@ struct fw_cdev_add_descriptor { * descriptor was added * * Remove a descriptor block and accompanying immediate key from the local - * node's configuration ROM. + * nodes' configuration ROMs. 
*/ struct fw_cdev_remove_descriptor { __u32 handle; -- cgit v0.10.2 From 207fbefb18de9bc6f871e4008da29879c90cb67e Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Tue, 10 Mar 2009 21:01:08 +0100 Subject: firewire: cdev: fix race of ioctl_send_request with bus reset The bus reset handler concurrently frees client->device->node. Use device->node_id instead. This is equivalent to device->node->node_id while device->generation is current. Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c index 160cb27..c54e019 100644 --- a/drivers/firewire/fw-cdev.c +++ b/drivers/firewire/fw-cdev.c @@ -585,7 +585,7 @@ static int ioctl_send_request(struct client *client, void *buffer) return -EINVAL; } - return init_request(client, request, client->device->node->node_id, + return init_request(client, request, client->device->node_id, client->device->max_speed); } -- cgit v0.10.2 From 664d8010b170ae8b3ce9268b4f4da934d27b0491 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Tue, 10 Mar 2009 21:01:54 +0100 Subject: firewire: cdev: simplify FW_CDEV_IOC_SEND_REQUEST return value This changes the ioctl() return value of FW_CDEV_IOC_SEND_REQUEST and of the as yet unreleased FW_CDEV_IOC_SEND_BROADCAST_REQUEST. They used to return sizeof(struct fw_cdev_send_request *) + data_length which is obviously a failed attempt to emulate the return value of raw1394's respective interface which uses write() instead of ioctl(). However, the first summand, as size of a kernel pointer, is entirely meaningless to clients and the second summand is already known to clients. And the result does not resemble raw1394's write() return code anyway. So simplify it to a constant non-negative value, i.e. 0. The only dangers here would be that future client implementations check for error by ret != 0 instead of ret < 0 when running on top of an old kernel; or that current clients interpret ret = 0 or more as failure. But both are hypothetical cases which don't justify to return irritating values. While we touch this code, also remove "& 0x1f" from tcode in the call of fw_send_request. The tcode cannot be bigger than 0x1f at this point. Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c index c54e019..95a2075 100644 --- a/drivers/firewire/fw-cdev.c +++ b/drivers/firewire/fw-cdev.c @@ -549,15 +549,11 @@ static int init_request(struct client *client, client_get(client); fw_send_request(client->device->card, &e->r.transaction, - request->tcode & 0x1f, destination_id, - request->generation, speed, request->offset, - e->response.data, request->length, - complete_transaction, e); + request->tcode, destination_id, request->generation, + speed, request->offset, e->response.data, + request->length, complete_transaction, e); + return 0; - if (request->data) - return sizeof(request) + request->length; - else - return sizeof(request); failed: kfree(e); -- cgit v0.10.2 From 18e9b10fcdc090d3a38606958167d5923c7099b7 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Tue, 10 Mar 2009 21:02:21 +0100 Subject: firewire: cdev: add closure to async stream ioctl This changes the as yet unreleased FW_CDEV_IOC_SEND_STREAM_PACKET ioctl to generate an fw_cdev_event_response event just like the other two ioctls for asynchronous request transmission do. This way, clients get feedback on successful or unsuccessful transmission. This also adds input validation for length, tag, channel, sy, speed. 
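[Editor's illustration -- not part of the patch. A minimal userspace sketch of the reworked ioctl, using the structure layout and the FW_CDEV_IOC_SEND_STREAM_PACKET / FW_CDEV_EVENT_RESPONSE definitions from firewire-cdev.h as patched below; the device path, channel, payload, closure value and the hard-coded generation are made-up placeholders (a real client takes the generation from a bus reset event).]

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

int main(void)
{
	unsigned char payload[8] = { 0xde, 0xad, 0xbe, 0xef };
	struct fw_cdev_send_stream_packet sp;
	union fw_cdev_event event;
	int fd = open("/dev/fw0", O_RDWR);	/* assumed device file */

	if (fd < 0)
		return 1;

	memset(&sp, 0, sizeof(sp));
	sp.length     = sizeof(payload);
	sp.tag        = 0;			/* data format tag, 0..3 */
	sp.channel    = 13;			/* isochronous channel, 0..63 */
	sp.sy         = 0;			/* sync code, 0..15 */
	sp.closure    = 0x1234;			/* echoed back in the event */
	sp.data       = (__u64)(unsigned long)payload;
	sp.generation = 1;			/* placeholder, see note above */
	sp.speed      = 2;			/* speed code 2 = S400 */

	if (ioctl(fd, FW_CDEV_IOC_SEND_STREAM_PACKET, &sp) < 0)
		return 1;

	/* the completion now arrives as an fw_cdev_event_response on the same fd */
	if (read(fd, &event, sizeof(event)) > 0 &&
	    event.common.type == FW_CDEV_EVENT_RESPONSE)
		printf("rcode %u\n", event.response.rcode);

	close(fd);
	return 0;
}

The closure value is handed back in the response event, which is how a client pairs completions with its outstanding stream packets.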
Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c index 95a2075..7eb6594 100644 --- a/drivers/firewire/fw-cdev.c +++ b/drivers/firewire/fw-cdev.c @@ -522,7 +522,8 @@ static int init_request(struct client *client, struct outbound_transaction_event *e; int ret; - if (request->length > 4096 || request->length > 512 << speed) + if (request->tcode != TCODE_STREAM_DATA && + (request->length > 4096 || request->length > 512 << speed)) return -EIO; e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL); @@ -1247,36 +1248,27 @@ static int ioctl_send_broadcast_request(struct client *client, void *buffer) return init_request(client, request, LOCAL_BUS | 0x3f, SCODE_100); } -struct stream_packet { - struct fw_packet packet; - u8 data[0]; -}; - -static void send_stream_packet_done(struct fw_packet *packet, - struct fw_card *card, int status) -{ - kfree(container_of(packet, struct stream_packet, packet)); -} - static int ioctl_send_stream_packet(struct client *client, void *buffer) { - struct fw_cdev_send_stream_packet *request = buffer; - struct stream_packet *p; + struct fw_cdev_send_stream_packet *p = buffer; + struct fw_cdev_send_request request; + int dest; - p = kmalloc(sizeof(*p) + request->size, GFP_KERNEL); - if (p == NULL) - return -ENOMEM; + if (p->speed > client->device->card->link_speed || + p->length > 1024 << p->speed) + return -EIO; - if (request->data && - copy_from_user(p->data, u64_to_uptr(request->data), request->size)) { - kfree(p); - return -EFAULT; - } - fw_send_stream_packet(client->device->card, &p->packet, - request->generation, request->speed, - request->channel, request->sy, request->tag, - p->data, request->size, send_stream_packet_done); - return 0; + if (p->tag > 3 || p->channel > 63 || p->sy > 15) + return -EINVAL; + + dest = fw_stream_packet_destination_id(p->tag, p->channel, p->sy); + request.tcode = TCODE_STREAM_DATA; + request.length = p->length; + request.closure = p->closure; + request.data = p->data; + request.generation = p->generation; + + return init_request(client, &request, dest, p->speed); } static int (* const ioctl_handlers[])(struct client *client, void *buffer) = { diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c index e3da589..4a9b374 100644 --- a/drivers/firewire/fw-transaction.c +++ b/drivers/firewire/fw-transaction.c @@ -37,10 +37,6 @@ #include "fw-topology.h" #include "fw-device.h" -#define HEADER_TAG(tag) ((tag) << 14) -#define HEADER_CHANNEL(ch) ((ch) << 8) -#define HEADER_SY(sy) ((sy) << 0) - #define HEADER_PRI(pri) ((pri) << 0) #define HEADER_TCODE(tcode) ((tcode) << 4) #define HEADER_RETRY(retry) ((retry) << 8) @@ -158,6 +154,18 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel, { int ext_tcode; + if (tcode == TCODE_STREAM_DATA) { + packet->header[0] = + HEADER_DATA_LENGTH(length) | + destination_id | + HEADER_TCODE(TCODE_STREAM_DATA); + packet->header_length = 4; + packet->payload = payload; + packet->payload_length = length; + + goto common; + } + if (tcode > 0x10) { ext_tcode = tcode & ~0x10; tcode = TCODE_LOCK_REQUEST; @@ -204,7 +212,7 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel, packet->payload_length = 0; break; } - + common: packet->speed = speed; packet->generation = generation; packet->ack = 0; @@ -246,6 +254,9 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel, * @param callback function to be called when the transaction is completed * @param callback_data pointer 
to arbitrary data, which will be * passed to the callback + * + * In case of asynchronous stream packets i.e. TCODE_STREAM_DATA, the caller + * needs to synthesize @destination_id with fw_stream_packet_destination_id(). */ void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode, int destination_id, int generation, int speed, @@ -297,27 +308,6 @@ void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode, } EXPORT_SYMBOL(fw_send_request); -void fw_send_stream_packet(struct fw_card *card, struct fw_packet *p, - int generation, int speed, int channel, int sy, int tag, - void *payload, size_t length, fw_packet_callback_t callback) -{ - p->callback = callback; - p->header[0] = - HEADER_DATA_LENGTH(length) - | HEADER_TAG(tag) - | HEADER_CHANNEL(channel) - | HEADER_TCODE(TCODE_STREAM_DATA) - | HEADER_SY(sy); - p->header_length = 4; - p->payload = payload; - p->payload_length = length; - p->speed = speed; - p->generation = generation; - p->ack = 0; - - card->driver->send_request(card, p); -} - struct transaction_callback_data { struct completion done; void *payload; diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h index f90f09c..d4f42ce 100644 --- a/drivers/firewire/fw-transaction.h +++ b/drivers/firewire/fw-transaction.h @@ -412,10 +412,6 @@ void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode, int destination_id, int generation, int speed, unsigned long long offset, void *payload, size_t length, fw_transaction_callback_t callback, void *callback_data); -void fw_send_stream_packet(struct fw_card *card, struct fw_packet *p, - int generation, int speed, int channel, int sy, int tag, - void *payload, size_t length, fw_packet_callback_t callback); - int fw_cancel_transaction(struct fw_card *card, struct fw_transaction *transaction); void fw_flush_transactions(struct fw_card *card); @@ -425,6 +421,11 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, void fw_send_phy_config(struct fw_card *card, int node_id, int generation, int gap_count); +static inline int fw_stream_packet_destination_id(int tag, int channel, int sy) +{ + return tag << 14 | channel << 8 | sy; +} + /* * Called by the topology code to inform the device code of node * activity; found, lost, or updated nodes. diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h index 25bc827..c6b3ca3 100644 --- a/include/linux/firewire-cdev.h +++ b/include/linux/firewire-cdev.h @@ -606,28 +606,29 @@ struct fw_cdev_allocate_iso_resource { /** * struct fw_cdev_send_stream_packet - send an asynchronous stream packet - * @generation: Bus generation where the packet is valid - * @speed: Speed code to send the packet at - * @channel: Channel to send the packet on - * @sy: Four-bit sy code for the packet - * @tag: Two-bit tag field to use for the packet - * @size: Size of the packet's data payload - * @data: Userspace pointer to the payload + * @length: Length of outgoing payload, in bytes + * @tag: Data format tag + * @channel: Isochronous channel to transmit to + * @sy: Synchronization code + * @closure: Passed back to userspace in the response event + * @data: Userspace pointer to payload + * @generation: The bus generation where packet is valid + * @speed: Speed to transmit at * * The %FW_CDEV_IOC_SEND_STREAM_PACKET ioctl sends an asynchronous stream packet - * to every device (that is listening to the specified channel) on the - * firewire bus. 
It is the applications's job to ensure - * that the intended device(s) will be able to receive the packet at the chosen - * transmit speed. + * to every device which is listening to the specified channel. The kernel + * writes an &fw_cdev_event_response event which indicates success or failure of + * the transmission. */ struct fw_cdev_send_stream_packet { - __u32 generation; - __u32 speed; + __u32 length; + __u32 tag; __u32 channel; __u32 sy; - __u32 tag; - __u32 size; + __u64 closure; __u64 data; + __u32 generation; + __u32 speed; }; #endif /* _LINUX_FIREWIRE_CDEV_H */ -- cgit v0.10.2 From a38a00fdef98a8eda23a25e54490b32865bc7c33 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Tue, 10 Mar 2009 21:07:06 +0100 Subject: firewire: core: drop unused call parameters of close_transaction All callers inserted NULL and 0 here. Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c index 4a9b374..283dac6 100644 --- a/drivers/firewire/fw-transaction.c +++ b/drivers/firewire/fw-transaction.c @@ -65,8 +65,7 @@ #define PHY_IDENTIFIER(id) ((id) << 30) static int close_transaction(struct fw_transaction *transaction, - struct fw_card *card, int rcode, - u32 *payload, size_t length) + struct fw_card *card, int rcode) { struct fw_transaction *t; unsigned long flags; @@ -82,7 +81,7 @@ static int close_transaction(struct fw_transaction *transaction, spin_unlock_irqrestore(&card->lock, flags); if (&t->link != &card->transaction_list) { - t->callback(card, rcode, payload, length, t->callback_data); + t->callback(card, rcode, NULL, 0, t->callback_data); return 0; } @@ -110,7 +109,7 @@ int fw_cancel_transaction(struct fw_card *card, * if the transaction is still pending and remove it in that case. */ - return close_transaction(transaction, card, RCODE_CANCELLED, NULL, 0); + return close_transaction(transaction, card, RCODE_CANCELLED); } EXPORT_SYMBOL(fw_cancel_transaction); @@ -122,7 +121,7 @@ static void transmit_complete_callback(struct fw_packet *packet, switch (status) { case ACK_COMPLETE: - close_transaction(t, card, RCODE_COMPLETE, NULL, 0); + close_transaction(t, card, RCODE_COMPLETE); break; case ACK_PENDING: t->timestamp = packet->timestamp; @@ -130,20 +129,20 @@ static void transmit_complete_callback(struct fw_packet *packet, case ACK_BUSY_X: case ACK_BUSY_A: case ACK_BUSY_B: - close_transaction(t, card, RCODE_BUSY, NULL, 0); + close_transaction(t, card, RCODE_BUSY); break; case ACK_DATA_ERROR: - close_transaction(t, card, RCODE_DATA_ERROR, NULL, 0); + close_transaction(t, card, RCODE_DATA_ERROR); break; case ACK_TYPE_ERROR: - close_transaction(t, card, RCODE_TYPE_ERROR, NULL, 0); + close_transaction(t, card, RCODE_TYPE_ERROR); break; default: /* * In this case the ack is really a juju specific * rcode, so just forward that to the callback. */ - close_transaction(t, card, status, NULL, 0); + close_transaction(t, card, status); break; } } -- cgit v0.10.2 From e1dc7cab43619a2fbc90fd4cd712bd3fff703768 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Tue, 10 Mar 2009 21:07:46 +0100 Subject: firewire: core: increase bus manager grace period Per IEEE 1394 clause 8.4.2.5, bus manager capable nodes which are not incumbent shall wait at least 125ms before trying to establish themselves as bus manager. 
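[Editor's note -- worked example, not part of the patch: with HZ = 250, the old grace period of DIV_ROUND_UP(HZ, 10) = 25 jiffies is only 100 ms, short of the 125 ms required by the clause, whereas DIV_ROUND_UP(HZ, 8) = 32 jiffies is 128 ms; with HZ = 1000 the change is 100 ms -> 125 ms, with HZ = 100 it is 100 ms -> 130 ms. Because the division rounds up, ceil(HZ/8) jiffies is never less than HZ/8 jiffies = 125 ms, regardless of HZ.]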
Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c index f2b363e..b3463b8 100644 --- a/drivers/firewire/fw-card.c +++ b/drivers/firewire/fw-card.c @@ -368,9 +368,11 @@ static void fw_card_bm_work(struct work_struct *work) atomic_read(&root_device->state) == FW_DEVICE_RUNNING; root_device_is_cmc = root_device && root_device->cmc; root_id = root_node->node_id; - grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10)); irm_device = irm_node->data; local_device = local_node->data; + + grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 8)); + if (is_next_generation(generation, card->bm_generation) || (card->bm_generation != generation && grace)) { /* @@ -434,12 +436,11 @@ static void fw_card_bm_work(struct work_struct *work) } } else if (card->bm_generation != generation) { /* - * OK, we weren't BM in the last generation, and it's - * less than 100ms since last bus reset. Reschedule - * this task 100ms from now. + * We weren't BM in the last generation, and the last + * bus reset is less than 125ms ago. Reschedule this job. */ spin_unlock_irqrestore(&card->lock, flags); - fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 10)); + fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8)); goto out; } -- cgit v0.10.2 From cbae787c0f288c3ad385ad4165ae30b5500a1f23 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Tue, 10 Mar 2009 21:08:37 +0100 Subject: firewire: core: simplify broadcast channel allocation fw-iso.c has channel allocation code now, use it. Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c index b3463b8..d63d0ed 100644 --- a/drivers/firewire/fw-card.c +++ b/drivers/firewire/fw-card.c @@ -181,10 +181,6 @@ void fw_core_remove_descriptor(struct fw_descriptor *desc) mutex_unlock(&card_mutex); } -/* ------------------------------------------------------------------ */ -/* Code to handle 1394a broadcast channel */ - -#define THIRTY_TWO_CHANNELS (0xFFFFFFFFU) #define IRM_RETRIES 2 /* @@ -265,62 +261,18 @@ tryagain_w: return 0; } -static void -irm_allocate_broadcast(struct fw_device *irm_dev, struct device *locald) +static void allocate_broadcast_channel(struct fw_card *card, int generation) { - u32 generation; - u32 node_id; - u32 max_speed; - u32 retries; - __be32 old_data; - __be32 lock_data[2]; - int rcode; - - /* - * The device we are updating is the IRM, so we must do - * some extra work. - */ - retries = IRM_RETRIES; - generation = irm_dev->generation; - /* FIXME: do we need locking here? 
*/ - smp_rmb(); - node_id = irm_dev->node_id; - max_speed = irm_dev->max_speed; - - lock_data[0] = cpu_to_be32(THIRTY_TWO_CHANNELS); - lock_data[1] = cpu_to_be32(THIRTY_TWO_CHANNELS & ~1); -tryagain: - old_data = lock_data[0]; - rcode = fw_run_transaction(irm_dev->card, TCODE_LOCK_COMPARE_SWAP, - node_id, generation, max_speed, - CSR_REGISTER_BASE+CSR_CHANNELS_AVAILABLE_HI, - &lock_data[0], 8); - switch (rcode) { - case RCODE_BUSY: - if (retries--) - goto tryagain; - /* fallthrough */ - default: - fw_error("node %x: allocate broadcast channel failed (%x)\n", - node_id, rcode); - return; - - case RCODE_COMPLETE: - if (lock_data[0] == old_data) - break; - if (retries--) { - lock_data[1] = cpu_to_be32(be32_to_cpu(lock_data[0])&~1); - goto tryagain; - } - fw_error("node %x: allocate broadcast channel failed: too many" - " retries\n", node_id); - return; + int channel, bandwidth = 0; + + fw_iso_resource_manage(card, generation, 1ULL << 31, + &channel, &bandwidth, true); + if (channel == 31) { + card->is_irm = true; + device_for_each_child(card->device, NULL, + fw_irm_set_broadcast_channel_register); } - irm_dev->card->is_irm = true; - device_for_each_child(locald, NULL, fw_irm_set_broadcast_channel_register); } -/* ------------------------------------------------------------------ */ - static const char gap_count_table[] = { 63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40 @@ -339,10 +291,11 @@ void fw_schedule_bm_work(struct fw_card *card, unsigned long delay) static void fw_card_bm_work(struct work_struct *work) { struct fw_card *card = container_of(work, struct fw_card, work.work); - struct fw_device *root_device, *irm_device, *local_device; - struct fw_node *root_node, *local_node, *irm_node; + struct fw_device *root_device; + struct fw_node *root_node; unsigned long flags; - int root_id, new_root_id, irm_id, gap_count, generation, grace, rcode; + int root_id, new_root_id, irm_id, local_id; + int gap_count, generation, grace, rcode; bool do_reset = false; bool root_device_is_running; bool root_device_is_cmc; @@ -350,26 +303,22 @@ static void fw_card_bm_work(struct work_struct *work) spin_lock_irqsave(&card->lock, flags); card->is_irm = false; - local_node = card->local_node; - root_node = card->root_node; - irm_node = card->irm_node; - if (local_node == NULL) { + if (card->local_node == NULL) { spin_unlock_irqrestore(&card->lock, flags); goto out_put_card; } - fw_node_get(local_node); - fw_node_get(root_node); - fw_node_get(irm_node); generation = card->generation; + root_node = card->root_node; + fw_node_get(root_node); root_device = root_node->data; root_device_is_running = root_device && atomic_read(&root_device->state) == FW_DEVICE_RUNNING; root_device_is_cmc = root_device && root_device->cmc; - root_id = root_node->node_id; - irm_device = irm_node->data; - local_device = local_node->data; + root_id = root_node->node_id; + irm_id = card->irm_node->node_id; + local_id = card->local_node->node_id; grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 8)); @@ -387,16 +336,15 @@ static void fw_card_bm_work(struct work_struct *work) * next generation. 
*/ - irm_id = irm_node->node_id; - if (!irm_node->link_on) { - new_root_id = local_node->node_id; + if (!card->irm_node->link_on) { + new_root_id = local_id; fw_notify("IRM has link off, making local node (%02x) root.\n", new_root_id); goto pick_me; } lock_data[0] = cpu_to_be32(0x3f); - lock_data[1] = cpu_to_be32(local_node->node_id); + lock_data[1] = cpu_to_be32(local_id); spin_unlock_irqrestore(&card->lock, flags); @@ -411,12 +359,11 @@ static void fw_card_bm_work(struct work_struct *work) if (rcode == RCODE_COMPLETE && lock_data[0] != cpu_to_be32(0x3f)) { - /* Somebody else is BM, let them do the work. */ - if (irm_id == local_node->node_id) { - /* But we are IRM, so do irm-y things */ - irm_allocate_broadcast(irm_device, - card->device); - } + + /* Somebody else is BM. Only act as IRM. */ + if (local_id == irm_id) + allocate_broadcast_channel(card, generation); + goto out; } @@ -429,7 +376,7 @@ static void fw_card_bm_work(struct work_struct *work) * do a bus reset and pick the local node as * root, and thus, IRM. */ - new_root_id = local_node->node_id; + new_root_id = local_id; fw_notify("BM lock failed, making local node (%02x) root.\n", new_root_id); goto pick_me; @@ -456,7 +403,7 @@ static void fw_card_bm_work(struct work_struct *work) * Either link_on is false, or we failed to read the * config rom. In either case, pick another root. */ - new_root_id = local_node->node_id; + new_root_id = local_id; } else if (!root_device_is_running) { /* * If we haven't probed this device yet, bail out now @@ -478,7 +425,7 @@ static void fw_card_bm_work(struct work_struct *work) * successfully read the config rom, but it's not * cycle master capable. */ - new_root_id = local_node->node_id; + new_root_id = local_id; } pick_me: @@ -509,19 +456,14 @@ static void fw_card_bm_work(struct work_struct *work) card->index, new_root_id, gap_count); fw_send_phy_config(card, new_root_id, generation, gap_count); fw_core_initiate_bus_reset(card, 1); - } else if (irm_node->node_id == local_node->node_id) { - /* - * We are IRM, so do irm-y things. - * There's no reason to do this if we're doing a reset. . . - * We'll be back. - */ - irm_allocate_broadcast(irm_device, card->device); + /* Will allocate broadcast channel after the reset. */ + } else { + if (local_id == irm_id) + allocate_broadcast_channel(card, generation); } out: fw_node_put(root_node); - fw_node_put(local_node); - fw_node_put(irm_node); out_put_card: fw_card_put(card); } -- cgit v0.10.2 From 7889b60ee71eafaf50699a154a2455424bb92daa Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Tue, 10 Mar 2009 21:09:28 +0100 Subject: firewire: core: optimize propagation of BROADCAST_CHANNEL Cache the test result of whether a device implements BROADCAST_CHANNEL. This minimizes traffic on the bus after each bus reset. A majority of devices does not implement BROADCAST_CHANNEL. Remove busy retries; just rely on the hardware to retry requests to busy responders. Remove unnecessary log messages. Rename the flag is_irm to broadcast_channel_allocated to better reflect its meaning. Reset the flag earlier in fw_core_handle_bus_reset. Pass the generation down as a call parameter; that way generation can't be newer than card->broadcast_channel_allocated and device->node_id. 
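[Editor's note -- not part of the patch: for the hunks below, the register constants involved are assumed (from fw-transaction.h of this series, not shown here) to be BROADCAST_CHANNEL_INITIAL = (1 << 31 | 31), i.e. the "implemented" bit plus default channel 31, and BROADCAST_CHANNEL_VALID = (1 << 30). That is why fw_device_set_broadcast_channel() tests bit 31 of the quadlet it reads to decide between BC_IMPLEMENTED and BC_UNIMPLEMENTED, and only writes INITIAL | VALID after the local node, acting as IRM, has actually allocated channel 31 through fw_iso_resource_manage().]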
Signed-off-by: Stefan Richter diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c index d63d0ed..8b8c8c2 100644 --- a/drivers/firewire/fw-card.c +++ b/drivers/firewire/fw-card.c @@ -181,83 +181,9 @@ void fw_core_remove_descriptor(struct fw_descriptor *desc) mutex_unlock(&card_mutex); } -#define IRM_RETRIES 2 - -/* - * The abi is set by device_for_each_child(), even though we have no use - * for data, nor do we have a meaningful return value. - */ -int fw_irm_set_broadcast_channel_register(struct device *dev, void *data) +static int set_broadcast_channel(struct device *dev, void *data) { - struct fw_device *d; - int rcode; - int node_id; - int max_speed; - int retries; - int generation; - __be32 regval; - struct fw_card *card; - - d = fw_device(dev); - /* FIXME: do we need locking here? */ - generation = d->generation; - smp_rmb(); /* Ensure generation is at least as old as node_id */ - node_id = d->node_id; - max_speed = d->max_speed; - retries = IRM_RETRIES; - card = d->card; -tryagain_r: - rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST, - node_id, generation, max_speed, - CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL, - ®val, 4); - switch (rcode) { - case RCODE_BUSY: - if (retries--) - goto tryagain_r; - fw_notify("node %x read broadcast channel busy\n", - node_id); - return 0; - - default: - fw_notify("node %x read broadcast channel failed %x\n", - node_id, rcode); - return 0; - - case RCODE_COMPLETE: - /* - * Paranoid reporting of nonstandard broadcast channel - * contents goes here - */ - if (regval != cpu_to_be32(BROADCAST_CHANNEL_INITIAL)) - return 0; - break; - } - retries = IRM_RETRIES; - regval = cpu_to_be32(BROADCAST_CHANNEL_INITIAL | - BROADCAST_CHANNEL_VALID); -tryagain_w: - rcode = fw_run_transaction(card, - TCODE_WRITE_QUADLET_REQUEST, node_id, - generation, max_speed, - CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL, - ®val, 4); - switch (rcode) { - case RCODE_BUSY: - if (retries--) - goto tryagain_w; - fw_notify("node %x write broadcast channel busy\n", - node_id); - return 0; - - default: - fw_notify("node %x write broadcast channel failed %x\n", - node_id, rcode); - return 0; - - case RCODE_COMPLETE: - return 0; - } + fw_device_set_broadcast_channel(fw_device(dev), (long)data); return 0; } @@ -268,9 +194,9 @@ static void allocate_broadcast_channel(struct fw_card *card, int generation) fw_iso_resource_manage(card, generation, 1ULL << 31, &channel, &bandwidth, true); if (channel == 31) { - card->is_irm = true; - device_for_each_child(card->device, NULL, - fw_irm_set_broadcast_channel_register); + card->broadcast_channel_allocated = true; + device_for_each_child(card->device, (void *)(long)generation, + set_broadcast_channel); } } @@ -302,7 +228,6 @@ static void fw_card_bm_work(struct work_struct *work) __be32 lock_data[2]; spin_lock_irqsave(&card->lock, flags); - card->is_irm = false; if (card->local_node == NULL) { spin_unlock_irqrestore(&card->lock, flags); diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c index a40444e..a47e212 100644 --- a/drivers/firewire/fw-device.c +++ b/drivers/firewire/fw-device.c @@ -518,7 +518,7 @@ static int read_bus_info_block(struct fw_device *device, int generation) kfree(old_rom); ret = 0; - device->cmc = rom[2] & 1 << 30; + device->cmc = rom[2] >> 30 & 1; out: kfree(rom); @@ -756,6 +756,44 @@ static int lookup_existing_device(struct device *dev, void *data) return match; } +enum { BC_UNKNOWN = 0, BC_UNIMPLEMENTED, BC_IMPLEMENTED, }; + +void fw_device_set_broadcast_channel(struct fw_device 
*device, int generation) +{ + struct fw_card *card = device->card; + __be32 data; + int rcode; + + if (!card->broadcast_channel_allocated) + return; + + if (device->bc_implemented == BC_UNKNOWN) { + rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST, + device->node_id, generation, device->max_speed, + CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL, + &data, 4); + switch (rcode) { + case RCODE_COMPLETE: + if (data & cpu_to_be32(1 << 31)) { + device->bc_implemented = BC_IMPLEMENTED; + break; + } + /* else fall through to case address error */ + case RCODE_ADDRESS_ERROR: + device->bc_implemented = BC_UNIMPLEMENTED; + } + } + + if (device->bc_implemented == BC_IMPLEMENTED) { + data = cpu_to_be32(BROADCAST_CHANNEL_INITIAL | + BROADCAST_CHANNEL_VALID); + fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST, + device->node_id, generation, device->max_speed, + CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL, + &data, 4); + } +} + static void fw_device_init(struct work_struct *work) { struct fw_device *device = @@ -849,9 +887,8 @@ static void fw_device_init(struct work_struct *work) device->config_rom[3], device->config_rom[4], 1 << device->max_speed); device->config_rom_retries = 0; - if (device->card->is_irm) - fw_irm_set_broadcast_channel_register(&device->device, - NULL); + + fw_device_set_broadcast_channel(device, device->generation); } /* diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h index 3085a74..9758893 100644 --- a/drivers/firewire/fw-device.h +++ b/drivers/firewire/fw-device.h @@ -71,7 +71,6 @@ struct fw_device { int node_id; int generation; unsigned max_speed; - bool cmc; struct fw_card *card; struct device device; @@ -81,6 +80,9 @@ struct fw_device { u32 *config_rom; size_t config_rom_length; int config_rom_retries; + unsigned cmc:1; + unsigned bc_implemented:2; + struct delayed_work work; struct fw_attribute_group attribute_group; }; @@ -109,6 +111,7 @@ static inline void fw_device_put(struct fw_device *device) struct fw_device *fw_device_get_by_devt(dev_t devt); int fw_device_enable_phys_dma(struct fw_device *device); +void fw_device_set_broadcast_channel(struct fw_device *device, int generation); void fw_device_cdev_update(struct fw_device *device); void fw_device_cdev_remove(struct fw_device *device); diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c index b44131c..d0deecc 100644 --- a/drivers/firewire/fw-topology.c +++ b/drivers/firewire/fw-topology.c @@ -526,6 +526,7 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation, spin_lock_irqsave(&card->lock, flags); + card->broadcast_channel_allocated = false; card->node_id = node_id; /* * Update node_id before generation to prevent anybody from using diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h index d4f42ce..dfa7990 100644 --- a/drivers/firewire/fw-transaction.h +++ b/drivers/firewire/fw-transaction.h @@ -230,11 +230,6 @@ struct fw_card { u8 color; /* must be u8 to match the definition in struct fw_node */ int gap_count; bool beta_repeaters_present; - /* - * Set if the local device is the IRM and the broadcast channel - * was allocated. 
- */ - bool is_irm; int index; @@ -245,6 +240,7 @@ struct fw_card { int bm_retries; int bm_generation; + bool broadcast_channel_allocated; u32 broadcast_channel; u32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4]; }; -- cgit v0.10.2 From e90874f64c89d8a3a8b287f67c5655141720c28d Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Sat, 24 Jan 2009 19:41:46 +0100 Subject: ieee1394: sbp2: follow up on "ieee1394: inherit ud vendor_id from node vendor_id" Signed-off-by: Stefan Richter diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c index f3fd865..02255a8 100644 --- a/drivers/ieee1394/sbp2.c +++ b/drivers/ieee1394/sbp2.c @@ -1413,8 +1413,7 @@ static void sbp2_parse_unit_directory(struct sbp2_lu *lu, "(firmware_revision 0x%06x, vendor_id 0x%06x," " model_id 0x%06x)", NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid), - workarounds, firmware_revision, - ud->vendor_id ? ud->vendor_id : ud->ne->vendor_id, + workarounds, firmware_revision, ud->vendor_id, model); /* We would need one SCSI host template for each target to adjust -- cgit v0.10.2 From 1c4fb577aa5aeeace026d8295936947f0f0743f0 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Mon, 9 Feb 2009 22:05:06 +0100 Subject: ieee1394: Storage class should be before const qualifier The C99 specification states in section 6.11.5: The placement of a storage-class specifier other than at the beginning of the declaration specifiers in a declaration is an obsolescent feature. Signed-off-by: Tobias Klauser Signed-off-by: Stefan Richter diff --git a/drivers/ieee1394/csr.c b/drivers/ieee1394/csr.c index 31400c8..d696f69 100644 --- a/drivers/ieee1394/csr.c +++ b/drivers/ieee1394/csr.c @@ -68,22 +68,22 @@ static struct hpsb_highlevel csr_highlevel = { .host_reset = host_reset, }; -const static struct hpsb_address_ops map_ops = { +static const struct hpsb_address_ops map_ops = { .read = read_maps, }; -const static struct hpsb_address_ops fcp_ops = { +static const struct hpsb_address_ops fcp_ops = { .write = write_fcp, }; -const static struct hpsb_address_ops reg_ops = { +static const struct hpsb_address_ops reg_ops = { .read = read_regs, .write = write_regs, .lock = lock_regs, .lock64 = lock64_regs, }; -const static struct hpsb_address_ops config_rom_ops = { +static const struct hpsb_address_ops config_rom_ops = { .read = read_config_rom, }; diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c index 1a919df..0ed0f80 100644 --- a/drivers/ieee1394/eth1394.c +++ b/drivers/ieee1394/eth1394.c @@ -181,7 +181,7 @@ static void ether1394_remove_host(struct hpsb_host *host); static void ether1394_host_reset(struct hpsb_host *host); /* Function for incoming 1394 packets */ -const static struct hpsb_address_ops addr_ops = { +static const struct hpsb_address_ops addr_ops = { .write = ether1394_write, }; diff --git a/drivers/ieee1394/highlevel.c b/drivers/ieee1394/highlevel.c index 600e391..4bc4435 100644 --- a/drivers/ieee1394/highlevel.c +++ b/drivers/ieee1394/highlevel.c @@ -478,7 +478,7 @@ int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host, return retval; } -const static struct hpsb_address_ops dummy_ops; +static const struct hpsb_address_ops dummy_ops; /* dummy address spaces as lower and upper bounds of the host's a.s. 
list */ static void init_hpsb_highlevel(struct hpsb_host *host) diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c index bad66c6..281229b 100644 --- a/drivers/ieee1394/raw1394.c +++ b/drivers/ieee1394/raw1394.c @@ -90,7 +90,7 @@ static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store, static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store, u64 addr, octlet_t data, octlet_t arg, int ext_tcode, u16 flags); -const static struct hpsb_address_ops arm_ops = { +static const struct hpsb_address_ops arm_ops = { .read = arm_read, .write = arm_write, .lock = arm_lock, diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c index 02255a8..092fbf0 100644 --- a/drivers/ieee1394/sbp2.c +++ b/drivers/ieee1394/sbp2.c @@ -265,7 +265,7 @@ static struct hpsb_highlevel sbp2_highlevel = { .host_reset = sbp2_host_reset, }; -const static struct hpsb_address_ops sbp2_ops = { +static const struct hpsb_address_ops sbp2_ops = { .write = sbp2_handle_status_write }; @@ -275,7 +275,7 @@ static int sbp2_handle_physdma_write(struct hpsb_host *, int, int, quadlet_t *, static int sbp2_handle_physdma_read(struct hpsb_host *, int, quadlet_t *, u64, size_t, u16); -const static struct hpsb_address_ops sbp2_physdma_ops = { +static const struct hpsb_address_ops sbp2_physdma_ops = { .read = sbp2_handle_physdma_read, .write = sbp2_handle_physdma_write, }; -- cgit v0.10.2 From 421696887b0da241401710e83b0dffcc195bc484 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Sun, 15 Feb 2009 22:49:24 +0100 Subject: ieee1394: raw1394: add sparse annotations to raw1394_compat_write Eliminate the following warnings in raw1394_compat_write()'s error return path, seen on x86-64 with CONFIG_COMPAT=y: drivers/ieee1394/raw1394.c:381:17: warning: incorrect type in return expression (different address spaces) drivers/ieee1394/raw1394.c:381:17: expected char const [noderef] * drivers/ieee1394/raw1394.c:381:17: got void * drivers/ieee1394/raw1394.c:2252:14: warning: incorrect type in argument 1 (different address spaces) drivers/ieee1394/raw1394.c:2252:14: expected void const *ptr drivers/ieee1394/raw1394.c:2252:14: got char const [noderef] *[assigned] buffer drivers/ieee1394/raw1394.c:2253:19: warning: incorrect type in argument 1 (different address spaces) drivers/ieee1394/raw1394.c:2253:19: expected void const *ptr drivers/ieee1394/raw1394.c:2253:19: got char const [noderef] *[assigned] buffer Signed-off-by: Stefan Richter diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c index 281229b..9b71390 100644 --- a/drivers/ieee1394/raw1394.c +++ b/drivers/ieee1394/raw1394.c @@ -369,6 +369,7 @@ static const char __user *raw1394_compat_write(const char __user *buf) { struct compat_raw1394_req __user *cr = (typeof(cr)) buf; struct raw1394_request __user *r; + r = compat_alloc_user_space(sizeof(struct raw1394_request)); #define C(x) __copy_in_user(&r->x, &cr->x, sizeof(r->x)) @@ -378,7 +379,8 @@ static const char __user *raw1394_compat_write(const char __user *buf) C(tag) || C(sendb) || C(recvb)) - return ERR_PTR(-EFAULT); + return (__force const char __user *)ERR_PTR(-EFAULT); + return (const char __user *)r; } #undef C @@ -389,6 +391,7 @@ static int raw1394_compat_read(const char __user *buf, struct raw1394_request *r) { struct compat_raw1394_req __user *cr = (typeof(cr)) buf; + if (!access_ok(VERIFY_WRITE, cr, sizeof(struct compat_raw1394_req)) || P(type) || P(error) || @@ -400,6 +403,7 @@ raw1394_compat_read(const char __user *buf, struct raw1394_request *r) P(sendb) || 
P(recvb)) return -EFAULT; + return sizeof(struct compat_raw1394_req); } #undef P @@ -2249,8 +2253,8 @@ static ssize_t raw1394_write(struct file *file, const char __user * buffer, sizeof(struct compat_raw1394_req) != sizeof(struct raw1394_request)) { buffer = raw1394_compat_write(buffer); - if (IS_ERR(buffer)) - return PTR_ERR(buffer); + if (IS_ERR((__force void *)buffer)) + return PTR_ERR((__force void *)buffer); } else #endif if (count != sizeof(struct raw1394_request)) { -- cgit v0.10.2 From c64094684dad4a903099f1ff83b7db9b62782adc Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Sun, 15 Feb 2009 23:11:38 +0100 Subject: ieee1394: constify device ID tables Signed-off-by: Stefan Richter diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c index 3838bc4..4c2c025 100644 --- a/drivers/ieee1394/dv1394.c +++ b/drivers/ieee1394/dv1394.c @@ -2175,7 +2175,7 @@ static const struct file_operations dv1394_fops= * Export information about protocols/devices supported by this driver. */ #ifdef MODULE -static struct ieee1394_device_id dv1394_id_table[] = { +static const struct ieee1394_device_id dv1394_id_table[] = { { .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff, diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c index 0ed0f80..4ca1035 100644 --- a/drivers/ieee1394/eth1394.c +++ b/drivers/ieee1394/eth1394.c @@ -438,7 +438,7 @@ static int eth1394_update(struct unit_directory *ud) return eth1394_new_node(hi, ud); } -static struct ieee1394_device_id eth1394_id_table[] = { +static const struct ieee1394_device_id eth1394_id_table[] = { { .match_flags = (IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION), diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c index 53aada5..a6d55be 100644 --- a/drivers/ieee1394/nodemgr.c +++ b/drivers/ieee1394/nodemgr.c @@ -484,7 +484,7 @@ static struct device_attribute *const fw_host_attrs[] = { static ssize_t fw_show_drv_device_ids(struct device_driver *drv, char *buf) { struct hpsb_protocol_driver *driver; - struct ieee1394_device_id *id; + const struct ieee1394_device_id *id; int length = 0; char *scratch = buf; @@ -658,7 +658,7 @@ static int nodemgr_bus_match(struct device * dev, struct device_driver * drv) { struct hpsb_protocol_driver *driver; struct unit_directory *ud; - struct ieee1394_device_id *id; + const struct ieee1394_device_id *id; /* We only match unit directories */ if (dev->platform_data != &nodemgr_ud_platform_data) diff --git a/drivers/ieee1394/nodemgr.h b/drivers/ieee1394/nodemgr.h index ee5acdb..749b271 100644 --- a/drivers/ieee1394/nodemgr.h +++ b/drivers/ieee1394/nodemgr.h @@ -125,7 +125,7 @@ struct hpsb_protocol_driver { * probe function below can implement further protocol * dependent or vendor dependent checking. */ - struct ieee1394_device_id *id_table; + const struct ieee1394_device_id *id_table; /* * The update function is called when the node has just diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c index 9b71390..da5f882 100644 --- a/drivers/ieee1394/raw1394.c +++ b/drivers/ieee1394/raw1394.c @@ -2982,7 +2982,7 @@ static int raw1394_release(struct inode *inode, struct file *file) * Export information about protocols/devices supported by this driver. 
*/ #ifdef MODULE -static struct ieee1394_device_id raw1394_id_table[] = { +static const struct ieee1394_device_id raw1394_id_table[] = { { .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff, diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c index 092fbf0..a51ab23 100644 --- a/drivers/ieee1394/sbp2.c +++ b/drivers/ieee1394/sbp2.c @@ -285,7 +285,7 @@ static const struct hpsb_address_ops sbp2_physdma_ops = { /* * Interface to driver core and IEEE 1394 core */ -static struct ieee1394_device_id sbp2_id_table[] = { +static const struct ieee1394_device_id sbp2_id_table[] = { { .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff, diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c index 679a918..d287ba7 100644 --- a/drivers/ieee1394/video1394.c +++ b/drivers/ieee1394/video1394.c @@ -1294,7 +1294,7 @@ static const struct file_operations video1394_fops= * Export information about protocols/devices supported by this driver. */ #ifdef MODULE -static struct ieee1394_device_id video1394_id_table[] = { +static const struct ieee1394_device_id video1394_id_table[] = { { .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff, -- cgit v0.10.2 From 40cf65d149053889c8876c6c2b4ce204fde55baa Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Thu, 5 Mar 2009 19:13:43 +0100 Subject: DVB: firedtv: fix printk format mismatch Eliminate drivers/media/dvb/firewire/firedtv-avc.c: In function 'debug_fcp': drivers/media/dvb/firewire/firedtv-avc.c:156: warning: format '%d' expects type 'int', but argument 5 has type 'size_t' Acked-by: Mauro Carvalho Chehab Signed-off-by: Stefan Richter diff --git a/drivers/media/dvb/firewire/firedtv-avc.c b/drivers/media/dvb/firewire/firedtv-avc.c index b55d9cc..1a300f0 100644 --- a/drivers/media/dvb/firewire/firedtv-avc.c +++ b/drivers/media/dvb/firewire/firedtv-avc.c @@ -115,7 +115,7 @@ static const char *debug_fcp_ctype(unsigned int ctype) } static const char *debug_fcp_opcode(unsigned int opcode, - const u8 *data, size_t length) + const u8 *data, int length) { switch (opcode) { case AVC_OPCODE_VENDOR: break; @@ -141,7 +141,7 @@ static const char *debug_fcp_opcode(unsigned int opcode, return "Vendor"; } -static void debug_fcp(const u8 *data, size_t length) +static void debug_fcp(const u8 *data, int length) { unsigned int subunit_type, subunit_id, op; const char *prefix = data[0] > 7 ? "FCP <- " : "FCP -> "; -- cgit v0.10.2 From a9a9adfe2f99ddadfb574a098392a007970a1577 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Wed, 25 Mar 2009 17:21:34 +0100 Subject: netfilter: fix xt_LED build failure net/netfilter/xt_LED.c:40: error: field netfilter_led_trigger has incomplete type net/netfilter/xt_LED.c: In function led_timeout_callback: net/netfilter/xt_LED.c:78: warning: unused variable ledinternal net/netfilter/xt_LED.c: In function led_tg_check: net/netfilter/xt_LED.c:102: error: implicit declaration of function led_trigger_register net/netfilter/xt_LED.c: In function led_tg_destroy: net/netfilter/xt_LED.c:135: error: implicit declaration of function led_trigger_unregister Fix by adding a dependency on LED_TRIGGERS. 
Reported-by: Sachin Sant Tested-by: Subrata Modak Signed-off-by: Patrick McHardy diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 2562d05..2c967e4 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -374,7 +374,7 @@ config NETFILTER_XT_TARGET_HL config NETFILTER_XT_TARGET_LED tristate '"LED" target support' - depends on LEDS_CLASS + depends on LEDS_CLASS && LED_TRIGGERS depends on NETFILTER_ADVANCED help This option adds a `LED' target, which allows you to blink LEDs in -- cgit v0.10.2 From 78f3648601fdc7a8166748bbd6d0555a88efa24a Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 25 Mar 2009 17:24:34 +0100 Subject: netfilter: nf_conntrack: use hlist_add_head_rcu() in nf_conntrack_set_hashsize() Using hlist_add_head() in nf_conntrack_set_hashsize() is quite dangerous. Without any barrier, one CPU could see a loop while doing its lookup. It's true that the new table cannot be seen by another CPU, but the previous table is still readable. Signed-off-by: Eric Dumazet Signed-off-by: Patrick McHardy diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 55befe5..54e983f 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1121,7 +1121,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) struct nf_conntrack_tuple_hash, hnode); hlist_del_rcu(&h->hnode); bucket = __hash_conntrack(&h->tuple, hashsize, rnd); - hlist_add_head(&h->hnode, &hash[bucket]); + hlist_add_head_rcu(&h->hnode, &hash[bucket]); } } old_size = nf_conntrack_htable_size; -- cgit v0.10.2 From b8dfe498775de912116f275680ddb57c8799d9ef Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 25 Mar 2009 17:31:52 +0100 Subject: netfilter: factorize ifname_compare() We use the same non-trivial helper function in four places. We can factorize it.
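[Editor's illustration -- not part of the patch. A stand-alone sketch of what the word-wise compare does; the helper body is copied from the x_tables.h hunk below, minus the inline keyword and the kernel-only BUILD_BUG_ON, and the interface names, mask and expected output are made up. A return value of 0 means that every byte selected by the mask matched. Compile with -fno-strict-aliasing to mirror the kernel's build flags.]

#include <stdio.h>

#define IFNAMSIZ 16

static unsigned long ifname_compare_aligned(const char *_a, const char *_b,
					    const char *_mask)
{
	const unsigned long *a = (const unsigned long *)_a;
	const unsigned long *b = (const unsigned long *)_b;
	const unsigned long *mask = (const unsigned long *)_mask;
	unsigned long ret;

	ret = (a[0] ^ b[0]) & mask[0];
	if (IFNAMSIZ > sizeof(unsigned long))
		ret |= (a[1] ^ b[1]) & mask[1];
	if (IFNAMSIZ > 2 * sizeof(unsigned long))
		ret |= (a[2] ^ b[2]) & mask[2];
	if (IFNAMSIZ > 3 * sizeof(unsigned long))
		ret |= (a[3] ^ b[3]) & mask[3];
	return ret;
}

int main(void)
{
	/* the buffers must be unsigned-long aligned, as the _aligned name implies */
	_Alignas(unsigned long) char dev[IFNAMSIZ]  = "eth0";
	_Alignas(unsigned long) char rule[IFNAMSIZ] = "eth1";
	_Alignas(unsigned long) char mask[IFNAMSIZ] = { (char)0xff, (char)0xff, (char)0xff, 0 };

	/* only the first three bytes are masked in, so "eth0" matches "eth1" */
	printf("%s\n", ifname_compare_aligned(dev, rule, mask) == 0 ?
	       "match" : "mismatch");
	return 0;
}

In the netfilter call sites the mask is simply the per-byte interface mask stored with the rule, so all four copies of this logic collapse onto the single helper.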
Signed-off-by: Eric Dumazet Signed-off-by: Patrick McHardy diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index e8e08d0..72918b7 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -435,6 +435,29 @@ extern void xt_free_table_info(struct xt_table_info *info); extern void xt_table_entry_swap_rcu(struct xt_table_info *old, struct xt_table_info *new); +/* + * This helper is performance critical and must be inlined + */ +static inline unsigned long ifname_compare_aligned(const char *_a, + const char *_b, + const char *_mask) +{ + const unsigned long *a = (const unsigned long *)_a; + const unsigned long *b = (const unsigned long *)_b; + const unsigned long *mask = (const unsigned long *)_mask; + unsigned long ret; + + ret = (a[0] ^ b[0]) & mask[0]; + if (IFNAMSIZ > sizeof(unsigned long)) + ret |= (a[1] ^ b[1]) & mask[1]; + if (IFNAMSIZ > 2 * sizeof(unsigned long)) + ret |= (a[2] ^ b[2]) & mask[2]; + if (IFNAMSIZ > 3 * sizeof(unsigned long)) + ret |= (a[3] ^ b[3]) & mask[3]; + BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long)); + return ret; +} + #ifdef CONFIG_COMPAT #include diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 64a7c6c..4b35dba 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -80,19 +80,7 @@ static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap, static unsigned long ifname_compare(const char *_a, const char *_b, const char *_mask) { #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS - const unsigned long *a = (const unsigned long *)_a; - const unsigned long *b = (const unsigned long *)_b; - const unsigned long *mask = (const unsigned long *)_mask; - unsigned long ret; - - ret = (a[0] ^ b[0]) & mask[0]; - if (IFNAMSIZ > sizeof(unsigned long)) - ret |= (a[1] ^ b[1]) & mask[1]; - if (IFNAMSIZ > 2 * sizeof(unsigned long)) - ret |= (a[2] ^ b[2]) & mask[2]; - if (IFNAMSIZ > 3 * sizeof(unsigned long)) - ret |= (a[3] ^ b[3]) & mask[3]; - BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long)); + unsigned long ret = ifname_compare_aligned(_a, _b, _mask); #else unsigned long ret = 0; int i; diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index e5294ae..41c59e3 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -74,25 +74,6 @@ do { \ Hence the start of any table is given by get_table() below. */ -static unsigned long ifname_compare(const char *_a, const char *_b, - const unsigned char *_mask) -{ - const unsigned long *a = (const unsigned long *)_a; - const unsigned long *b = (const unsigned long *)_b; - const unsigned long *mask = (const unsigned long *)_mask; - unsigned long ret; - - ret = (a[0] ^ b[0]) & mask[0]; - if (IFNAMSIZ > sizeof(unsigned long)) - ret |= (a[1] ^ b[1]) & mask[1]; - if (IFNAMSIZ > 2 * sizeof(unsigned long)) - ret |= (a[2] ^ b[2]) & mask[2]; - if (IFNAMSIZ > 3 * sizeof(unsigned long)) - ret |= (a[3] ^ b[3]) & mask[3]; - BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long)); - return ret; -} - /* Returns whether matches rule or not. 
*/ /* Performance critical - called for every packet */ static inline bool @@ -121,7 +102,7 @@ ip_packet_match(const struct iphdr *ip, return false; } - ret = ifname_compare(indev, ipinfo->iniface, ipinfo->iniface_mask); + ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask); if (FWINV(ret != 0, IPT_INV_VIA_IN)) { dprintf("VIA in mismatch (%s vs %s).%s\n", @@ -130,7 +111,7 @@ ip_packet_match(const struct iphdr *ip, return false; } - ret = ifname_compare(outdev, ipinfo->outiface, ipinfo->outiface_mask); + ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask); if (FWINV(ret != 0, IPT_INV_VIA_OUT)) { dprintf("VIA out mismatch (%s vs %s).%s\n", diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 34af7bb..e59662b 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -89,25 +89,6 @@ ip6t_ext_hdr(u8 nexthdr) (nexthdr == IPPROTO_DSTOPTS) ); } -static unsigned long ifname_compare(const char *_a, const char *_b, - const unsigned char *_mask) -{ - const unsigned long *a = (const unsigned long *)_a; - const unsigned long *b = (const unsigned long *)_b; - const unsigned long *mask = (const unsigned long *)_mask; - unsigned long ret; - - ret = (a[0] ^ b[0]) & mask[0]; - if (IFNAMSIZ > sizeof(unsigned long)) - ret |= (a[1] ^ b[1]) & mask[1]; - if (IFNAMSIZ > 2 * sizeof(unsigned long)) - ret |= (a[2] ^ b[2]) & mask[2]; - if (IFNAMSIZ > 3 * sizeof(unsigned long)) - ret |= (a[3] ^ b[3]) & mask[3]; - BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long)); - return ret; -} - /* Returns whether matches rule or not. */ /* Performance critical - called for every packet */ static inline bool @@ -138,7 +119,7 @@ ip6_packet_match(const struct sk_buff *skb, return false; } - ret = ifname_compare(indev, ip6info->iniface, ip6info->iniface_mask); + ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask); if (FWINV(ret != 0, IP6T_INV_VIA_IN)) { dprintf("VIA in mismatch (%s vs %s).%s\n", @@ -147,7 +128,7 @@ ip6_packet_match(const struct sk_buff *skb, return false; } - ret = ifname_compare(outdev, ip6info->outiface, ip6info->outiface_mask); + ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask); if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) { dprintf("VIA out mismatch (%s vs %s).%s\n", diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c index 44a234e..8d28ca5 100644 --- a/net/netfilter/xt_physdev.c +++ b/net/netfilter/xt_physdev.c @@ -20,23 +20,6 @@ MODULE_DESCRIPTION("Xtables: Bridge physical device match"); MODULE_ALIAS("ipt_physdev"); MODULE_ALIAS("ip6t_physdev"); -static unsigned long ifname_compare(const char *_a, const char *_b, const char *_mask) -{ - const unsigned long *a = (const unsigned long *)_a; - const unsigned long *b = (const unsigned long *)_b; - const unsigned long *mask = (const unsigned long *)_mask; - unsigned long ret; - - ret = (a[0] ^ b[0]) & mask[0]; - if (IFNAMSIZ > sizeof(unsigned long)) - ret |= (a[1] ^ b[1]) & mask[1]; - if (IFNAMSIZ > 2 * sizeof(unsigned long)) - ret |= (a[2] ^ b[2]) & mask[2]; - if (IFNAMSIZ > 3 * sizeof(unsigned long)) - ret |= (a[3] ^ b[3]) & mask[3]; - BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long)); - return ret; -} static bool physdev_mt(const struct sk_buff *skb, const struct xt_match_param *par) @@ -85,7 +68,7 @@ physdev_mt(const struct sk_buff *skb, const struct xt_match_param *par) if (!(info->bitmask & XT_PHYSDEV_OP_IN)) goto match_outdev; indev = nf_bridge->physindev ? 
nf_bridge->physindev->name : nulldevname; - ret = ifname_compare(indev, info->physindev, info->in_mask); + ret = ifname_compare_aligned(indev, info->physindev, info->in_mask); if (!ret ^ !(info->invert & XT_PHYSDEV_OP_IN)) return false; @@ -95,7 +78,7 @@ match_outdev: return true; outdev = nf_bridge->physoutdev ? nf_bridge->physoutdev->name : nulldevname; - ret = ifname_compare(outdev, info->physoutdev, info->out_mask); + ret = ifname_compare_aligned(outdev, info->physoutdev, info->out_mask); return (!!ret ^ !(info->invert & XT_PHYSDEV_OP_OUT)); } -- cgit v0.10.2 From d0dba7255b541f1651a88e75ebdb20dd45509c2f Mon Sep 17 00:00:00 2001 From: Holger Eitzenberger Date: Wed, 25 Mar 2009 18:24:48 +0100 Subject: netfilter: ctnetlink: add callbacks to the per-proto nlattrs There is added a single callback for the l3 proto helper. The two callbacks for the l4 protos are necessary because of the general structure of a ctnetlink event, which is in short: CTA_TUPLE_ORIG CTA_TUPLE_REPLY CTA_ID ... CTA_PROTOINFO CTA_TUPLE_MASTER Therefore the formular is size := sizeof(generic-nlas) + 3 * sizeof(tuple_nlas) + sizeof(protoinfo_nlas) Some of the NLAs are optional, e. g. CTA_TUPLE_MASTER, which is only set if it's an expected connection. But the number of optional NLAs is small enough to prevent netlink_trim() from reallocating if calculated properly. Signed-off-by: Holger Eitzenberger Signed-off-by: Patrick McHardy diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h index 0378676..9f99d36 100644 --- a/include/net/netfilter/nf_conntrack_l3proto.h +++ b/include/net/netfilter/nf_conntrack_l3proto.h @@ -53,10 +53,17 @@ struct nf_conntrack_l3proto int (*tuple_to_nlattr)(struct sk_buff *skb, const struct nf_conntrack_tuple *t); + /* + * Calculate size of tuple nlattr + */ + int (*nlattr_tuple_size)(void); + int (*nlattr_to_tuple)(struct nlattr *tb[], struct nf_conntrack_tuple *t); const struct nla_policy *nla_policy; + size_t nla_size; + #ifdef CONFIG_SYSCTL struct ctl_table_header *ctl_table_header; struct ctl_path *ctl_table_path; diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h index b01070b..a120990 100644 --- a/include/net/netfilter/nf_conntrack_l4proto.h +++ b/include/net/netfilter/nf_conntrack_l4proto.h @@ -64,16 +64,22 @@ struct nf_conntrack_l4proto /* convert protoinfo to nfnetink attributes */ int (*to_nlattr)(struct sk_buff *skb, struct nlattr *nla, const struct nf_conn *ct); + /* Calculate protoinfo nlattr size */ + int (*nlattr_size)(void); /* convert nfnetlink attributes to protoinfo */ int (*from_nlattr)(struct nlattr *tb[], struct nf_conn *ct); int (*tuple_to_nlattr)(struct sk_buff *skb, const struct nf_conntrack_tuple *t); + /* Calculate tuple nlattr size */ + int (*nlattr_tuple_size)(void); int (*nlattr_to_tuple)(struct nlattr *tb[], struct nf_conntrack_tuple *t); const struct nla_policy *nla_policy; + size_t nla_size; + #ifdef CONFIG_SYSCTL struct ctl_table_header **ctl_table_header; struct ctl_table *ctl_table; diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c index 9a62b4e..1a4568b 100644 --- a/net/netfilter/nf_conntrack_proto.c +++ b/net/netfilter/nf_conntrack_proto.c @@ -167,6 +167,9 @@ int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto) if (proto->l3proto >= AF_MAX) return -EBUSY; + if (proto->tuple_to_nlattr && !proto->nlattr_tuple_size) + return -EINVAL; + mutex_lock(&nf_ct_proto_mutex); if (nf_ct_l3protos[proto->l3proto] != 
&nf_conntrack_l3proto_generic) { ret = -EBUSY; @@ -177,6 +180,9 @@ int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto) if (ret < 0) goto out_unlock; + if (proto->nlattr_tuple_size) + proto->nla_size = 3 * proto->nlattr_tuple_size(); + rcu_assign_pointer(nf_ct_l3protos[proto->l3proto], proto); out_unlock: @@ -263,6 +269,10 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto) if (l4proto->l3proto >= PF_MAX) return -EBUSY; + if ((l4proto->to_nlattr && !l4proto->nlattr_size) + || (l4proto->tuple_to_nlattr && !l4proto->nlattr_tuple_size)) + return -EINVAL; + mutex_lock(&nf_ct_proto_mutex); if (!nf_ct_protos[l4proto->l3proto]) { /* l3proto may be loaded latter. */ @@ -290,6 +300,12 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto) if (ret < 0) goto out_unlock; + l4proto->nla_size = 0; + if (l4proto->nlattr_size) + l4proto->nla_size += l4proto->nlattr_size(); + if (l4proto->nlattr_tuple_size) + l4proto->nla_size += 3 * l4proto->nlattr_tuple_size(); + rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto], l4proto); -- cgit v0.10.2 From e487eb99cf9381a4f8254fa01747a85818da612b Mon Sep 17 00:00:00 2001 From: Holger Eitzenberger Date: Wed, 25 Mar 2009 18:26:30 +0100 Subject: netlink: add nla_policy_len() It calculates the max. length of a Netlink policy, which is usefull for allocating Netlink buffers roughly the size of the actual message. Signed-off-by: Holger Eitzenberger Signed-off-by: Patrick McHardy diff --git a/include/net/netlink.h b/include/net/netlink.h index 8a6150a..eddb502 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -230,6 +230,7 @@ extern int nla_validate(struct nlattr *head, int len, int maxtype, extern int nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, int len, const struct nla_policy *policy); +extern int nla_policy_len(const struct nla_policy *, int); extern struct nlattr * nla_find(struct nlattr *head, int len, int attrtype); extern size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize); diff --git a/net/netlink/attr.c b/net/netlink/attr.c index 56c3ce7..ae32c57 100644 --- a/net/netlink/attr.c +++ b/net/netlink/attr.c @@ -133,6 +133,32 @@ errout: } /** + * nla_policy_len - Determin the max. length of a policy + * @policy: policy to use + * @n: number of policies + * + * Determines the max. length of the policy. It is currently used + * to allocated Netlink buffers roughly the size of the actual + * message. + * + * Returns 0 on success or a negative error code. 
+ */ +int +nla_policy_len(const struct nla_policy *p, int n) +{ + int i, len = 0; + + for (i = 0; i < n; i++) { + if (p->len) + len += nla_total_size(p->len); + else if (nla_attr_minlen[p->type]) + len += nla_total_size(nla_attr_minlen[p->type]); + } + + return len; +} + +/** * nla_parse - Parse a stream of attributes into a tb buffer * @tb: destination array with maxtype+1 elements * @maxtype: maximum attribute type to be expected @@ -456,6 +482,7 @@ int nla_append(struct sk_buff *skb, int attrlen, const void *data) } EXPORT_SYMBOL(nla_validate); +EXPORT_SYMBOL(nla_policy_len); EXPORT_SYMBOL(nla_parse); EXPORT_SYMBOL(nla_find); EXPORT_SYMBOL(nla_strlcpy); -- cgit v0.10.2 From af9d32ad6718b9a80fa89f557cc1fbb63a93ec15 Mon Sep 17 00:00:00 2001 From: Holger Eitzenberger Date: Wed, 25 Mar 2009 18:44:01 +0100 Subject: netfilter: limit the length of the helper name This is necessary in order to have an upper bound for Netlink message calculation, which is not a problem at all, as there are no helpers with a longer name. Signed-off-by: Holger Eitzenberger Signed-off-by: Patrick McHardy diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h index 66d65a7..ee2a4b3 100644 --- a/include/net/netfilter/nf_conntrack_helper.h +++ b/include/net/netfilter/nf_conntrack_helper.h @@ -14,6 +14,8 @@ struct module; +#define NF_CT_HELPER_NAME_LEN 16 + struct nf_conntrack_helper { struct hlist_node hnode; /* Internal use. */ diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index a51bdac..805cfdd 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c @@ -142,6 +142,7 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me) BUG_ON(me->expect_policy == NULL); BUG_ON(me->expect_class_max >= NF_CT_MAX_EXPECT_CLASSES); + BUG_ON(strlen(me->name) > NF_CT_HELPER_NAME_LEN - 1); mutex_lock(&nf_ct_helper_mutex); hlist_add_head_rcu(&me->hnode, &nf_ct_helper_hash[h]); -- cgit v0.10.2 From 1f9352ae2253a97b07b34dcf16ffa3b4ca12c558 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Wed, 25 Mar 2009 19:26:35 +0100 Subject: netfilter: {ip,ip6,arp}_tables: fix incorrect loop detection Commit e1b4b9f ([NETFILTER]: {ip,ip6,arp}_tables: fix exponential worst-case search for loops) introduced a regression in the loop detection algorithm, causing sporadic incorrectly detected loops. When a chain has already been visited during the check, it is treated as having a standard target containing a RETURN verdict directly at the beginning in order to not check it again. The real target of the first rule is then incorrectly treated as STANDARD target and checked not to contain invalid verdicts. Fix by making sure the rule does actually contain a standard target. 
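A minimal standalone sketch of the corrected check; the names and constants below (STANDARD_TARGET_NAME, MAX_VERDICT, struct rule_target) are illustrative stand-ins rather than the kernel definitions. The point is simply that the verdict field is only meaningful for the built-in standard target, so the name comparison must precede the negative-verdict range test:

#include <stdbool.h>
#include <string.h>

#define STANDARD_TARGET_NAME ""   /* the standard target carries an empty name */
#define MAX_VERDICT          5    /* stand-in for NF_MAX_VERDICT */

struct rule_target {
	char name[32];            /* stand-in for t->target.u.user.name */
	int  verdict;             /* only meaningful for the standard target */
};

static bool bad_negative_verdict(const struct rule_target *t)
{
	/* Test the name first; otherwise private data of some other target
	 * type could be misread as a (bogus) negative verdict. */
	return strcmp(t->name, STANDARD_TARGET_NAME) == 0 &&
	       t->verdict < -MAX_VERDICT - 1;
}

With the name test in place, already-visited chains whose first rule uses an extension target are simply skipped by the sanity check, which is what the three hunks below do for arptables, iptables and ip6tables.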
Based on patch by Francis Dupont Signed-off-by: Patrick McHardy diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 4b35dba..4f454ce 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -388,7 +388,9 @@ static int mark_source_chains(struct xt_table_info *newinfo, && unconditional(&e->arp)) || visited) { unsigned int oldpos, size; - if (t->verdict < -NF_MAX_VERDICT - 1) { + if ((strcmp(t->target.u.user.name, + ARPT_STANDARD_TARGET) == 0) && + t->verdict < -NF_MAX_VERDICT - 1) { duprintf("mark_source_chains: bad " "negative verdict (%i)\n", t->verdict); diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 41c59e3..82ee7c9 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -488,7 +488,9 @@ mark_source_chains(struct xt_table_info *newinfo, && unconditional(&e->ip)) || visited) { unsigned int oldpos, size; - if (t->verdict < -NF_MAX_VERDICT - 1) { + if ((strcmp(t->target.u.user.name, + IPT_STANDARD_TARGET) == 0) && + t->verdict < -NF_MAX_VERDICT - 1) { duprintf("mark_source_chains: bad " "negative verdict (%i)\n", t->verdict); diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index e59662b..e89cfa3 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -517,7 +517,9 @@ mark_source_chains(struct xt_table_info *newinfo, && unconditional(&e->ipv6)) || visited) { unsigned int oldpos, size; - if (t->verdict < -NF_MAX_VERDICT - 1) { + if ((strcmp(t->target.u.user.name, + IP6T_STANDARD_TARGET) == 0) && + t->verdict < -NF_MAX_VERDICT - 1) { duprintf("mark_source_chains: bad " "negative verdict (%i)\n", t->verdict); -- cgit v0.10.2 From ea781f197d6a835cbb93a0bf88ee1696296ed8aa Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 25 Mar 2009 21:05:46 +0100 Subject: netfilter: nf_conntrack: use SLAB_DESTROY_BY_RCU and get rid of call_rcu() Use "hlist_nulls" infrastructure we added in 2.6.29 for RCUification of UDP & TCP. This permits an easy conversion from call_rcu() based hash lists to a SLAB_DESTROY_BY_RCU one. Avoiding call_rcu() delay at nf_conn freeing time has numerous gains. First, it doesnt fill RCU queues (up to 10000 elements per cpu). This reduces OOM possibility, if queued elements are not taken into account This reduces latency problems when RCU queue size hits hilimit and triggers emergency mode. - It allows fast reuse of just freed elements, permitting better use of CPU cache. - We delete rcu_head from "struct nf_conn", shrinking size of this structure by 8 or 16 bytes. This patch only takes care of "struct nf_conn". call_rcu() is still used for less critical conntrack parts, that may be converted later if necessary. 
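As a rough user-space analogue of the lookup discipline SLAB_DESTROY_BY_RCU imposes (struct conn, conn_get_unless_zero() and conn_find_get() are purely illustrative names, not kernel API): an entry found during a lockless walk may already have been freed and recycled for a different tuple, so the reader must take a conditional reference and then re-verify the key, which is what nf_conntrack_find_get() does in the hunks that follow.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct conn {
	atomic_uint refcnt;     /* 0 means the object is being freed/reused */
	unsigned long key;      /* stand-in for the conntrack tuple */
	struct conn *next;
};

static bool conn_get_unless_zero(struct conn *c)
{
	unsigned int old = atomic_load(&c->refcnt);

	while (old != 0) {
		/* on failure the CAS refreshes 'old' and we retry */
		if (atomic_compare_exchange_weak(&c->refcnt, &old, old + 1))
			return true;
	}
	return false;           /* lost the race against the freer */
}

static struct conn *conn_find_get(struct conn *head, unsigned long key)
{
	for (struct conn *c = head; c != NULL; c = c->next) {
		if (c->key != key)
			continue;
		if (!conn_get_unless_zero(c))
			continue;       /* being freed, keep walking */
		if (c->key != key) {    /* recycled for another tuple */
			atomic_fetch_sub(&c->refcnt, 1);
			continue;       /* (the kernel restarts the lookup) */
		}
		return c;               /* refcount held and key re-verified */
	}
	return NULL;
}

The hlist_nulls end markers serve the same purpose for chain membership: when a walk terminates on a nulls value that does not match the bucket it started in, the entry was moved to another chain while being recycled, and the lookup restarts, as in the "goto begin" paths added to __nf_conntrack_find() and nf_conntrack_find_get() below.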
Signed-off-by: Eric Dumazet Signed-off-by: Patrick McHardy diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 4dfb793..6c3f964 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -91,8 +91,7 @@ struct nf_conn_help { #include #include -struct nf_conn -{ +struct nf_conn { /* Usage count in here is 1 for hash table/destruct timer, 1 per skb, plus 1 for any connection(s) we are `master' for */ struct nf_conntrack ct_general; @@ -126,7 +125,6 @@ struct nf_conn #ifdef CONFIG_NET_NS struct net *ct_net; #endif - struct rcu_head rcu; }; static inline struct nf_conn * @@ -190,9 +188,13 @@ static inline void nf_ct_put(struct nf_conn *ct) extern int nf_ct_l3proto_try_module_get(unsigned short l3proto); extern void nf_ct_l3proto_module_put(unsigned short l3proto); -extern struct hlist_head *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced); -extern void nf_ct_free_hashtable(struct hlist_head *hash, int vmalloced, - unsigned int size); +/* + * Allocate a hashtable of hlist_head (if nulls == 0), + * or hlist_nulls_head (if nulls == 1) + */ +extern void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls); + +extern void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size); extern struct nf_conntrack_tuple_hash * __nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple); diff --git a/include/net/netfilter/nf_conntrack_tuple.h b/include/net/netfilter/nf_conntrack_tuple.h index f2f6aa7..2628c15 100644 --- a/include/net/netfilter/nf_conntrack_tuple.h +++ b/include/net/netfilter/nf_conntrack_tuple.h @@ -12,6 +12,7 @@ #include #include +#include /* A `tuple' is a structure containing the information to uniquely identify a connection. ie. 
if two packets have the same tuple, they @@ -146,9 +147,8 @@ static inline void nf_ct_dump_tuple(const struct nf_conntrack_tuple *t) ((enum ip_conntrack_dir)(h)->tuple.dst.dir) /* Connections have two entries in the hash table: one for each way */ -struct nf_conntrack_tuple_hash -{ - struct hlist_node hnode; +struct nf_conntrack_tuple_hash { + struct hlist_nulls_node hnnode; struct nf_conntrack_tuple tuple; }; diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h index f4498a6..9dc5840 100644 --- a/include/net/netns/conntrack.h +++ b/include/net/netns/conntrack.h @@ -2,6 +2,7 @@ #define __NETNS_CONNTRACK_H #include +#include #include struct ctl_table_header; @@ -10,9 +11,9 @@ struct nf_conntrack_ecache; struct netns_ct { atomic_t count; unsigned int expect_count; - struct hlist_head *hash; + struct hlist_nulls_head *hash; struct hlist_head *expect_hash; - struct hlist_head unconfirmed; + struct hlist_nulls_head unconfirmed; struct ip_conntrack_stat *stat; #ifdef CONFIG_NF_CONNTRACK_EVENTS struct nf_conntrack_ecache *ecache; diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c index 6ba5c55..8668a3d 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c @@ -25,40 +25,42 @@ struct ct_iter_state { unsigned int bucket; }; -static struct hlist_node *ct_get_first(struct seq_file *seq) +static struct hlist_nulls_node *ct_get_first(struct seq_file *seq) { struct net *net = seq_file_net(seq); struct ct_iter_state *st = seq->private; - struct hlist_node *n; + struct hlist_nulls_node *n; for (st->bucket = 0; st->bucket < nf_conntrack_htable_size; st->bucket++) { n = rcu_dereference(net->ct.hash[st->bucket].first); - if (n) + if (!is_a_nulls(n)) return n; } return NULL; } -static struct hlist_node *ct_get_next(struct seq_file *seq, - struct hlist_node *head) +static struct hlist_nulls_node *ct_get_next(struct seq_file *seq, + struct hlist_nulls_node *head) { struct net *net = seq_file_net(seq); struct ct_iter_state *st = seq->private; head = rcu_dereference(head->next); - while (head == NULL) { - if (++st->bucket >= nf_conntrack_htable_size) - return NULL; + while (is_a_nulls(head)) { + if (likely(get_nulls_value(head) == st->bucket)) { + if (++st->bucket >= nf_conntrack_htable_size) + return NULL; + } head = rcu_dereference(net->ct.hash[st->bucket].first); } return head; } -static struct hlist_node *ct_get_idx(struct seq_file *seq, loff_t pos) +static struct hlist_nulls_node *ct_get_idx(struct seq_file *seq, loff_t pos) { - struct hlist_node *head = ct_get_first(seq); + struct hlist_nulls_node *head = ct_get_first(seq); if (head) while (pos && (head = ct_get_next(seq, head))) @@ -87,69 +89,76 @@ static void ct_seq_stop(struct seq_file *s, void *v) static int ct_seq_show(struct seq_file *s, void *v) { - const struct nf_conntrack_tuple_hash *hash = v; - const struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash); + struct nf_conntrack_tuple_hash *hash = v; + struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash); const struct nf_conntrack_l3proto *l3proto; const struct nf_conntrack_l4proto *l4proto; + int ret = 0; NF_CT_ASSERT(ct); + if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use))) + return 0; + /* we only want to print DIR_ORIGINAL */ if (NF_CT_DIRECTION(hash)) - return 0; + goto release; if (nf_ct_l3num(ct) != AF_INET) - return 0; + goto release; l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct)); NF_CT_ASSERT(l3proto); l4proto = 
__nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); NF_CT_ASSERT(l4proto); + ret = -ENOSPC; if (seq_printf(s, "%-8s %u %ld ", l4proto->name, nf_ct_protonum(ct), timer_pending(&ct->timeout) ? (long)(ct->timeout.expires - jiffies)/HZ : 0) != 0) - return -ENOSPC; + goto release; if (l4proto->print_conntrack && l4proto->print_conntrack(s, ct)) - return -ENOSPC; + goto release; if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, l3proto, l4proto)) - return -ENOSPC; + goto release; if (seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL)) - return -ENOSPC; + goto release; if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status))) if (seq_printf(s, "[UNREPLIED] ")) - return -ENOSPC; + goto release; if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, l3proto, l4proto)) - return -ENOSPC; + goto release; if (seq_print_acct(s, ct, IP_CT_DIR_REPLY)) - return -ENOSPC; + goto release; if (test_bit(IPS_ASSURED_BIT, &ct->status)) if (seq_printf(s, "[ASSURED] ")) - return -ENOSPC; + goto release; #ifdef CONFIG_NF_CONNTRACK_MARK if (seq_printf(s, "mark=%u ", ct->mark)) - return -ENOSPC; + goto release; #endif #ifdef CONFIG_NF_CONNTRACK_SECMARK if (seq_printf(s, "secmark=%u ", ct->secmark)) - return -ENOSPC; + goto release; #endif if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use))) - return -ENOSPC; - - return 0; + goto release; + ret = 0; +release: + nf_ct_put(ct); + return ret; } static const struct seq_operations ct_seq_ops = { diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c index a65cf69..fe65187 100644 --- a/net/ipv4/netfilter/nf_nat_core.c +++ b/net/ipv4/netfilter/nf_nat_core.c @@ -679,7 +679,7 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct, static int __net_init nf_nat_net_init(struct net *net) { net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, - &net->ipv4.nat_vmalloced); + &net->ipv4.nat_vmalloced, 0); if (!net->ipv4.nat_bysource) return -ENOMEM; return 0; diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 54e983f..c55bbdc 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -163,8 +164,8 @@ static void clean_from_lists(struct nf_conn *ct) { pr_debug("clean_from_lists(%p)\n", ct); - hlist_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode); - hlist_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnode); + hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode); + hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode); /* Destroy all pending expectations */ nf_ct_remove_expectations(ct); @@ -204,8 +205,8 @@ destroy_conntrack(struct nf_conntrack *nfct) /* We overload first tuple to link into unconfirmed list. 
*/ if (!nf_ct_is_confirmed(ct)) { - BUG_ON(hlist_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode)); - hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode); + BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode)); + hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode); } NF_CT_STAT_INC(net, delete); @@ -242,18 +243,26 @@ static void death_by_timeout(unsigned long ul_conntrack) nf_ct_put(ct); } +/* + * Warning : + * - Caller must take a reference on returned object + * and recheck nf_ct_tuple_equal(tuple, &h->tuple) + * OR + * - Caller must lock nf_conntrack_lock before calling this function + */ struct nf_conntrack_tuple_hash * __nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple) { struct nf_conntrack_tuple_hash *h; - struct hlist_node *n; + struct hlist_nulls_node *n; unsigned int hash = hash_conntrack(tuple); /* Disable BHs the entire time since we normally need to disable them * at least once for the stats anyway. */ local_bh_disable(); - hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnode) { +begin: + hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) { if (nf_ct_tuple_equal(tuple, &h->tuple)) { NF_CT_STAT_INC(net, found); local_bh_enable(); @@ -261,6 +270,13 @@ __nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple) } NF_CT_STAT_INC(net, searched); } + /* + * if the nulls value we got at the end of this lookup is + * not the expected one, we must restart lookup. + * We probably met an item that was moved to another chain. + */ + if (get_nulls_value(n) != hash) + goto begin; local_bh_enable(); return NULL; @@ -275,11 +291,18 @@ nf_conntrack_find_get(struct net *net, const struct nf_conntrack_tuple *tuple) struct nf_conn *ct; rcu_read_lock(); +begin: h = __nf_conntrack_find(net, tuple); if (h) { ct = nf_ct_tuplehash_to_ctrack(h); if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use))) h = NULL; + else { + if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple))) { + nf_ct_put(ct); + goto begin; + } + } } rcu_read_unlock(); @@ -293,9 +316,9 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct, { struct net *net = nf_ct_net(ct); - hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode, + hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, &net->ct.hash[hash]); - hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnode, + hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode, &net->ct.hash[repl_hash]); } @@ -318,7 +341,7 @@ __nf_conntrack_confirm(struct sk_buff *skb) struct nf_conntrack_tuple_hash *h; struct nf_conn *ct; struct nf_conn_help *help; - struct hlist_node *n; + struct hlist_nulls_node *n; enum ip_conntrack_info ctinfo; struct net *net; @@ -350,17 +373,17 @@ __nf_conntrack_confirm(struct sk_buff *skb) /* See if there's one in the list already, including reverse: NAT could have grabbed it without realizing, since we're not in the hash. If there is, we lost race. 
*/ - hlist_for_each_entry(h, n, &net->ct.hash[hash], hnode) + hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode) if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, &h->tuple)) goto out; - hlist_for_each_entry(h, n, &net->ct.hash[repl_hash], hnode) + hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode) if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple, &h->tuple)) goto out; /* Remove from unconfirmed list */ - hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode); + hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode); __nf_conntrack_hash_insert(ct, hash, repl_hash); /* Timer relative to confirmation time, not original @@ -399,14 +422,14 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, { struct net *net = nf_ct_net(ignored_conntrack); struct nf_conntrack_tuple_hash *h; - struct hlist_node *n; + struct hlist_nulls_node *n; unsigned int hash = hash_conntrack(tuple); /* Disable BHs the entire time since we need to disable them at * least once for the stats anyway. */ rcu_read_lock_bh(); - hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnode) { + hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) { if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack && nf_ct_tuple_equal(tuple, &h->tuple)) { NF_CT_STAT_INC(net, found); @@ -430,14 +453,14 @@ static noinline int early_drop(struct net *net, unsigned int hash) /* Use oldest entry, which is roughly LRU */ struct nf_conntrack_tuple_hash *h; struct nf_conn *ct = NULL, *tmp; - struct hlist_node *n; + struct hlist_nulls_node *n; unsigned int i, cnt = 0; int dropped = 0; rcu_read_lock(); for (i = 0; i < nf_conntrack_htable_size; i++) { - hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash], - hnode) { + hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], + hnnode) { tmp = nf_ct_tuplehash_to_ctrack(h); if (!test_bit(IPS_ASSURED_BIT, &tmp->status)) ct = tmp; @@ -508,27 +531,19 @@ struct nf_conn *nf_conntrack_alloc(struct net *net, #ifdef CONFIG_NET_NS ct->ct_net = net; #endif - INIT_RCU_HEAD(&ct->rcu); return ct; } EXPORT_SYMBOL_GPL(nf_conntrack_alloc); -static void nf_conntrack_free_rcu(struct rcu_head *head) -{ - struct nf_conn *ct = container_of(head, struct nf_conn, rcu); - - nf_ct_ext_free(ct); - kmem_cache_free(nf_conntrack_cachep, ct); -} - void nf_conntrack_free(struct nf_conn *ct) { struct net *net = nf_ct_net(ct); nf_ct_ext_destroy(ct); atomic_dec(&net->ct.count); - call_rcu(&ct->rcu, nf_conntrack_free_rcu); + nf_ct_ext_free(ct); + kmem_cache_free(nf_conntrack_cachep, ct); } EXPORT_SYMBOL_GPL(nf_conntrack_free); @@ -594,7 +609,7 @@ init_conntrack(struct net *net, } /* Overload tuple linked list to put us in unconfirmed list. 
*/ - hlist_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode, + hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, &net->ct.unconfirmed); spin_unlock_bh(&nf_conntrack_lock); @@ -934,17 +949,17 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data), { struct nf_conntrack_tuple_hash *h; struct nf_conn *ct; - struct hlist_node *n; + struct hlist_nulls_node *n; spin_lock_bh(&nf_conntrack_lock); for (; *bucket < nf_conntrack_htable_size; (*bucket)++) { - hlist_for_each_entry(h, n, &net->ct.hash[*bucket], hnode) { + hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) { ct = nf_ct_tuplehash_to_ctrack(h); if (iter(ct, data)) goto found; } } - hlist_for_each_entry(h, n, &net->ct.unconfirmed, hnode) { + hlist_nulls_for_each_entry(h, n, &net->ct.unconfirmed, hnnode) { ct = nf_ct_tuplehash_to_ctrack(h); if (iter(ct, data)) set_bit(IPS_DYING_BIT, &ct->status); @@ -992,7 +1007,7 @@ static int kill_all(struct nf_conn *i, void *data) return 1; } -void nf_ct_free_hashtable(struct hlist_head *hash, int vmalloced, unsigned int size) +void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size) { if (vmalloced) vfree(hash); @@ -1060,26 +1075,28 @@ void nf_conntrack_cleanup(struct net *net) } } -struct hlist_head *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced) +void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls) { - struct hlist_head *hash; - unsigned int size, i; + struct hlist_nulls_head *hash; + unsigned int nr_slots, i; + size_t sz; *vmalloced = 0; - size = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_head)); - hash = (void*)__get_free_pages(GFP_KERNEL|__GFP_NOWARN, - get_order(sizeof(struct hlist_head) - * size)); + BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head)); + nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head)); + sz = nr_slots * sizeof(struct hlist_nulls_head); + hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, + get_order(sz)); if (!hash) { *vmalloced = 1; printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n"); - hash = vmalloc(sizeof(struct hlist_head) * size); + hash = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL); } - if (hash) - for (i = 0; i < size; i++) - INIT_HLIST_HEAD(&hash[i]); + if (hash && nulls) + for (i = 0; i < nr_slots; i++) + INIT_HLIST_NULLS_HEAD(&hash[i], i); return hash; } @@ -1090,7 +1107,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) int i, bucket, vmalloced, old_vmalloced; unsigned int hashsize, old_size; int rnd; - struct hlist_head *hash, *old_hash; + struct hlist_nulls_head *hash, *old_hash; struct nf_conntrack_tuple_hash *h; /* On boot, we can set this without any fancy locking. 
*/ @@ -1101,7 +1118,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) if (!hashsize) return -EINVAL; - hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced); + hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced, 1); if (!hash) return -ENOMEM; @@ -1116,12 +1133,12 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) */ spin_lock_bh(&nf_conntrack_lock); for (i = 0; i < nf_conntrack_htable_size; i++) { - while (!hlist_empty(&init_net.ct.hash[i])) { - h = hlist_entry(init_net.ct.hash[i].first, - struct nf_conntrack_tuple_hash, hnode); - hlist_del_rcu(&h->hnode); + while (!hlist_nulls_empty(&init_net.ct.hash[i])) { + h = hlist_nulls_entry(init_net.ct.hash[i].first, + struct nf_conntrack_tuple_hash, hnnode); + hlist_nulls_del_rcu(&h->hnnode); bucket = __hash_conntrack(&h->tuple, hashsize, rnd); - hlist_add_head_rcu(&h->hnode, &hash[bucket]); + hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]); } } old_size = nf_conntrack_htable_size; @@ -1172,7 +1189,7 @@ static int nf_conntrack_init_init_net(void) nf_conntrack_cachep = kmem_cache_create("nf_conntrack", sizeof(struct nf_conn), - 0, 0, NULL); + 0, SLAB_DESTROY_BY_RCU, NULL); if (!nf_conntrack_cachep) { printk(KERN_ERR "Unable to create nf_conn slab cache\n"); ret = -ENOMEM; @@ -1202,7 +1219,7 @@ static int nf_conntrack_init_net(struct net *net) int ret; atomic_set(&net->ct.count, 0); - INIT_HLIST_HEAD(&net->ct.unconfirmed); + INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, 0); net->ct.stat = alloc_percpu(struct ip_conntrack_stat); if (!net->ct.stat) { ret = -ENOMEM; @@ -1212,7 +1229,7 @@ static int nf_conntrack_init_net(struct net *net) if (ret < 0) goto err_ecache; net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, - &net->ct.hash_vmalloc); + &net->ct.hash_vmalloc, 1); if (!net->ct.hash) { ret = -ENOMEM; printk(KERN_ERR "Unable to create nf_conntrack_hash\n"); diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index 357ba39..3940f99 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -604,7 +604,7 @@ int nf_conntrack_expect_init(struct net *net) net->ct.expect_count = 0; net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, - &net->ct.expect_vmalloc); + &net->ct.expect_vmalloc, 0); if (net->ct.expect_hash == NULL) goto err1; diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index 805cfdd..30b8e90 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c @@ -159,6 +159,7 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me, struct nf_conntrack_tuple_hash *h; struct nf_conntrack_expect *exp; const struct hlist_node *n, *next; + const struct hlist_nulls_node *nn; unsigned int i; /* Get rid of expectations */ @@ -175,10 +176,10 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me, } /* Get rid of expecteds, set helpers to NULL. 
*/ - hlist_for_each_entry(h, n, &net->ct.unconfirmed, hnode) + hlist_for_each_entry(h, nn, &net->ct.unconfirmed, hnnode) unhelp(h, me); for (i = 0; i < nf_conntrack_htable_size; i++) { - hlist_for_each_entry(h, n, &net->ct.hash[i], hnode) + hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode) unhelp(h, me); } } @@ -218,7 +219,7 @@ int nf_conntrack_helper_init(void) nf_ct_helper_hsize = 1; /* gets rounded up to use one page */ nf_ct_helper_hash = nf_ct_alloc_hashtable(&nf_ct_helper_hsize, - &nf_ct_helper_vmalloc); + &nf_ct_helper_vmalloc, 0); if (!nf_ct_helper_hash) return -ENOMEM; diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 1b75c9e..349bbef 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -536,7 +537,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) { struct nf_conn *ct, *last; struct nf_conntrack_tuple_hash *h; - struct hlist_node *n; + struct hlist_nulls_node *n; struct nfgenmsg *nfmsg = NLMSG_DATA(cb->nlh); u_int8_t l3proto = nfmsg->nfgen_family; @@ -544,27 +545,27 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) last = (struct nf_conn *)cb->args[1]; for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) { restart: - hlist_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]], - hnode) { + hlist_nulls_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]], + hnnode) { if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL) continue; ct = nf_ct_tuplehash_to_ctrack(h); + if (!atomic_inc_not_zero(&ct->ct_general.use)) + continue; /* Dump entries of a given L3 protocol number. * If it is not specified, ie. l3proto == 0, * then dump everything. 
*/ if (l3proto && nf_ct_l3num(ct) != l3proto) - continue; + goto releasect; if (cb->args[1]) { if (ct != last) - continue; + goto releasect; cb->args[1] = 0; } if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, IPCTNL_MSG_CT_NEW, 1, ct) < 0) { - if (!atomic_inc_not_zero(&ct->ct_general.use)) - continue; cb->args[1] = (unsigned long)ct; goto out; } @@ -577,6 +578,8 @@ restart: if (acct) memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX])); } +releasect: + nf_ct_put(ct); } if (cb->args[1]) { cb->args[1] = 0; @@ -1242,13 +1245,12 @@ ctnetlink_create_conntrack(struct nlattr *cda[], if (err < 0) goto err2; - master_h = __nf_conntrack_find(&init_net, &master); + master_h = nf_conntrack_find_get(&init_net, &master); if (master_h == NULL) { err = -ENOENT; goto err2; } master_ct = nf_ct_tuplehash_to_ctrack(master_h); - nf_conntrack_get(&master_ct->ct_general); __set_bit(IPS_EXPECTED_BIT, &ct->status); ct->master = master_ct; } diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 4da54b0..1935153 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -44,40 +44,42 @@ struct ct_iter_state { unsigned int bucket; }; -static struct hlist_node *ct_get_first(struct seq_file *seq) +static struct hlist_nulls_node *ct_get_first(struct seq_file *seq) { struct net *net = seq_file_net(seq); struct ct_iter_state *st = seq->private; - struct hlist_node *n; + struct hlist_nulls_node *n; for (st->bucket = 0; st->bucket < nf_conntrack_htable_size; st->bucket++) { n = rcu_dereference(net->ct.hash[st->bucket].first); - if (n) + if (!is_a_nulls(n)) return n; } return NULL; } -static struct hlist_node *ct_get_next(struct seq_file *seq, - struct hlist_node *head) +static struct hlist_nulls_node *ct_get_next(struct seq_file *seq, + struct hlist_nulls_node *head) { struct net *net = seq_file_net(seq); struct ct_iter_state *st = seq->private; head = rcu_dereference(head->next); - while (head == NULL) { - if (++st->bucket >= nf_conntrack_htable_size) - return NULL; + while (is_a_nulls(head)) { + if (likely(get_nulls_value(head) == st->bucket)) { + if (++st->bucket >= nf_conntrack_htable_size) + return NULL; + } head = rcu_dereference(net->ct.hash[st->bucket].first); } return head; } -static struct hlist_node *ct_get_idx(struct seq_file *seq, loff_t pos) +static struct hlist_nulls_node *ct_get_idx(struct seq_file *seq, loff_t pos) { - struct hlist_node *head = ct_get_first(seq); + struct hlist_nulls_node *head = ct_get_first(seq); if (head) while (pos && (head = ct_get_next(seq, head))) @@ -107,67 +109,74 @@ static void ct_seq_stop(struct seq_file *s, void *v) /* return 0 on success, 1 in case of error */ static int ct_seq_show(struct seq_file *s, void *v) { - const struct nf_conntrack_tuple_hash *hash = v; - const struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash); + struct nf_conntrack_tuple_hash *hash = v; + struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash); const struct nf_conntrack_l3proto *l3proto; const struct nf_conntrack_l4proto *l4proto; + int ret = 0; NF_CT_ASSERT(ct); + if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use))) + return 0; /* we only want to print DIR_ORIGINAL */ if (NF_CT_DIRECTION(hash)) - return 0; + goto release; l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct)); NF_CT_ASSERT(l3proto); l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); NF_CT_ASSERT(l4proto); + ret = -ENOSPC; if (seq_printf(s, "%-8s %u %-8s %u %ld ", l3proto->name, nf_ct_l3num(ct), 
l4proto->name, nf_ct_protonum(ct), timer_pending(&ct->timeout) ? (long)(ct->timeout.expires - jiffies)/HZ : 0) != 0) - return -ENOSPC; + goto release; if (l4proto->print_conntrack && l4proto->print_conntrack(s, ct)) - return -ENOSPC; + goto release; if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, l3proto, l4proto)) - return -ENOSPC; + goto release; if (seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL)) - return -ENOSPC; + goto release; if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status))) if (seq_printf(s, "[UNREPLIED] ")) - return -ENOSPC; + goto release; if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, l3proto, l4proto)) - return -ENOSPC; + goto release; if (seq_print_acct(s, ct, IP_CT_DIR_REPLY)) - return -ENOSPC; + goto release; if (test_bit(IPS_ASSURED_BIT, &ct->status)) if (seq_printf(s, "[ASSURED] ")) - return -ENOSPC; + goto release; #if defined(CONFIG_NF_CONNTRACK_MARK) if (seq_printf(s, "mark=%u ", ct->mark)) - return -ENOSPC; + goto release; #endif #ifdef CONFIG_NF_CONNTRACK_SECMARK if (seq_printf(s, "secmark=%u ", ct->secmark)) - return -ENOSPC; + goto release; #endif if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use))) - return -ENOSPC; + goto release; + ret = 0; +release: + nf_ct_put(ct); return 0; } diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c index 7f404cc..6809809 100644 --- a/net/netfilter/xt_connlimit.c +++ b/net/netfilter/xt_connlimit.c @@ -108,7 +108,7 @@ static int count_them(struct xt_connlimit_data *data, const struct nf_conntrack_tuple_hash *found; struct xt_connlimit_conn *conn; struct xt_connlimit_conn *tmp; - const struct nf_conn *found_ct; + struct nf_conn *found_ct; struct list_head *hash; bool addit = true; int matches = 0; @@ -123,7 +123,7 @@ static int count_them(struct xt_connlimit_data *data, /* check the saved connections */ list_for_each_entry_safe(conn, tmp, hash, list) { - found = __nf_conntrack_find(&init_net, &conn->tuple); + found = nf_conntrack_find_get(&init_net, &conn->tuple); found_ct = NULL; if (found != NULL) @@ -151,6 +151,7 @@ static int count_them(struct xt_connlimit_data *data, * we do not care about connections which are * closed already -> ditch it */ + nf_ct_put(found_ct); list_del(&conn->list); kfree(conn); continue; @@ -160,6 +161,7 @@ static int count_them(struct xt_connlimit_data *data, match->family)) /* same source network -> be counted! */ ++matches; + nf_ct_put(found_ct); } rcu_read_unlock(); -- cgit v0.10.2 From 2732c4e45bb67006fdc9ae6669be866762711ab5 Mon Sep 17 00:00:00 2001 From: Holger Eitzenberger Date: Wed, 25 Mar 2009 21:50:59 +0100 Subject: netfilter: ctnetlink: allocate right-sized ctnetlink skb Try to allocate a Netlink skb roughly the size of the actual message, with the help from the l3 and l4 protocol helpers. This is all to prevent a reallocation in netlink_trim() later. The overhead of allocating the right-sized skb is rather small, with ctnetlink_alloc_skb() actually being inlined away on my x86_64 box. The size of the per-proto space is determined at registration time of the protocol helper. Signed-off-by: Holger Eitzenberger Signed-off-by: Patrick McHardy diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 349bbef..03547c6 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -405,6 +405,69 @@ nla_put_failure: } #ifdef CONFIG_NF_CONNTRACK_EVENTS +/* + * The general structure of a ctnetlink event is + * + * CTA_TUPLE_ORIG + * + * CTA_TUPLE_REPLY + * + * CTA_ID + * ... 
+ * CTA_PROTOINFO + * + * CTA_TUPLE_MASTER + * + * + * Therefore the formular is + * + * size = sizeof(headers) + sizeof(generic_nlas) + 3 * sizeof(tuple_nlas) + * + sizeof(protoinfo_nlas) + */ +static struct sk_buff * +ctnetlink_alloc_skb(const struct nf_conntrack_tuple *tuple, gfp_t gfp) +{ + struct nf_conntrack_l3proto *l3proto; + struct nf_conntrack_l4proto *l4proto; + int len; + +#define NLA_TYPE_SIZE(type) nla_total_size(sizeof(type)) + + /* proto independant part */ + len = NLMSG_SPACE(sizeof(struct nfgenmsg)) + + 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */ + + 3 * nla_total_size(0) /* CTA_TUPLE_IP */ + + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */ + + 3 * NLA_TYPE_SIZE(u_int8_t) /* CTA_PROTO_NUM */ + + NLA_TYPE_SIZE(u_int32_t) /* CTA_ID */ + + NLA_TYPE_SIZE(u_int32_t) /* CTA_STATUS */ + + 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */ + + 2 * NLA_TYPE_SIZE(uint64_t) /* CTA_COUNTERS_PACKETS */ + + 2 * NLA_TYPE_SIZE(uint64_t) /* CTA_COUNTERS_BYTES */ + + NLA_TYPE_SIZE(u_int32_t) /* CTA_TIMEOUT */ + + nla_total_size(0) /* CTA_PROTOINFO */ + + nla_total_size(0) /* CTA_HELP */ + + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */ + + NLA_TYPE_SIZE(u_int32_t) /* CTA_SECMARK */ + + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */ + + 2 * NLA_TYPE_SIZE(u_int32_t) /* CTA_NAT_SEQ_CORRECTION_POS */ + + 2 * NLA_TYPE_SIZE(u_int32_t) /* CTA_NAT_SEQ_CORRECTION_BEFORE */ + + 2 * NLA_TYPE_SIZE(u_int32_t) /* CTA_NAT_SEQ_CORRECTION_AFTER */ + + NLA_TYPE_SIZE(u_int32_t); /* CTA_MARK */ + +#undef NLA_TYPE_SIZE + + rcu_read_lock(); + l3proto = __nf_ct_l3proto_find(tuple->src.l3num); + len += l3proto->nla_size; + + l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum); + len += l4proto->nla_size; + rcu_read_unlock(); + + return alloc_skb(len, gfp); +} + static int ctnetlink_conntrack_event(struct notifier_block *this, unsigned long events, void *ptr) { @@ -438,7 +501,7 @@ static int ctnetlink_conntrack_event(struct notifier_block *this, if (!item->report && !nfnetlink_has_listeners(group)) return NOTIFY_DONE; - skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC); + skb = ctnetlink_alloc_skb(tuple(ct, IP_CT_DIR_ORIGINAL), GFP_ATOMIC); if (!skb) return NOTIFY_DONE; -- cgit v0.10.2 From 5c0de29d06318ec8f6e3ba0d17d62529dbbdc1e8 Mon Sep 17 00:00:00 2001 From: Holger Eitzenberger Date: Wed, 25 Mar 2009 21:52:17 +0100 Subject: netfilter: nf_conntrack: add generic function to get len of generic policy Usefull for all protocols which do not add additional data, such as GRE or UDPlite. 
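For a tuple policy that carries only the two 16-bit port attributes, as nf_ct_port_nla_policy does, the generic length helper reduces to simple arithmetic. A self-contained sketch, restating the usual 4-byte netlink attribute header and 4-byte alignment purely for illustration:

#include <stdio.h>

#define NLA_ALIGNTO   4
#define NLA_ALIGN(x)  (((x) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN    NLA_ALIGN(4)      /* sizeof(struct nlattr) */

static int nla_total_size(int payload)
{
	return NLA_ALIGN(NLA_HDRLEN + payload);
}

int main(void)
{
	/* per-attribute payload sizes from the policy: two u16 ports */
	int payloads[] = { 2, 2 };
	int i, len = 0;

	for (i = 0; i < 2; i++)
		len += nla_total_size(payloads[i]);

	/* 2 * NLA_ALIGN(4 + 2) = 2 * 8 = 16 bytes per tuple */
	printf("port tuple nlattr size = %d bytes\n", len);
	return 0;
}

Each port attribute rounds up to 8 bytes, so the shared helper reports 16 bytes per tuple; the registration code added earlier then multiplies that by three to cover the ORIG, REPLY and MASTER tuples of an event.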
Signed-off-by: Holger Eitzenberger Signed-off-by: Patrick McHardy diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h index a120990..ba32ed7 100644 --- a/include/net/netfilter/nf_conntrack_l4proto.h +++ b/include/net/netfilter/nf_conntrack_l4proto.h @@ -113,6 +113,7 @@ extern int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb, const struct nf_conntrack_tuple *tuple); extern int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[], struct nf_conntrack_tuple *t); +extern int nf_ct_port_nlattr_tuple_size(void); extern const struct nla_policy nf_ct_port_nla_policy[]; #ifdef CONFIG_SYSCTL diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index c55bbdc..b182b30 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -921,6 +921,12 @@ int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[], return 0; } EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple); + +int nf_ct_port_nlattr_tuple_size(void) +{ + return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1); +} +EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size); #endif /* Used by ipt_REJECT and ip6t_REJECT. */ -- cgit v0.10.2 From a400c30edb1958ceb53c4b8ce78989189b36df47 Mon Sep 17 00:00:00 2001 From: Holger Eitzenberger Date: Wed, 25 Mar 2009 21:53:39 +0100 Subject: netfilter: nf_conntrack: calculate per-protocol nlattr size Signed-off-by: Holger Eitzenberger Signed-off-by: Patrick McHardy diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index 8b681f2..7d2ead7 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c @@ -328,6 +328,11 @@ static int ipv4_nlattr_to_tuple(struct nlattr *tb[], return 0; } + +static int ipv4_nlattr_tuple_size(void) +{ + return nla_policy_len(ipv4_nla_policy, CTA_IP_MAX + 1); +} #endif static struct nf_sockopt_ops so_getorigdst = { @@ -347,6 +352,7 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 __read_mostly = { .get_l4proto = ipv4_get_l4proto, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) .tuple_to_nlattr = ipv4_tuple_to_nlattr, + .nlattr_tuple_size = ipv4_nlattr_tuple_size, .nlattr_to_tuple = ipv4_nlattr_to_tuple, .nla_policy = ipv4_nla_policy, #endif diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c index 2a8bee2..23b2c2e 100644 --- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c +++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c @@ -262,6 +262,11 @@ static int icmp_nlattr_to_tuple(struct nlattr *tb[], return 0; } + +static int icmp_nlattr_tuple_size(void) +{ + return nla_policy_len(icmp_nla_policy, CTA_PROTO_MAX + 1); +} #endif #ifdef CONFIG_SYSCTL @@ -309,6 +314,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp __read_mostly = .me = NULL, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) .tuple_to_nlattr = icmp_tuple_to_nlattr, + .nlattr_tuple_size = icmp_nlattr_tuple_size, .nlattr_to_tuple = icmp_nlattr_to_tuple, .nla_policy = icmp_nla_policy, #endif diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c index e6852f6..2a15c2d 100644 --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c @@ -342,6 +342,11 @@ static int ipv6_nlattr_to_tuple(struct nlattr *tb[], return 0; } + +static int ipv6_nlattr_tuple_size(void) +{ + return nla_policy_len(ipv6_nla_policy, CTA_IP_MAX 
+ 1); +} #endif struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6 __read_mostly = { @@ -353,6 +358,7 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6 __read_mostly = { .get_l4proto = ipv6_get_l4proto, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) .tuple_to_nlattr = ipv6_tuple_to_nlattr, + .nlattr_tuple_size = ipv6_nlattr_tuple_size, .nlattr_to_tuple = ipv6_nlattr_to_tuple, .nla_policy = ipv6_nla_policy, #endif diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c index 165b256..032fdf4 100644 --- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c +++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c @@ -268,6 +268,11 @@ static int icmpv6_nlattr_to_tuple(struct nlattr *tb[], return 0; } + +static int icmpv6_nlattr_tuple_size(void) +{ + return nla_policy_len(icmpv6_nla_policy, CTA_PROTO_MAX + 1); +} #endif #ifdef CONFIG_SYSCTL @@ -299,6 +304,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 __read_mostly = .error = icmpv6_error, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) .tuple_to_nlattr = icmpv6_tuple_to_nlattr, + .nlattr_tuple_size = icmpv6_nlattr_tuple_size, .nlattr_to_tuple = icmpv6_nlattr_to_tuple, .nla_policy = icmpv6_nla_policy, #endif diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index d3d5a7f..50dac8db 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c @@ -669,6 +669,12 @@ static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct) write_unlock_bh(&dccp_lock); return 0; } + +static int dccp_nlattr_size(void) +{ + return nla_total_size(0) /* CTA_PROTOINFO_DCCP */ + + nla_policy_len(dccp_nla_policy, CTA_PROTOINFO_DCCP_MAX + 1); +} #endif #ifdef CONFIG_SYSCTL @@ -749,8 +755,10 @@ static struct nf_conntrack_l4proto dccp_proto4 __read_mostly = { .print_conntrack = dccp_print_conntrack, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) .to_nlattr = dccp_to_nlattr, + .nlattr_size = dccp_nlattr_size, .from_nlattr = nlattr_to_dccp, .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, + .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, .nla_policy = nf_ct_port_nla_policy, #endif @@ -771,6 +779,7 @@ static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = { .to_nlattr = dccp_to_nlattr, .from_nlattr = nlattr_to_dccp, .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, + .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, .nla_policy = nf_ct_port_nla_policy, #endif diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c index 1b279f9..117b801 100644 --- a/net/netfilter/nf_conntrack_proto_gre.c +++ b/net/netfilter/nf_conntrack_proto_gre.c @@ -293,6 +293,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = { .me = THIS_MODULE, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, + .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, .nla_policy = nf_ct_port_nla_policy, #endif diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 74e0379..101b4ad 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -537,6 +537,12 @@ static int nlattr_to_sctp(struct nlattr *cda[], struct nf_conn *ct) 
return 0; } + +static int sctp_nlattr_size(void) +{ + return nla_total_size(0) /* CTA_PROTOINFO_SCTP */ + + nla_policy_len(sctp_nla_policy, CTA_PROTOINFO_SCTP_MAX + 1); +} #endif #ifdef CONFIG_SYSCTL @@ -668,8 +674,10 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = { .me = THIS_MODULE, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) .to_nlattr = sctp_to_nlattr, + .nlattr_size = sctp_nlattr_size, .from_nlattr = nlattr_to_sctp, .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, + .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, .nla_policy = nf_ct_port_nla_policy, #endif @@ -696,8 +704,10 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = { .me = THIS_MODULE, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) .to_nlattr = sctp_to_nlattr, + .nlattr_size = sctp_nlattr_size, .from_nlattr = nlattr_to_sctp, .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, + .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, .nla_policy = nf_ct_port_nla_policy, #endif diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 7d3944f..9b9e671 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -1183,6 +1183,17 @@ static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct) return 0; } + +static int tcp_nlattr_size(void) +{ + return nla_total_size(0) /* CTA_PROTOINFO_TCP */ + + nla_policy_len(tcp_nla_policy, CTA_PROTOINFO_TCP_MAX + 1); +} + +static int tcp_nlattr_tuple_size(void) +{ + return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1); +} #endif #ifdef CONFIG_SYSCTL @@ -1398,9 +1409,11 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly = .error = tcp_error, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) .to_nlattr = tcp_to_nlattr, + .nlattr_size = tcp_nlattr_size, .from_nlattr = nlattr_to_tcp, .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, + .nlattr_tuple_size = tcp_nlattr_tuple_size, .nla_policy = nf_ct_port_nla_policy, #endif #ifdef CONFIG_SYSCTL @@ -1428,9 +1441,11 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 __read_mostly = .error = tcp_error, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) .to_nlattr = tcp_to_nlattr, + .nlattr_size = tcp_nlattr_size, .from_nlattr = nlattr_to_tcp, .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, + .nlattr_tuple_size = tcp_nlattr_tuple_size, .nla_policy = nf_ct_port_nla_policy, #endif #ifdef CONFIG_SYSCTL diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c index d402117..70809d1 100644 --- a/net/netfilter/nf_conntrack_proto_udp.c +++ b/net/netfilter/nf_conntrack_proto_udp.c @@ -195,6 +195,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly = #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, + .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, .nla_policy = nf_ct_port_nla_policy, #endif #ifdef CONFIG_SYSCTL @@ -222,6 +223,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly = #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, 
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, + .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, .nla_policy = nf_ct_port_nla_policy, #endif #ifdef CONFIG_SYSCTL diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c index 4579d8d..4614696 100644 --- a/net/netfilter/nf_conntrack_proto_udplite.c +++ b/net/netfilter/nf_conntrack_proto_udplite.c @@ -180,6 +180,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly = .error = udplite_error, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, + .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, .nla_policy = nf_ct_port_nla_policy, #endif -- cgit v0.10.2 From f18df228997fb716990590d248663981a15f17d4 Mon Sep 17 00:00:00 2001 From: Mingming Cao Date: Tue, 13 Jan 2009 16:43:09 +0100 Subject: quota: Add quota reservation support Delayed allocation defers the block allocation at the dirty pages flush-out time, doing quota charge/check at that time is too late. But we can't charge the quota blocks until blocks are really allocated, otherwise users could get overcharged after reboot from system crash. This patch adds quota reservation for delayed allocation. Quota blocks are reserved in memory, inode and quota won't gets dirtied until later block allocation time. Signed-off-by: Mingming Cao Signed-off-by: Jan Kara diff --git a/fs/dquot.c b/fs/dquot.c index bca3cac..9b1c4d3 100644 --- a/fs/dquot.c +++ b/fs/dquot.c @@ -899,6 +899,11 @@ static inline void dquot_incr_space(struct dquot *dquot, qsize_t number) dquot->dq_dqb.dqb_curspace += number; } +static inline void dquot_resv_space(struct dquot *dquot, qsize_t number) +{ + dquot->dq_dqb.dqb_rsvspace += number; +} + static inline void dquot_decr_inodes(struct dquot *dquot, qsize_t number) { if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE || @@ -1068,7 +1073,11 @@ err_out: kfree_skb(skb); } #endif - +/* + * Write warnings to the console and send warning messages over netlink. + * + * Note that this function can sleep. 
+ */ static inline void flush_warnings(struct dquot * const *dquots, char *warntype) { int i; @@ -1129,13 +1138,18 @@ static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype) /* needs dq_data_lock */ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype) { + qsize_t tspace; + *warntype = QUOTA_NL_NOWARN; if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) || test_bit(DQ_FAKE_B, &dquot->dq_flags)) return QUOTA_OK; + tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace + + space; + if (dquot->dq_dqb.dqb_bhardlimit && - dquot->dq_dqb.dqb_curspace + space > dquot->dq_dqb.dqb_bhardlimit && + tspace > dquot->dq_dqb.dqb_bhardlimit && !ignore_hardlimit(dquot)) { if (!prealloc) *warntype = QUOTA_NL_BHARDWARN; @@ -1143,7 +1157,7 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war } if (dquot->dq_dqb.dqb_bsoftlimit && - dquot->dq_dqb.dqb_curspace + space > dquot->dq_dqb.dqb_bsoftlimit && + tspace > dquot->dq_dqb.dqb_bsoftlimit && dquot->dq_dqb.dqb_btime && get_seconds() >= dquot->dq_dqb.dqb_btime && !ignore_hardlimit(dquot)) { if (!prealloc) @@ -1152,7 +1166,7 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war } if (dquot->dq_dqb.dqb_bsoftlimit && - dquot->dq_dqb.dqb_curspace + space > dquot->dq_dqb.dqb_bsoftlimit && + tspace > dquot->dq_dqb.dqb_bsoftlimit && dquot->dq_dqb.dqb_btime == 0) { if (!prealloc) { *warntype = QUOTA_NL_BSOFTWARN; @@ -1306,51 +1320,92 @@ void vfs_dq_drop(struct inode *inode) /* * This operation can block, but only after everything is updated */ -int dquot_alloc_space(struct inode *inode, qsize_t number, int warn) +int __dquot_alloc_space(struct inode *inode, qsize_t number, + int warn, int reserve) { - int cnt, ret = NO_QUOTA; + int cnt, ret = QUOTA_OK; char warntype[MAXQUOTAS]; - /* First test before acquiring mutex - solves deadlocks when we - * re-enter the quota code and are already holding the mutex */ - if (IS_NOQUOTA(inode)) { -out_add: - inode_add_bytes(inode, number); - return QUOTA_OK; - } for (cnt = 0; cnt < MAXQUOTAS; cnt++) warntype[cnt] = QUOTA_NL_NOWARN; - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); - if (IS_NOQUOTA(inode)) { /* Now we can do reliable test... 
*/ - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); - goto out_add; - } spin_lock(&dq_data_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (inode->i_dquot[cnt] == NODQUOT) continue; - if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) == NO_QUOTA) - goto warn_put_all; + if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) + == NO_QUOTA) { + ret = NO_QUOTA; + goto out_unlock; + } } for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (inode->i_dquot[cnt] == NODQUOT) continue; - dquot_incr_space(inode->i_dquot[cnt], number); + if (reserve) + dquot_resv_space(inode->i_dquot[cnt], number); + else + dquot_incr_space(inode->i_dquot[cnt], number); } - inode_add_bytes(inode, number); - ret = QUOTA_OK; -warn_put_all: + if (!reserve) + inode_add_bytes(inode, number); +out_unlock: spin_unlock(&dq_data_lock); - if (ret == QUOTA_OK) - /* Dirtify all the dquots - this can block when journalling */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - if (inode->i_dquot[cnt]) - mark_dquot_dirty(inode->i_dquot[cnt]); flush_warnings(inode->i_dquot, warntype); + return ret; +} + +int dquot_alloc_space(struct inode *inode, qsize_t number, int warn) +{ + int cnt, ret = QUOTA_OK; + + /* + * First test before acquiring mutex - solves deadlocks when we + * re-enter the quota code and are already holding the mutex + */ + if (IS_NOQUOTA(inode)) { + inode_add_bytes(inode, number); + goto out; + } + + down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + if (IS_NOQUOTA(inode)) { + inode_add_bytes(inode, number); + goto out_unlock; + } + + ret = __dquot_alloc_space(inode, number, warn, 0); + if (ret == NO_QUOTA) + goto out_unlock; + + /* Dirtify all the dquots - this can block when journalling */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + if (inode->i_dquot[cnt]) + mark_dquot_dirty(inode->i_dquot[cnt]); +out_unlock: up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); +out: + return ret; +} + +int dquot_reserve_space(struct inode *inode, qsize_t number, int warn) +{ + int ret = QUOTA_OK; + + if (IS_NOQUOTA(inode)) + goto out; + + down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + if (IS_NOQUOTA(inode)) + goto out_unlock; + + ret = __dquot_alloc_space(inode, number, warn, 1); +out_unlock: + up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); +out: return ret; } +EXPORT_SYMBOL(dquot_reserve_space); /* * This operation can block, but only after everything is updated @@ -2057,7 +2112,7 @@ static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di) spin_lock(&dq_data_lock); di->dqb_bhardlimit = stoqb(dm->dqb_bhardlimit); di->dqb_bsoftlimit = stoqb(dm->dqb_bsoftlimit); - di->dqb_curspace = dm->dqb_curspace; + di->dqb_curspace = dm->dqb_curspace + dm->dqb_rsvspace; di->dqb_ihardlimit = dm->dqb_ihardlimit; di->dqb_isoftlimit = dm->dqb_isoftlimit; di->dqb_curinodes = dm->dqb_curinodes; @@ -2097,7 +2152,7 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di) spin_lock(&dq_data_lock); if (di->dqb_valid & QIF_SPACE) { - dm->dqb_curspace = di->dqb_curspace; + dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace; check_blim = 1; __set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags); } diff --git a/include/linux/quota.h b/include/linux/quota.h index d72d5d8..54b837f 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -198,6 +198,7 @@ struct mem_dqblk { qsize_t dqb_bhardlimit; /* absolute limit on disk blks alloc */ qsize_t dqb_bsoftlimit; /* preferred limit on disk blks */ qsize_t dqb_curspace; /* current used space */ + qsize_t dqb_rsvspace; /* current reserved space for delalloc*/ qsize_t dqb_ihardlimit; /* absolute 
limit on allocated inodes */ qsize_t dqb_isoftlimit; /* preferred inode limit */ qsize_t dqb_curinodes; /* current # allocated inodes */ @@ -308,6 +309,8 @@ struct dquot_operations { int (*release_dquot) (struct dquot *); /* Quota is going to be deleted from disk */ int (*mark_dirty) (struct dquot *); /* Dquot is marked dirty */ int (*write_info) (struct super_block *, int); /* Write of quota "superblock" */ + /* reserve quota for delayed block allocation */ + int (*reserve_space) (struct inode *, qsize_t, int); }; /* Operations handling requests from userspace */ diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index 0b35b3a..3e3a0d2 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -183,6 +183,16 @@ static inline int vfs_dq_alloc_space(struct inode *inode, qsize_t nr) return ret; } +static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr) +{ + if (sb_any_quota_active(inode->i_sb)) { + /* Used space is updated in alloc_space() */ + if (inode->i_sb->dq_op->reserve_space(inode, nr, 0) == NO_QUOTA) + return 1; + } + return 0; +} + static inline int vfs_dq_alloc_inode(struct inode *inode) { if (sb_any_quota_active(inode->i_sb)) { @@ -339,6 +349,11 @@ static inline int vfs_dq_alloc_space(struct inode *inode, qsize_t nr) return 0; } +static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr) +{ + return 0; +} + static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr) { inode_sub_bytes(inode, nr); @@ -376,6 +391,12 @@ static inline int vfs_dq_alloc_block(struct inode *inode, qsize_t nr) nr << inode->i_sb->s_blocksize_bits); } +static inline int vfs_dq_reserve_block(struct inode *inode, qsize_t nr) +{ + return vfs_dq_reserve_space(inode, + nr << inode->i_blkbits); +} + static inline void vfs_dq_free_block_nodirty(struct inode *inode, qsize_t nr) { vfs_dq_free_space_nodirty(inode, nr << inode->i_sb->s_blocksize_bits); -- cgit v0.10.2 From 740d9dcd949a986c88886a591054a0cdb89ef669 Mon Sep 17 00:00:00 2001 From: Mingming Cao Date: Tue, 13 Jan 2009 16:43:14 +0100 Subject: quota: Add quota reservation claim and released operations Reserved quota will be claimed at the block allocation time. Over-booked quota could be returned back with the release callback function. 
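To make the reserve/claim/release life cycle easier to follow, here is a small stand-alone model (illustrative only, not kernel code and not part of the patch) of what the helpers below do to the two in-memory counters, dqb_rsvspace and dqb_curspace:

/*
 * Illustrative model of the accounting added below: reservations live in
 * rsvspace; claiming moves them into curspace; releasing drops the
 * over-booked remainder.  Units here are abstract "blocks"; the real code
 * works in bytes.
 */
#include <assert.h>

struct demo_dqblk {
	unsigned long long curspace;	/* models dqb_curspace */
	unsigned long long rsvspace;	/* models dqb_rsvspace */
};

static void demo_resv(struct demo_dqblk *q, unsigned long long n)
{
	q->rsvspace += n;		/* like dquot_resv_space() */
}

static void demo_claim(struct demo_dqblk *q, unsigned long long n)
{
	q->rsvspace -= n;		/* like dquot_claim_reserved_space() */
	q->curspace += n;
}

static void demo_release(struct demo_dqblk *q, unsigned long long n)
{
	q->rsvspace -= n;		/* like dquot_free_reserved_space() */
}

int main(void)
{
	struct demo_dqblk q = { 0, 0 };

	demo_resv(&q, 8);	/* delalloc write books 8 blocks of quota */
	demo_claim(&q, 5);	/* writeout really allocates only 5 of them */
	demo_release(&q, 3);	/* the 3 over-booked blocks are given back */

	assert(q.curspace == 5 && q.rsvspace == 0);
	return 0;
}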
Signed-off-by: Mingming Cao Signed-off-by: Jan Kara diff --git a/fs/dquot.c b/fs/dquot.c index 9b1c4d3..2916f91 100644 --- a/fs/dquot.c +++ b/fs/dquot.c @@ -904,6 +904,23 @@ static inline void dquot_resv_space(struct dquot *dquot, qsize_t number) dquot->dq_dqb.dqb_rsvspace += number; } +/* + * Claim reserved quota space + */ +static void dquot_claim_reserved_space(struct dquot *dquot, + qsize_t number) +{ + WARN_ON(dquot->dq_dqb.dqb_rsvspace < number); + dquot->dq_dqb.dqb_curspace += number; + dquot->dq_dqb.dqb_rsvspace -= number; +} + +static inline +void dquot_free_reserved_space(struct dquot *dquot, qsize_t number) +{ + dquot->dq_dqb.dqb_rsvspace -= number; +} + static inline void dquot_decr_inodes(struct dquot *dquot, qsize_t number) { if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE || @@ -1452,6 +1469,72 @@ warn_put_all: return ret; } +int dquot_claim_space(struct inode *inode, qsize_t number) +{ + int cnt; + int ret = QUOTA_OK; + + if (IS_NOQUOTA(inode)) { + inode_add_bytes(inode, number); + goto out; + } + + down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + if (IS_NOQUOTA(inode)) { + up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + inode_add_bytes(inode, number); + goto out; + } + + spin_lock(&dq_data_lock); + /* Claim reserved quotas to allocated quotas */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (inode->i_dquot[cnt] != NODQUOT) + dquot_claim_reserved_space(inode->i_dquot[cnt], + number); + } + /* Update inode bytes */ + inode_add_bytes(inode, number); + spin_unlock(&dq_data_lock); + /* Dirtify all the dquots - this can block when journalling */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + if (inode->i_dquot[cnt]) + mark_dquot_dirty(inode->i_dquot[cnt]); + up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); +out: + return ret; +} +EXPORT_SYMBOL(dquot_claim_space); + +/* + * Release reserved quota space + */ +void dquot_release_reserved_space(struct inode *inode, qsize_t number) +{ + int cnt; + + if (IS_NOQUOTA(inode)) + goto out; + + down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + if (IS_NOQUOTA(inode)) + goto out_unlock; + + spin_lock(&dq_data_lock); + /* Release reserved dquots */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (inode->i_dquot[cnt] != NODQUOT) + dquot_free_reserved_space(inode->i_dquot[cnt], number); + } + spin_unlock(&dq_data_lock); + +out_unlock: + up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); +out: + return; +} +EXPORT_SYMBOL(dquot_release_reserved_space); + /* * This operation can block, but only after everything is updated */ @@ -1529,6 +1612,19 @@ int dquot_free_inode(const struct inode *inode, qsize_t number) } /* + * call back function, get reserved quota space from underlying fs + */ +qsize_t dquot_get_reserved_space(struct inode *inode) +{ + qsize_t reserved_space = 0; + + if (sb_any_quota_active(inode->i_sb) && + inode->i_sb->dq_op->get_reserved_space) + reserved_space = inode->i_sb->dq_op->get_reserved_space(inode); + return reserved_space; +} + +/* * Transfer the number of inode and blocks from one diskquota to an other. 
* * This operation can block, but only after everything is updated @@ -1536,7 +1632,8 @@ int dquot_free_inode(const struct inode *inode, qsize_t number) */ int dquot_transfer(struct inode *inode, struct iattr *iattr) { - qsize_t space; + qsize_t space, cur_space; + qsize_t rsv_space = 0; struct dquot *transfer_from[MAXQUOTAS]; struct dquot *transfer_to[MAXQUOTAS]; int cnt, ret = QUOTA_OK; @@ -1575,7 +1672,9 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr) goto put_all; } spin_lock(&dq_data_lock); - space = inode_get_bytes(inode); + cur_space = inode_get_bytes(inode); + rsv_space = dquot_get_reserved_space(inode); + space = cur_space + rsv_space; /* Build the transfer_from list and check the limits */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (transfer_to[cnt] == NODQUOT) @@ -1604,11 +1703,14 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr) warntype_from_space[cnt] = info_bdq_free(transfer_from[cnt], space); dquot_decr_inodes(transfer_from[cnt], 1); - dquot_decr_space(transfer_from[cnt], space); + dquot_decr_space(transfer_from[cnt], cur_space); + dquot_free_reserved_space(transfer_from[cnt], + rsv_space); } dquot_incr_inodes(transfer_to[cnt], 1); - dquot_incr_space(transfer_to[cnt], space); + dquot_incr_space(transfer_to[cnt], cur_space); + dquot_resv_space(transfer_to[cnt], rsv_space); inode->i_dquot[cnt] = transfer_to[cnt]; } diff --git a/include/linux/quota.h b/include/linux/quota.h index 54b837f..a510d91 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -311,6 +311,12 @@ struct dquot_operations { int (*write_info) (struct super_block *, int); /* Write of quota "superblock" */ /* reserve quota for delayed block allocation */ int (*reserve_space) (struct inode *, qsize_t, int); + /* claim reserved quota for delayed alloc */ + int (*claim_space) (struct inode *, qsize_t); + /* release rsved quota for delayed alloc */ + void (*release_rsv) (struct inode *, qsize_t); + /* get reserved quota for delayed alloc */ + qsize_t (*get_reserved_space) (struct inode *); }; /* Operations handling requests from userspace */ diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index 3e3a0d2..7369d04 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -35,6 +35,11 @@ void dquot_destroy(struct dquot *dquot); int dquot_alloc_space(struct inode *inode, qsize_t number, int prealloc); int dquot_alloc_inode(const struct inode *inode, qsize_t number); +int dquot_reserve_space(struct inode *inode, qsize_t number, int prealloc); +int dquot_claim_space(struct inode *inode, qsize_t number); +void dquot_release_reserved_space(struct inode *inode, qsize_t number); +qsize_t dquot_get_reserved_space(struct inode *inode); + int dquot_free_space(struct inode *inode, qsize_t number); int dquot_free_inode(const struct inode *inode, qsize_t number); @@ -203,6 +208,31 @@ static inline int vfs_dq_alloc_inode(struct inode *inode) return 0; } +/* + * Convert in-memory reserved quotas to real consumed quotas + */ +static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr) +{ + if (sb_any_quota_active(inode->i_sb)) { + if (inode->i_sb->dq_op->claim_space(inode, nr) == NO_QUOTA) + return 1; + } else + inode_add_bytes(inode, nr); + + mark_inode_dirty(inode); + return 0; +} + +/* + * Release reserved (in-memory) quotas + */ +static inline +void vfs_dq_release_reservation_space(struct inode *inode, qsize_t nr) +{ + if (sb_any_quota_active(inode->i_sb)) + inode->i_sb->dq_op->release_rsv(inode, nr); +} + static inline void 
vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr) { if (sb_any_quota_active(inode->i_sb)) @@ -354,6 +384,17 @@ static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr) return 0; } +static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr) +{ + return vfs_dq_alloc_space(inode, nr); +} + +static inline +int vfs_dq_release_reservation_space(struct inode *inode, qsize_t nr) +{ + return 0; +} + static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr) { inode_sub_bytes(inode, nr); @@ -397,6 +438,18 @@ static inline int vfs_dq_reserve_block(struct inode *inode, qsize_t nr) nr << inode->i_blkbits); } +static inline int vfs_dq_claim_block(struct inode *inode, qsize_t nr) +{ + return vfs_dq_claim_space(inode, + nr << inode->i_blkbits); +} + +static inline +void vfs_dq_release_reservation_block(struct inode *inode, qsize_t nr) +{ + vfs_dq_release_reservation_space(inode, nr << inode->i_blkbits); +} + static inline void vfs_dq_free_block_nodirty(struct inode *inode, qsize_t nr) { vfs_dq_free_space_nodirty(inode, nr << inode->i_sb->s_blocksize_bits); -- cgit v0.10.2 From 9900ba3487f9ba392db30e12d210f768a90abb13 Mon Sep 17 00:00:00 2001 From: Mingming Cao Date: Wed, 14 Jan 2009 16:18:57 +0100 Subject: quota: Use inode->i_blkbits to get block bits Andrew has suggested to use inode->i_blkbits to get the block bits info, rather than use super block's blockbits. That should be faster and emit less code. Signed-off-by: Mingming Cao Signed-off-by: Jan Kara diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index 7369d04..69b502e 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -410,38 +410,32 @@ static inline void vfs_dq_free_space(struct inode *inode, qsize_t nr) static inline int vfs_dq_prealloc_block_nodirty(struct inode *inode, qsize_t nr) { - return vfs_dq_prealloc_space_nodirty(inode, - nr << inode->i_sb->s_blocksize_bits); + return vfs_dq_prealloc_space_nodirty(inode, nr << inode->i_blkbits); } static inline int vfs_dq_prealloc_block(struct inode *inode, qsize_t nr) { - return vfs_dq_prealloc_space(inode, - nr << inode->i_sb->s_blocksize_bits); + return vfs_dq_prealloc_space(inode, nr << inode->i_blkbits); } static inline int vfs_dq_alloc_block_nodirty(struct inode *inode, qsize_t nr) { - return vfs_dq_alloc_space_nodirty(inode, - nr << inode->i_sb->s_blocksize_bits); + return vfs_dq_alloc_space_nodirty(inode, nr << inode->i_blkbits); } static inline int vfs_dq_alloc_block(struct inode *inode, qsize_t nr) { - return vfs_dq_alloc_space(inode, - nr << inode->i_sb->s_blocksize_bits); + return vfs_dq_alloc_space(inode, nr << inode->i_blkbits); } static inline int vfs_dq_reserve_block(struct inode *inode, qsize_t nr) { - return vfs_dq_reserve_space(inode, - nr << inode->i_blkbits); + return vfs_dq_reserve_space(inode, nr << inode->i_blkbits); } static inline int vfs_dq_claim_block(struct inode *inode, qsize_t nr) { - return vfs_dq_claim_space(inode, - nr << inode->i_blkbits); + return vfs_dq_claim_space(inode, nr << inode->i_blkbits); } static inline @@ -452,12 +446,12 @@ void vfs_dq_release_reservation_block(struct inode *inode, qsize_t nr) static inline void vfs_dq_free_block_nodirty(struct inode *inode, qsize_t nr) { - vfs_dq_free_space_nodirty(inode, nr << inode->i_sb->s_blocksize_bits); + vfs_dq_free_space_nodirty(inode, nr << inode->i_blkbits); } static inline void vfs_dq_free_block(struct inode *inode, qsize_t nr) { - vfs_dq_free_space(inode, nr << inode->i_sb->s_blocksize_bits); + vfs_dq_free_space(inode, 
nr << inode->i_blkbits); } /* -- cgit v0.10.2 From 08d0350ce9d63b19b62498c7b6421c2a95246b95 Mon Sep 17 00:00:00 2001 From: Mingming Cao Date: Wed, 14 Jan 2009 16:19:32 +0100 Subject: quota: Move EXPORT_SYMBOL immediately next to the functions/varibles According to checkpatch: EXPORT_SYMBOL(foo); should immediately follow its function/variable Signed-off-by: Mingming Cao Signed-off-by: Jan Kara diff --git a/fs/dquot.c b/fs/dquot.c index 2916f91..28aa146 100644 --- a/fs/dquot.c +++ b/fs/dquot.c @@ -132,6 +132,7 @@ static DEFINE_SPINLOCK(dq_list_lock); static DEFINE_SPINLOCK(dq_state_lock); DEFINE_SPINLOCK(dq_data_lock); +EXPORT_SYMBOL(dq_data_lock); static char *quotatypes[] = INITQFNAMES; static struct quota_format_type *quota_formats; /* List of registered formats */ @@ -148,6 +149,7 @@ int register_quota_format(struct quota_format_type *fmt) spin_unlock(&dq_list_lock); return 0; } +EXPORT_SYMBOL(register_quota_format); void unregister_quota_format(struct quota_format_type *fmt) { @@ -159,6 +161,7 @@ void unregister_quota_format(struct quota_format_type *fmt) *actqf = (*actqf)->qf_next; spin_unlock(&dq_list_lock); } +EXPORT_SYMBOL(unregister_quota_format); static struct quota_format_type *find_quota_format(int id) { @@ -215,6 +218,7 @@ static unsigned int dq_hash_bits, dq_hash_mask; static struct hlist_head *dquot_hash; struct dqstats dqstats; +EXPORT_SYMBOL(dqstats); static inline unsigned int hashfn(const struct super_block *sb, unsigned int id, int type) @@ -309,6 +313,7 @@ int dquot_mark_dquot_dirty(struct dquot *dquot) spin_unlock(&dq_list_lock); return 0; } +EXPORT_SYMBOL(dquot_mark_dquot_dirty); /* This function needs dq_list_lock */ static inline int clear_dquot_dirty(struct dquot *dquot) @@ -360,6 +365,7 @@ out_iolock: mutex_unlock(&dquot->dq_lock); return ret; } +EXPORT_SYMBOL(dquot_acquire); /* * Write dquot to disk @@ -389,6 +395,7 @@ out_sem: mutex_unlock(&dqopt->dqio_mutex); return ret; } +EXPORT_SYMBOL(dquot_commit); /* * Release dquot @@ -417,6 +424,7 @@ out_dqlock: mutex_unlock(&dquot->dq_lock); return ret; } +EXPORT_SYMBOL(dquot_release); void dquot_destroy(struct dquot *dquot) { @@ -516,6 +524,7 @@ out: mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return ret; } +EXPORT_SYMBOL(dquot_scan_active); int vfs_quota_sync(struct super_block *sb, int type) { @@ -563,6 +572,7 @@ int vfs_quota_sync(struct super_block *sb, int type) return 0; } +EXPORT_SYMBOL(vfs_quota_sync); /* Free unused dquots from cache */ static void prune_dqcache(int count) @@ -672,6 +682,7 @@ we_slept: put_dquot_last(dquot); spin_unlock(&dq_list_lock); } +EXPORT_SYMBOL(dqput); struct dquot *dquot_alloc(struct super_block *sb, int type) { @@ -767,6 +778,7 @@ out: return dquot; } +EXPORT_SYMBOL(dqget); static int dqinit_needed(struct inode *inode, int type) { @@ -1282,6 +1294,7 @@ out_err: dqput(got[cnt]); return ret; } +EXPORT_SYMBOL(dquot_initialize); /* * Release all quotas referenced by inode @@ -1302,6 +1315,7 @@ int dquot_drop(struct inode *inode) dqput(put[cnt]); return 0; } +EXPORT_SYMBOL(dquot_drop); /* Wrapper to remove references to quota structures from inode */ void vfs_dq_drop(struct inode *inode) @@ -1324,6 +1338,7 @@ void vfs_dq_drop(struct inode *inode) inode->i_sb->dq_op->drop(inode); } } +EXPORT_SYMBOL(vfs_dq_drop); /* * Following four functions update i_blocks+i_bytes fields and @@ -1404,6 +1419,7 @@ out_unlock: out: return ret; } +EXPORT_SYMBOL(dquot_alloc_space); int dquot_reserve_space(struct inode *inode, qsize_t number, int warn) { @@ -1468,6 +1484,7 @@ warn_put_all: 
up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); return ret; } +EXPORT_SYMBOL(dquot_alloc_inode); int dquot_claim_space(struct inode *inode, qsize_t number) { @@ -1574,6 +1591,7 @@ out_sub: up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); return QUOTA_OK; } +EXPORT_SYMBOL(dquot_free_space); /* * This operation can block, but only after everything is updated @@ -1610,6 +1628,7 @@ int dquot_free_inode(const struct inode *inode, qsize_t number) up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); return QUOTA_OK; } +EXPORT_SYMBOL(dquot_free_inode); /* * call back function, get reserved quota space from underlying fs @@ -1746,6 +1765,7 @@ over_quota: ret = NO_QUOTA; goto warn_put_all; } +EXPORT_SYMBOL(dquot_transfer); /* Wrapper for transferring ownership of an inode */ int vfs_dq_transfer(struct inode *inode, struct iattr *iattr) @@ -1757,7 +1777,7 @@ int vfs_dq_transfer(struct inode *inode, struct iattr *iattr) } return 0; } - +EXPORT_SYMBOL(vfs_dq_transfer); /* * Write info of quota file to disk @@ -1772,6 +1792,7 @@ int dquot_commit_info(struct super_block *sb, int type) mutex_unlock(&dqopt->dqio_mutex); return ret; } +EXPORT_SYMBOL(dquot_commit_info); /* * Definitions of diskquota operations. @@ -1924,13 +1945,14 @@ put_inodes: } return ret; } +EXPORT_SYMBOL(vfs_quota_disable); int vfs_quota_off(struct super_block *sb, int type, int remount) { return vfs_quota_disable(sb, type, remount ? DQUOT_SUSPENDED : (DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED)); } - +EXPORT_SYMBOL(vfs_quota_off); /* * Turn quotas on on a device */ @@ -2087,6 +2109,7 @@ int vfs_quota_on_path(struct super_block *sb, int type, int format_id, DQUOT_LIMITS_ENABLED); return error; } +EXPORT_SYMBOL(vfs_quota_on_path); int vfs_quota_on(struct super_block *sb, int type, int format_id, char *name, int remount) @@ -2104,6 +2127,7 @@ int vfs_quota_on(struct super_block *sb, int type, int format_id, char *name, } return error; } +EXPORT_SYMBOL(vfs_quota_on); /* * More powerful function for turning on quotas allowing setting @@ -2150,6 +2174,7 @@ out_lock: load_quota: return vfs_load_quota_inode(inode, type, format_id, flags); } +EXPORT_SYMBOL(vfs_quota_enable); /* * This function is used when filesystem needs to initialize quotas @@ -2179,6 +2204,7 @@ out: dput(dentry); return error; } +EXPORT_SYMBOL(vfs_quota_on_mount); /* Wrapper to turn on quotas when remounting rw */ int vfs_dq_quota_on_remount(struct super_block *sb) @@ -2195,6 +2221,7 @@ int vfs_dq_quota_on_remount(struct super_block *sb) } return ret; } +EXPORT_SYMBOL(vfs_dq_quota_on_remount); static inline qsize_t qbtos(qsize_t blocks) { @@ -2236,6 +2263,7 @@ int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d return 0; } +EXPORT_SYMBOL(vfs_get_dqblk); /* Generic routine for setting common part of quota structure */ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di) @@ -2327,6 +2355,7 @@ int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d out: return rc; } +EXPORT_SYMBOL(vfs_set_dqblk); /* Generic routine for getting common part of quota file information */ int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) @@ -2348,6 +2377,7 @@ int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return 0; } +EXPORT_SYMBOL(vfs_get_dqinfo); /* Generic routine for setting common part of quota file information */ int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) @@ -2376,6 +2406,7 @@ out: 
mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return err; } +EXPORT_SYMBOL(vfs_set_dqinfo); struct quotactl_ops vfs_quotactl_ops = { .quota_on = vfs_quota_on, @@ -2531,37 +2562,3 @@ static int __init dquot_init(void) return 0; } module_init(dquot_init); - -EXPORT_SYMBOL(register_quota_format); -EXPORT_SYMBOL(unregister_quota_format); -EXPORT_SYMBOL(dqstats); -EXPORT_SYMBOL(dq_data_lock); -EXPORT_SYMBOL(vfs_quota_enable); -EXPORT_SYMBOL(vfs_quota_on); -EXPORT_SYMBOL(vfs_quota_on_path); -EXPORT_SYMBOL(vfs_quota_on_mount); -EXPORT_SYMBOL(vfs_quota_disable); -EXPORT_SYMBOL(vfs_quota_off); -EXPORT_SYMBOL(dquot_scan_active); -EXPORT_SYMBOL(vfs_quota_sync); -EXPORT_SYMBOL(vfs_get_dqinfo); -EXPORT_SYMBOL(vfs_set_dqinfo); -EXPORT_SYMBOL(vfs_get_dqblk); -EXPORT_SYMBOL(vfs_set_dqblk); -EXPORT_SYMBOL(dquot_commit); -EXPORT_SYMBOL(dquot_commit_info); -EXPORT_SYMBOL(dquot_acquire); -EXPORT_SYMBOL(dquot_release); -EXPORT_SYMBOL(dquot_mark_dquot_dirty); -EXPORT_SYMBOL(dquot_initialize); -EXPORT_SYMBOL(dquot_drop); -EXPORT_SYMBOL(vfs_dq_drop); -EXPORT_SYMBOL(dqget); -EXPORT_SYMBOL(dqput); -EXPORT_SYMBOL(dquot_alloc_space); -EXPORT_SYMBOL(dquot_alloc_inode); -EXPORT_SYMBOL(dquot_free_space); -EXPORT_SYMBOL(dquot_free_inode); -EXPORT_SYMBOL(dquot_transfer); -EXPORT_SYMBOL(vfs_dq_transfer); -EXPORT_SYMBOL(vfs_dq_quota_on_remount); -- cgit v0.10.2 From a219ce3748bbc596cec85c44754b3f6b994f1e1d Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 12 Jan 2009 19:03:19 +0100 Subject: ext3: Remove unnecessary quota functions ext3_dquot_initialize() and ext3_dquot_drop() is no longer needed because of modified quota locking. Signed-off-by: Jan Kara diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 4a97041..41e6ae6 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -707,8 +707,6 @@ static int bdev_try_to_free_page(struct super_block *sb, struct page *page, #define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group") #define QTYPE2MOPT(on, t) ((t)==USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA)) -static int ext3_dquot_initialize(struct inode *inode, int type); -static int ext3_dquot_drop(struct inode *inode); static int ext3_write_dquot(struct dquot *dquot); static int ext3_acquire_dquot(struct dquot *dquot); static int ext3_release_dquot(struct dquot *dquot); @@ -723,8 +721,8 @@ static ssize_t ext3_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off); static struct dquot_operations ext3_quota_operations = { - .initialize = ext3_dquot_initialize, - .drop = ext3_dquot_drop, + .initialize = dquot_initialize, + .drop = dquot_drop, .alloc_space = dquot_alloc_space, .alloc_inode = dquot_alloc_inode, .free_space = dquot_free_space, @@ -2714,44 +2712,6 @@ static inline struct inode *dquot_to_inode(struct dquot *dquot) return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type]; } -static int ext3_dquot_initialize(struct inode *inode, int type) -{ - handle_t *handle; - int ret, err; - - /* We may create quota structure so we need to reserve enough blocks */ - handle = ext3_journal_start(inode, 2*EXT3_QUOTA_INIT_BLOCKS(inode->i_sb)); - if (IS_ERR(handle)) - return PTR_ERR(handle); - ret = dquot_initialize(inode, type); - err = ext3_journal_stop(handle); - if (!ret) - ret = err; - return ret; -} - -static int ext3_dquot_drop(struct inode *inode) -{ - handle_t *handle; - int ret, err; - - /* We may delete quota structure so we need to reserve enough blocks */ - handle = ext3_journal_start(inode, 2*EXT3_QUOTA_DEL_BLOCKS(inode->i_sb)); - if (IS_ERR(handle)) { - /* - * We call dquot_drop() anyway to at least 
release references - * to quota structures so that umount does not hang. - */ - dquot_drop(inode); - return PTR_ERR(handle); - } - ret = dquot_drop(inode); - err = ext3_journal_stop(handle); - if (!ret) - ret = err; - return ret; -} - static int ext3_write_dquot(struct dquot *dquot) { int ret, err; -- cgit v0.10.2 From edf7245362f7b8b8c76c4a6cad3604bf80884848 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 12 Jan 2009 19:05:26 +0100 Subject: ext4: Remove unnecessary quota functions ext4_dquot_initialize() and ext4_dquot_drop() is no longer needed because of modified quota locking. Signed-off-by: Jan Kara diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 39d1993..41f8794 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -926,8 +926,6 @@ static int bdev_try_to_free_page(struct super_block *sb, struct page *page, gfp_ #define QTYPE2NAME(t) ((t) == USRQUOTA ? "user" : "group") #define QTYPE2MOPT(on, t) ((t) == USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA)) -static int ext4_dquot_initialize(struct inode *inode, int type); -static int ext4_dquot_drop(struct inode *inode); static int ext4_write_dquot(struct dquot *dquot); static int ext4_acquire_dquot(struct dquot *dquot); static int ext4_release_dquot(struct dquot *dquot); @@ -942,8 +940,8 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off); static struct dquot_operations ext4_quota_operations = { - .initialize = ext4_dquot_initialize, - .drop = ext4_dquot_drop, + .initialize = dquot_initialize, + .drop = dquot_drop, .alloc_space = dquot_alloc_space, .alloc_inode = dquot_alloc_inode, .free_space = dquot_free_space, @@ -3380,44 +3378,6 @@ static inline struct inode *dquot_to_inode(struct dquot *dquot) return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type]; } -static int ext4_dquot_initialize(struct inode *inode, int type) -{ - handle_t *handle; - int ret, err; - - /* We may create quota structure so we need to reserve enough blocks */ - handle = ext4_journal_start(inode, 2*EXT4_QUOTA_INIT_BLOCKS(inode->i_sb)); - if (IS_ERR(handle)) - return PTR_ERR(handle); - ret = dquot_initialize(inode, type); - err = ext4_journal_stop(handle); - if (!ret) - ret = err; - return ret; -} - -static int ext4_dquot_drop(struct inode *inode) -{ - handle_t *handle; - int ret, err; - - /* We may delete quota structure so we need to reserve enough blocks */ - handle = ext4_journal_start(inode, 2*EXT4_QUOTA_DEL_BLOCKS(inode->i_sb)); - if (IS_ERR(handle)) { - /* - * We call dquot_drop() anyway to at least release references - * to quota structures so that umount does not hang. - */ - dquot_drop(inode); - return PTR_ERR(handle); - } - ret = dquot_drop(inode); - err = ext4_journal_stop(handle); - if (!ret) - ret = err; - return ret; -} - static int ext4_write_dquot(struct dquot *dquot) { int ret, err; -- cgit v0.10.2 From 643d00ccc311664188c8209bf8b596a30e139c3a Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 12 Jan 2009 19:07:14 +0100 Subject: reiserfs: Remove unnecessary quota functions reiserfs_dquot_initialize() and reiserfs_dquot_drop() is no longer needed because of modified quota locking. 
Signed-off-by: Jan Kara diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index f3c820b..317c035 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -629,8 +629,6 @@ static const struct super_operations reiserfs_sops = { #ifdef CONFIG_QUOTA #define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group") -static int reiserfs_dquot_initialize(struct inode *, int); -static int reiserfs_dquot_drop(struct inode *); static int reiserfs_write_dquot(struct dquot *); static int reiserfs_acquire_dquot(struct dquot *); static int reiserfs_release_dquot(struct dquot *); @@ -639,8 +637,8 @@ static int reiserfs_write_info(struct super_block *, int); static int reiserfs_quota_on(struct super_block *, int, int, char *, int); static struct dquot_operations reiserfs_quota_operations = { - .initialize = reiserfs_dquot_initialize, - .drop = reiserfs_dquot_drop, + .initialize = dquot_initialize, + .drop = dquot_drop, .alloc_space = dquot_alloc_space, .alloc_inode = dquot_alloc_inode, .free_space = dquot_free_space, @@ -1896,58 +1894,6 @@ static int reiserfs_statfs(struct dentry *dentry, struct kstatfs *buf) } #ifdef CONFIG_QUOTA -static int reiserfs_dquot_initialize(struct inode *inode, int type) -{ - struct reiserfs_transaction_handle th; - int ret, err; - - /* We may create quota structure so we need to reserve enough blocks */ - reiserfs_write_lock(inode->i_sb); - ret = - journal_begin(&th, inode->i_sb, - 2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb)); - if (ret) - goto out; - ret = dquot_initialize(inode, type); - err = - journal_end(&th, inode->i_sb, - 2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb)); - if (!ret && err) - ret = err; - out: - reiserfs_write_unlock(inode->i_sb); - return ret; -} - -static int reiserfs_dquot_drop(struct inode *inode) -{ - struct reiserfs_transaction_handle th; - int ret, err; - - /* We may delete quota structure so we need to reserve enough blocks */ - reiserfs_write_lock(inode->i_sb); - ret = - journal_begin(&th, inode->i_sb, - 2 * REISERFS_QUOTA_DEL_BLOCKS(inode->i_sb)); - if (ret) { - /* - * We call dquot_drop() anyway to at least release references - * to quota structures so that umount does not hang. - */ - dquot_drop(inode); - goto out; - } - ret = dquot_drop(inode); - err = - journal_end(&th, inode->i_sb, - 2 * REISERFS_QUOTA_DEL_BLOCKS(inode->i_sb)); - if (!ret && err) - ret = err; - out: - reiserfs_write_unlock(inode->i_sb); - return ret; -} - static int reiserfs_write_dquot(struct dquot *dquot) { struct reiserfs_transaction_handle th; -- cgit v0.10.2 From 60e58e0f30e723464c2a7d34b71b8675566c572d Mon Sep 17 00:00:00 2001 From: Mingming Cao Date: Thu, 22 Jan 2009 18:13:05 +0100 Subject: ext4: quota reservation for delayed allocation Uses quota reservation/claim/release to handle quota properly for delayed allocation in the three steps: 1) quotas are reserved when data being copied to cache when block allocation is defered 2) when new blocks are allocated. reserved quotas are converted to the real allocated quota, 2) over-booked quotas for metadata blocks are released back. 
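As a rough sketch of how those three steps map onto the quota calls (illustrative only, not part of the patch; the function names other than the vfs_dq_*() wrappers are simplified stand-ins for the ext4 paths changed below, such as ext4_da_reserve_space(), ext4_mb_mark_diskspace_used() and ext4_da_update_reserve_space()):

/* Sketch only; assumes the usual kernel context (<linux/quotaops.h>). */

static int demo_da_write_begin(struct inode *inode, qsize_t data_blocks,
			       qsize_t meta_estimate)
{
	/* 1) data is copied into the page cache and block allocation is
	 *    deferred: only an in-memory quota reservation is taken. */
	if (vfs_dq_reserve_block(inode, data_blocks + meta_estimate))
		return -EDQUOT;
	return 0;
}

static void demo_da_writeout(struct inode *inode, qsize_t allocated,
			     qsize_t meta_estimate, qsize_t meta_used)
{
	/* 2) blocks are really allocated: convert the reservation into
	 *    real quota usage. */
	vfs_dq_claim_block(inode, allocated);

	/* 3) give back whatever part of the metadata reservation turned
	 *    out to be over-booked. */
	if (meta_estimate > meta_used)
		vfs_dq_release_reservation_block(inode,
						 meta_estimate - meta_used);
}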
Signed-off-by: Mingming Cao Acked-by: "Theodore Ts'o" Signed-off-by: Jan Kara diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index b0c87dc..6083bb38 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -20,6 +20,7 @@ #include #include #include +#include #include "ext4_i.h" /* @@ -1098,6 +1099,7 @@ extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks); extern int ext4_block_truncate_page(handle_t *handle, struct address_space *mapping, loff_t from); extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page); +extern qsize_t ext4_get_reserved_space(struct inode *inode); /* ioctl.c */ extern long ext4_ioctl(struct file *, unsigned int, unsigned long); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index c7fed5b..8290cfb 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -975,6 +975,17 @@ out: return err; } +qsize_t ext4_get_reserved_space(struct inode *inode) +{ + unsigned long long total; + + spin_lock(&EXT4_I(inode)->i_block_reservation_lock); + total = EXT4_I(inode)->i_reserved_data_blocks + + EXT4_I(inode)->i_reserved_meta_blocks; + spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); + + return total; +} /* * Calculate the number of metadata blocks need to reserve * to allocate @blocks for non extent file based file @@ -1036,8 +1047,14 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used) /* update per-inode reservations */ BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks); EXT4_I(inode)->i_reserved_data_blocks -= used; - spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); + + /* + * free those over-booking quota for metadata blocks + */ + + if (mdb_free) + vfs_dq_release_reservation_block(inode, mdb_free); } /* @@ -1553,8 +1570,8 @@ static int ext4_journalled_write_end(struct file *file, static int ext4_da_reserve_space(struct inode *inode, int nrblocks) { int retries = 0; - struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); - unsigned long md_needed, mdblocks, total = 0; + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + unsigned long md_needed, mdblocks, total = 0; /* * recalculate the amount of metadata blocks to reserve @@ -1570,12 +1587,23 @@ repeat: md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks; total = md_needed + nrblocks; + /* + * Make quota reservation here to prevent quota overflow + * later. Real quota accounting is done at pages writeout + * time. 
+ */ + if (vfs_dq_reserve_block(inode, total)) { + spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); + return -EDQUOT; + } + if (ext4_claim_free_blocks(sbi, total)) { spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); if (ext4_should_retry_alloc(inode->i_sb, &retries)) { yield(); goto repeat; } + vfs_dq_release_reservation_block(inode, total); return -ENOSPC; } EXT4_I(inode)->i_reserved_data_blocks += nrblocks; @@ -1629,6 +1657,8 @@ static void ext4_da_release_space(struct inode *inode, int to_free) BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks); EXT4_I(inode)->i_reserved_meta_blocks = mdb; spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); + + vfs_dq_release_reservation_block(inode, release); } static void ext4_da_page_release_reservation(struct page *page, diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 9f61e62..4de4209 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -3086,9 +3086,12 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED)) /* release all the reserved blocks if non delalloc */ percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks); - else + else { percpu_counter_sub(&sbi->s_dirtyblocks_counter, ac->ac_b_ex.fe_len); + /* convert reserved quota blocks to real quota blocks */ + vfs_dq_claim_block(ac->ac_inode, ac->ac_b_ex.fe_len); + } if (sbi->s_log_groups_per_flex) { ext4_group_t flex_group = ext4_flex_group(sbi, @@ -4544,7 +4547,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, struct ext4_sb_info *sbi; struct super_block *sb; ext4_fsblk_t block = 0; - unsigned int inquota; + unsigned int inquota = 0; unsigned int reserv_blks = 0; sb = ar->inode->i_sb; @@ -4562,9 +4565,17 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, (unsigned long long) ar->pleft, (unsigned long long) ar->pright); - if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag) { - /* - * With delalloc we already reserved the blocks + /* + * For delayed allocation, we could skip the ENOSPC and + * EDQUOT check, as blocks and quotas have been already + * reserved when data being copied into pagecache. + */ + if (EXT4_I(ar->inode)->i_delalloc_reserved_flag) + ar->flags |= EXT4_MB_DELALLOC_RESERVED; + else { + /* Without delayed allocation we need to verify + * there is enough free blocks to do block allocation + * and verify allocation doesn't exceed the quota limits. 
*/ while (ar->len && ext4_claim_free_blocks(sbi, ar->len)) { /* let others to free the space */ @@ -4576,19 +4587,16 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, return 0; } reserv_blks = ar->len; + while (ar->len && DQUOT_ALLOC_BLOCK(ar->inode, ar->len)) { + ar->flags |= EXT4_MB_HINT_NOPREALLOC; + ar->len--; + } + inquota = ar->len; + if (ar->len == 0) { + *errp = -EDQUOT; + goto out3; + } } - while (ar->len && DQUOT_ALLOC_BLOCK(ar->inode, ar->len)) { - ar->flags |= EXT4_MB_HINT_NOPREALLOC; - ar->len--; - } - if (ar->len == 0) { - *errp = -EDQUOT; - goto out3; - } - inquota = ar->len; - - if (EXT4_I(ar->inode)->i_delalloc_reserved_flag) - ar->flags |= EXT4_MB_DELALLOC_RESERVED; ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS); if (!ac) { @@ -4654,7 +4662,7 @@ repeat: out2: kmem_cache_free(ext4_ac_cachep, ac); out1: - if (ar->len < inquota) + if (inquota && ar->len < inquota) DQUOT_FREE_BLOCK(ar->inode, inquota - ar->len); out3: if (!ar->len) { diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 41f8794..5a238e9 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -943,6 +943,10 @@ static struct dquot_operations ext4_quota_operations = { .initialize = dquot_initialize, .drop = dquot_drop, .alloc_space = dquot_alloc_space, + .reserve_space = dquot_reserve_space, + .claim_space = dquot_claim_space, + .release_rsv = dquot_release_reserved_space, + .get_reserved_space = ext4_get_reserved_space, .alloc_inode = dquot_alloc_inode, .free_space = dquot_free_space, .free_inode = dquot_free_inode, -- cgit v0.10.2 From 884d179dff3aa98a73c3ba9dee05fd6050d664f0 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 26 Jan 2009 15:28:09 +0100 Subject: quota: Move quota files into separate directory Quota subsystem has more and more files. It's time to create a dir for it. Signed-off-by: Jan Kara diff --git a/fs/Kconfig b/fs/Kconfig index 93945dd..cef8b18 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -56,61 +56,7 @@ endif # BLOCK source "fs/notify/Kconfig" -config QUOTA - bool "Quota support" - help - If you say Y here, you will be able to set per user limits for disk - usage (also called disk quotas). Currently, it works for the - ext2, ext3, and reiserfs file system. ext3 also supports journalled - quotas for which you don't need to run quotacheck(8) after an unclean - shutdown. - For further details, read the Quota mini-HOWTO, available from - , or the documentation provided - with the quota tools. Probably the quota support is only useful for - multi user systems. If unsure, say N. - -config QUOTA_NETLINK_INTERFACE - bool "Report quota messages through netlink interface" - depends on QUOTA && NET - help - If you say Y here, quota warnings (about exceeding softlimit, reaching - hardlimit, etc.) will be reported through netlink interface. If unsure, - say Y. - -config PRINT_QUOTA_WARNING - bool "Print quota warnings to console (OBSOLETE)" - depends on QUOTA - default y - help - If you say Y here, quota warnings (about exceeding softlimit, reaching - hardlimit, etc.) will be printed to the process' controlling terminal. - Note that this behavior is currently deprecated and may go away in - future. Please use notification via netlink socket instead. - -# Generic support for tree structured quota files. Seleted when needed. -config QUOTA_TREE - tristate - -config QFMT_V1 - tristate "Old quota format support" - depends on QUOTA - help - This quota format was (is) used by kernels earlier than 2.4.22. If - you have quota working and you don't want to convert to new quota - format say Y here. 
- -config QFMT_V2 - tristate "Quota format v2 support" - depends on QUOTA - select QUOTA_TREE - help - This quota format allows using quotas with 32-bit UIDs/GIDs. If you - need this functionality say Y here. - -config QUOTACTL - bool - depends on XFS_QUOTA || QUOTA - default y +source "fs/quota/Kconfig" source "fs/autofs/Kconfig" source "fs/autofs4/Kconfig" diff --git a/fs/Makefile b/fs/Makefile index dc20db3..6e82a30 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -51,11 +51,7 @@ obj-$(CONFIG_FS_POSIX_ACL) += posix_acl.o xattr_acl.o obj-$(CONFIG_NFS_COMMON) += nfs_common/ obj-$(CONFIG_GENERIC_ACL) += generic_acl.o -obj-$(CONFIG_QUOTA) += dquot.o -obj-$(CONFIG_QFMT_V1) += quota_v1.o -obj-$(CONFIG_QFMT_V2) += quota_v2.o -obj-$(CONFIG_QUOTA_TREE) += quota_tree.o -obj-$(CONFIG_QUOTACTL) += quota.o +obj-y += quota/ obj-$(CONFIG_PROC_FS) += proc/ obj-y += partitions/ diff --git a/fs/dquot.c b/fs/dquot.c deleted file mode 100644 index 28aa146..0000000 --- a/fs/dquot.c +++ /dev/null @@ -1,2564 +0,0 @@ -/* - * Implementation of the diskquota system for the LINUX operating system. QUOTA - * is implemented using the BSD system call interface as the means of - * communication with the user level. This file contains the generic routines - * called by the different filesystems on allocation of an inode or block. - * These routines take care of the administration needed to have a consistent - * diskquota tracking system. The ideas of both user and group quotas are based - * on the Melbourne quota system as used on BSD derived systems. The internal - * implementation is based on one of the several variants of the LINUX - * inode-subsystem with added complexity of the diskquota system. - * - * Author: Marco van Wieringen - * - * Fixes: Dmitry Gorodchanin , 11 Feb 96 - * - * Revised list management to avoid races - * -- Bill Hawes, , 9/98 - * - * Fixed races in dquot_transfer(), dqget() and dquot_alloc_...(). - * As the consequence the locking was moved from dquot_decr_...(), - * dquot_incr_...() to calling functions. - * invalidate_dquots() now writes modified dquots. - * Serialized quota_off() and quota_on() for mount point. - * Fixed a few bugs in grow_dquots(). - * Fixed deadlock in write_dquot() - we no longer account quotas on - * quota files - * remove_dquot_ref() moved to inode.c - it now traverses through inodes - * add_dquot_ref() restarts after blocking - * Added check for bogus uid and fixed check for group in quotactl. - * Jan Kara, , sponsored by SuSE CR, 10-11/99 - * - * Used struct list_head instead of own list struct - * Invalidation of referenced dquots is no longer possible - * Improved free_dquots list management - * Quota and i_blocks are now updated in one place to avoid races - * Warnings are now delayed so we won't block in critical section - * Write updated not to require dquot lock - * Jan Kara, , 9/2000 - * - * Added dynamic quota structure allocation - * Jan Kara 12/2000 - * - * Rewritten quota interface. Implemented new quota format and - * formats registering. - * Jan Kara, , 2001,2002 - * - * New SMP locking. - * Jan Kara, , 10/2002 - * - * Added journalled quota support, fix lock inversion problems - * Jan Kara, , 2003,2004 - * - * (C) Copyright 1994 - 1997 Marco van Wieringen - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include /* for inode_lock, oddly enough.. 
*/ -#ifdef CONFIG_QUOTA_NETLINK_INTERFACE -#include -#include -#endif - -#include - -#define __DQUOT_PARANOIA - -/* - * There are three quota SMP locks. dq_list_lock protects all lists with quotas - * and quota formats, dqstats structure containing statistics about the lists - * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures and - * also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes. - * i_blocks and i_bytes updates itself are guarded by i_lock acquired directly - * in inode_add_bytes() and inode_sub_bytes(). dq_state_lock protects - * modifications of quota state (on quotaon and quotaoff) and readers who care - * about latest values take it as well. - * - * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock, - * dq_list_lock > dq_state_lock - * - * Note that some things (eg. sb pointer, type, id) doesn't change during - * the life of the dquot structure and so needn't to be protected by a lock - * - * Any operation working on dquots via inode pointers must hold dqptr_sem. If - * operation is just reading pointers from inode (or not using them at all) the - * read lock is enough. If pointers are altered function must hold write lock - * (these locking rules also apply for S_NOQUOTA flag in the inode - note that - * for altering the flag i_mutex is also needed). - * - * Each dquot has its dq_lock mutex. Locked dquots might not be referenced - * from inodes (dquot_alloc_space() and such don't check the dq_lock). - * Currently dquot is locked only when it is being read to memory (or space for - * it is being allocated) on the first dqget() and when it is being released on - * the last dqput(). The allocation and release oparations are serialized by - * the dq_lock and by checking the use count in dquot_release(). Write - * operations on dquots don't hold dq_lock as they copy data under dq_data_lock - * spinlock to internal buffers before writing. - * - * Lock ordering (including related VFS locks) is the following: - * i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock > - * dqio_mutex - * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem > - * dqptr_sem. But filesystem has to count with the fact that functions such as - * dquot_alloc_space() acquire dqptr_sem and they usually have to be called - * from inside a transaction to keep filesystem consistency after a crash. Also - * filesystems usually want to do some IO on dquot from ->mark_dirty which is - * called with dqptr_sem held. 
- * i_mutex on quota files is special (it's below dqio_mutex) - */ - -static DEFINE_SPINLOCK(dq_list_lock); -static DEFINE_SPINLOCK(dq_state_lock); -DEFINE_SPINLOCK(dq_data_lock); -EXPORT_SYMBOL(dq_data_lock); - -static char *quotatypes[] = INITQFNAMES; -static struct quota_format_type *quota_formats; /* List of registered formats */ -static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES; - -/* SLAB cache for dquot structures */ -static struct kmem_cache *dquot_cachep; - -int register_quota_format(struct quota_format_type *fmt) -{ - spin_lock(&dq_list_lock); - fmt->qf_next = quota_formats; - quota_formats = fmt; - spin_unlock(&dq_list_lock); - return 0; -} -EXPORT_SYMBOL(register_quota_format); - -void unregister_quota_format(struct quota_format_type *fmt) -{ - struct quota_format_type **actqf; - - spin_lock(&dq_list_lock); - for (actqf = "a_formats; *actqf && *actqf != fmt; actqf = &(*actqf)->qf_next); - if (*actqf) - *actqf = (*actqf)->qf_next; - spin_unlock(&dq_list_lock); -} -EXPORT_SYMBOL(unregister_quota_format); - -static struct quota_format_type *find_quota_format(int id) -{ - struct quota_format_type *actqf; - - spin_lock(&dq_list_lock); - for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next); - if (!actqf || !try_module_get(actqf->qf_owner)) { - int qm; - - spin_unlock(&dq_list_lock); - - for (qm = 0; module_names[qm].qm_fmt_id && module_names[qm].qm_fmt_id != id; qm++); - if (!module_names[qm].qm_fmt_id || request_module(module_names[qm].qm_mod_name)) - return NULL; - - spin_lock(&dq_list_lock); - for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next); - if (actqf && !try_module_get(actqf->qf_owner)) - actqf = NULL; - } - spin_unlock(&dq_list_lock); - return actqf; -} - -static void put_quota_format(struct quota_format_type *fmt) -{ - module_put(fmt->qf_owner); -} - -/* - * Dquot List Management: - * The quota code uses three lists for dquot management: the inuse_list, - * free_dquots, and dquot_hash[] array. A single dquot structure may be - * on all three lists, depending on its current state. - * - * All dquots are placed to the end of inuse_list when first created, and this - * list is used for invalidate operation, which must look at every dquot. - * - * Unused dquots (dq_count == 0) are added to the free_dquots list when freed, - * and this list is searched whenever we need an available dquot. Dquots are - * removed from the list as soon as they are used again, and - * dqstats.free_dquots gives the number of dquots on the list. When - * dquot is invalidated it's completely released from memory. - * - * Dquots with a specific identity (device, type and id) are placed on - * one of the dquot_hash[] hash chains. The provides an efficient search - * mechanism to locate a specific dquot. 
- */ - -static LIST_HEAD(inuse_list); -static LIST_HEAD(free_dquots); -static unsigned int dq_hash_bits, dq_hash_mask; -static struct hlist_head *dquot_hash; - -struct dqstats dqstats; -EXPORT_SYMBOL(dqstats); - -static inline unsigned int -hashfn(const struct super_block *sb, unsigned int id, int type) -{ - unsigned long tmp; - - tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type); - return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask; -} - -/* - * Following list functions expect dq_list_lock to be held - */ -static inline void insert_dquot_hash(struct dquot *dquot) -{ - struct hlist_head *head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type); - hlist_add_head(&dquot->dq_hash, head); -} - -static inline void remove_dquot_hash(struct dquot *dquot) -{ - hlist_del_init(&dquot->dq_hash); -} - -static inline struct dquot *find_dquot(unsigned int hashent, struct super_block *sb, unsigned int id, int type) -{ - struct hlist_node *node; - struct dquot *dquot; - - hlist_for_each (node, dquot_hash+hashent) { - dquot = hlist_entry(node, struct dquot, dq_hash); - if (dquot->dq_sb == sb && dquot->dq_id == id && dquot->dq_type == type) - return dquot; - } - return NODQUOT; -} - -/* Add a dquot to the tail of the free list */ -static inline void put_dquot_last(struct dquot *dquot) -{ - list_add_tail(&dquot->dq_free, &free_dquots); - dqstats.free_dquots++; -} - -static inline void remove_free_dquot(struct dquot *dquot) -{ - if (list_empty(&dquot->dq_free)) - return; - list_del_init(&dquot->dq_free); - dqstats.free_dquots--; -} - -static inline void put_inuse(struct dquot *dquot) -{ - /* We add to the back of inuse list so we don't have to restart - * when traversing this list and we block */ - list_add_tail(&dquot->dq_inuse, &inuse_list); - dqstats.allocated_dquots++; -} - -static inline void remove_inuse(struct dquot *dquot) -{ - dqstats.allocated_dquots--; - list_del(&dquot->dq_inuse); -} -/* - * End of list functions needing dq_list_lock - */ - -static void wait_on_dquot(struct dquot *dquot) -{ - mutex_lock(&dquot->dq_lock); - mutex_unlock(&dquot->dq_lock); -} - -static inline int dquot_dirty(struct dquot *dquot) -{ - return test_bit(DQ_MOD_B, &dquot->dq_flags); -} - -static inline int mark_dquot_dirty(struct dquot *dquot) -{ - return dquot->dq_sb->dq_op->mark_dirty(dquot); -} - -int dquot_mark_dquot_dirty(struct dquot *dquot) -{ - spin_lock(&dq_list_lock); - if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) - list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)-> - info[dquot->dq_type].dqi_dirty_list); - spin_unlock(&dq_list_lock); - return 0; -} -EXPORT_SYMBOL(dquot_mark_dquot_dirty); - -/* This function needs dq_list_lock */ -static inline int clear_dquot_dirty(struct dquot *dquot) -{ - if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags)) - return 0; - list_del_init(&dquot->dq_dirty); - return 1; -} - -void mark_info_dirty(struct super_block *sb, int type) -{ - set_bit(DQF_INFO_DIRTY_B, &sb_dqopt(sb)->info[type].dqi_flags); -} -EXPORT_SYMBOL(mark_info_dirty); - -/* - * Read dquot from disk and alloc space for it - */ - -int dquot_acquire(struct dquot *dquot) -{ - int ret = 0, ret2 = 0; - struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); - - mutex_lock(&dquot->dq_lock); - mutex_lock(&dqopt->dqio_mutex); - if (!test_bit(DQ_READ_B, &dquot->dq_flags)) - ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot); - if (ret < 0) - goto out_iolock; - set_bit(DQ_READ_B, &dquot->dq_flags); - /* Instantiate dquot if needed */ - if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && 
!dquot->dq_off) { - ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot); - /* Write the info if needed */ - if (info_dirty(&dqopt->info[dquot->dq_type])) - ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type); - if (ret < 0) - goto out_iolock; - if (ret2 < 0) { - ret = ret2; - goto out_iolock; - } - } - set_bit(DQ_ACTIVE_B, &dquot->dq_flags); -out_iolock: - mutex_unlock(&dqopt->dqio_mutex); - mutex_unlock(&dquot->dq_lock); - return ret; -} -EXPORT_SYMBOL(dquot_acquire); - -/* - * Write dquot to disk - */ -int dquot_commit(struct dquot *dquot) -{ - int ret = 0, ret2 = 0; - struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); - - mutex_lock(&dqopt->dqio_mutex); - spin_lock(&dq_list_lock); - if (!clear_dquot_dirty(dquot)) { - spin_unlock(&dq_list_lock); - goto out_sem; - } - spin_unlock(&dq_list_lock); - /* Inactive dquot can be only if there was error during read/init - * => we have better not writing it */ - if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { - ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot); - if (info_dirty(&dqopt->info[dquot->dq_type])) - ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type); - if (ret >= 0) - ret = ret2; - } -out_sem: - mutex_unlock(&dqopt->dqio_mutex); - return ret; -} -EXPORT_SYMBOL(dquot_commit); - -/* - * Release dquot - */ -int dquot_release(struct dquot *dquot) -{ - int ret = 0, ret2 = 0; - struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); - - mutex_lock(&dquot->dq_lock); - /* Check whether we are not racing with some other dqget() */ - if (atomic_read(&dquot->dq_count) > 1) - goto out_dqlock; - mutex_lock(&dqopt->dqio_mutex); - if (dqopt->ops[dquot->dq_type]->release_dqblk) { - ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot); - /* Write the info */ - if (info_dirty(&dqopt->info[dquot->dq_type])) - ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type); - if (ret >= 0) - ret = ret2; - } - clear_bit(DQ_ACTIVE_B, &dquot->dq_flags); - mutex_unlock(&dqopt->dqio_mutex); -out_dqlock: - mutex_unlock(&dquot->dq_lock); - return ret; -} -EXPORT_SYMBOL(dquot_release); - -void dquot_destroy(struct dquot *dquot) -{ - kmem_cache_free(dquot_cachep, dquot); -} -EXPORT_SYMBOL(dquot_destroy); - -static inline void do_destroy_dquot(struct dquot *dquot) -{ - dquot->dq_sb->dq_op->destroy_dquot(dquot); -} - -/* Invalidate all dquots on the list. Note that this function is called after - * quota is disabled and pointers from inodes removed so there cannot be new - * quota users. There can still be some users of quotas due to inodes being - * just deleted or pruned by prune_icache() (those are not attached to any - * list) or parallel quotactl call. We have to wait for such users. - */ -static void invalidate_dquots(struct super_block *sb, int type) -{ - struct dquot *dquot, *tmp; - -restart: - spin_lock(&dq_list_lock); - list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) { - if (dquot->dq_sb != sb) - continue; - if (dquot->dq_type != type) - continue; - /* Wait for dquot users */ - if (atomic_read(&dquot->dq_count)) { - DEFINE_WAIT(wait); - - atomic_inc(&dquot->dq_count); - prepare_to_wait(&dquot->dq_wait_unused, &wait, - TASK_UNINTERRUPTIBLE); - spin_unlock(&dq_list_lock); - /* Once dqput() wakes us up, we know it's time to free - * the dquot. - * IMPORTANT: we rely on the fact that there is always - * at most one process waiting for dquot to free. - * Otherwise dq_count would be > 1 and we would never - * wake up. 
- */ - if (atomic_read(&dquot->dq_count) > 1) - schedule(); - finish_wait(&dquot->dq_wait_unused, &wait); - dqput(dquot); - /* At this moment dquot() need not exist (it could be - * reclaimed by prune_dqcache(). Hence we must - * restart. */ - goto restart; - } - /* - * Quota now has no users and it has been written on last - * dqput() - */ - remove_dquot_hash(dquot); - remove_free_dquot(dquot); - remove_inuse(dquot); - do_destroy_dquot(dquot); - } - spin_unlock(&dq_list_lock); -} - -/* Call callback for every active dquot on given filesystem */ -int dquot_scan_active(struct super_block *sb, - int (*fn)(struct dquot *dquot, unsigned long priv), - unsigned long priv) -{ - struct dquot *dquot, *old_dquot = NULL; - int ret = 0; - - mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); - spin_lock(&dq_list_lock); - list_for_each_entry(dquot, &inuse_list, dq_inuse) { - if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) - continue; - if (dquot->dq_sb != sb) - continue; - /* Now we have active dquot so we can just increase use count */ - atomic_inc(&dquot->dq_count); - dqstats.lookups++; - spin_unlock(&dq_list_lock); - dqput(old_dquot); - old_dquot = dquot; - ret = fn(dquot, priv); - if (ret < 0) - goto out; - spin_lock(&dq_list_lock); - /* We are safe to continue now because our dquot could not - * be moved out of the inuse list while we hold the reference */ - } - spin_unlock(&dq_list_lock); -out: - dqput(old_dquot); - mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); - return ret; -} -EXPORT_SYMBOL(dquot_scan_active); - -int vfs_quota_sync(struct super_block *sb, int type) -{ - struct list_head *dirty; - struct dquot *dquot; - struct quota_info *dqopt = sb_dqopt(sb); - int cnt; - - mutex_lock(&dqopt->dqonoff_mutex); - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (type != -1 && cnt != type) - continue; - if (!sb_has_quota_active(sb, cnt)) - continue; - spin_lock(&dq_list_lock); - dirty = &dqopt->info[cnt].dqi_dirty_list; - while (!list_empty(dirty)) { - dquot = list_first_entry(dirty, struct dquot, dq_dirty); - /* Dirty and inactive can be only bad dquot... 
*/ - if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { - clear_dquot_dirty(dquot); - continue; - } - /* Now we have active dquot from which someone is - * holding reference so we can safely just increase - * use count */ - atomic_inc(&dquot->dq_count); - dqstats.lookups++; - spin_unlock(&dq_list_lock); - sb->dq_op->write_dquot(dquot); - dqput(dquot); - spin_lock(&dq_list_lock); - } - spin_unlock(&dq_list_lock); - } - - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt) - && info_dirty(&dqopt->info[cnt])) - sb->dq_op->write_info(sb, cnt); - spin_lock(&dq_list_lock); - dqstats.syncs++; - spin_unlock(&dq_list_lock); - mutex_unlock(&dqopt->dqonoff_mutex); - - return 0; -} -EXPORT_SYMBOL(vfs_quota_sync); - -/* Free unused dquots from cache */ -static void prune_dqcache(int count) -{ - struct list_head *head; - struct dquot *dquot; - - head = free_dquots.prev; - while (head != &free_dquots && count) { - dquot = list_entry(head, struct dquot, dq_free); - remove_dquot_hash(dquot); - remove_free_dquot(dquot); - remove_inuse(dquot); - do_destroy_dquot(dquot); - count--; - head = free_dquots.prev; - } -} - -/* - * This is called from kswapd when we think we need some - * more memory - */ - -static int shrink_dqcache_memory(int nr, gfp_t gfp_mask) -{ - if (nr) { - spin_lock(&dq_list_lock); - prune_dqcache(nr); - spin_unlock(&dq_list_lock); - } - return (dqstats.free_dquots / 100) * sysctl_vfs_cache_pressure; -} - -static struct shrinker dqcache_shrinker = { - .shrink = shrink_dqcache_memory, - .seeks = DEFAULT_SEEKS, -}; - -/* - * Put reference to dquot - * NOTE: If you change this function please check whether dqput_blocks() works right... - */ -void dqput(struct dquot *dquot) -{ - int ret; - - if (!dquot) - return; -#ifdef __DQUOT_PARANOIA - if (!atomic_read(&dquot->dq_count)) { - printk("VFS: dqput: trying to free free dquot\n"); - printk("VFS: device %s, dquot of %s %d\n", - dquot->dq_sb->s_id, - quotatypes[dquot->dq_type], - dquot->dq_id); - BUG(); - } -#endif - - spin_lock(&dq_list_lock); - dqstats.drops++; - spin_unlock(&dq_list_lock); -we_slept: - spin_lock(&dq_list_lock); - if (atomic_read(&dquot->dq_count) > 1) { - /* We have more than one user... nothing to do */ - atomic_dec(&dquot->dq_count); - /* Releasing dquot during quotaoff phase? */ - if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_type) && - atomic_read(&dquot->dq_count) == 1) - wake_up(&dquot->dq_wait_unused); - spin_unlock(&dq_list_lock); - return; - } - /* Need to release dquot? */ - if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && dquot_dirty(dquot)) { - spin_unlock(&dq_list_lock); - /* Commit dquot before releasing */ - ret = dquot->dq_sb->dq_op->write_dquot(dquot); - if (ret < 0) { - printk(KERN_ERR "VFS: cannot write quota structure on " - "device %s (error %d). 
Quota may get out of " - "sync!\n", dquot->dq_sb->s_id, ret); - /* - * We clear dirty bit anyway, so that we avoid - * infinite loop here - */ - spin_lock(&dq_list_lock); - clear_dquot_dirty(dquot); - spin_unlock(&dq_list_lock); - } - goto we_slept; - } - /* Clear flag in case dquot was inactive (something bad happened) */ - clear_dquot_dirty(dquot); - if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { - spin_unlock(&dq_list_lock); - dquot->dq_sb->dq_op->release_dquot(dquot); - goto we_slept; - } - atomic_dec(&dquot->dq_count); -#ifdef __DQUOT_PARANOIA - /* sanity check */ - BUG_ON(!list_empty(&dquot->dq_free)); -#endif - put_dquot_last(dquot); - spin_unlock(&dq_list_lock); -} -EXPORT_SYMBOL(dqput); - -struct dquot *dquot_alloc(struct super_block *sb, int type) -{ - return kmem_cache_zalloc(dquot_cachep, GFP_NOFS); -} -EXPORT_SYMBOL(dquot_alloc); - -static struct dquot *get_empty_dquot(struct super_block *sb, int type) -{ - struct dquot *dquot; - - dquot = sb->dq_op->alloc_dquot(sb, type); - if(!dquot) - return NODQUOT; - - mutex_init(&dquot->dq_lock); - INIT_LIST_HEAD(&dquot->dq_free); - INIT_LIST_HEAD(&dquot->dq_inuse); - INIT_HLIST_NODE(&dquot->dq_hash); - INIT_LIST_HEAD(&dquot->dq_dirty); - init_waitqueue_head(&dquot->dq_wait_unused); - dquot->dq_sb = sb; - dquot->dq_type = type; - atomic_set(&dquot->dq_count, 1); - - return dquot; -} - -/* - * Get reference to dquot - * - * Locking is slightly tricky here. We are guarded from parallel quotaoff() - * destroying our dquot by: - * a) checking for quota flags under dq_list_lock and - * b) getting a reference to dquot before we release dq_list_lock - */ -struct dquot *dqget(struct super_block *sb, unsigned int id, int type) -{ - unsigned int hashent = hashfn(sb, id, type); - struct dquot *dquot = NODQUOT, *empty = NODQUOT; - - if (!sb_has_quota_active(sb, type)) - return NODQUOT; -we_slept: - spin_lock(&dq_list_lock); - spin_lock(&dq_state_lock); - if (!sb_has_quota_active(sb, type)) { - spin_unlock(&dq_state_lock); - spin_unlock(&dq_list_lock); - goto out; - } - spin_unlock(&dq_state_lock); - - if ((dquot = find_dquot(hashent, sb, id, type)) == NODQUOT) { - if (empty == NODQUOT) { - spin_unlock(&dq_list_lock); - if ((empty = get_empty_dquot(sb, type)) == NODQUOT) - schedule(); /* Try to wait for a moment... */ - goto we_slept; - } - dquot = empty; - empty = NODQUOT; - dquot->dq_id = id; - /* all dquots go on the inuse_list */ - put_inuse(dquot); - /* hash it first so it can be found */ - insert_dquot_hash(dquot); - dqstats.lookups++; - spin_unlock(&dq_list_lock); - } else { - if (!atomic_read(&dquot->dq_count)) - remove_free_dquot(dquot); - atomic_inc(&dquot->dq_count); - dqstats.cache_hits++; - dqstats.lookups++; - spin_unlock(&dq_list_lock); - } - /* Wait for dq_lock - after this we know that either dquot_release() is already - * finished or it will be canceled due to dq_count > 1 test */ - wait_on_dquot(dquot); - /* Read the dquot and instantiate it (everything done only if needed) */ - if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && sb->dq_op->acquire_dquot(dquot) < 0) { - dqput(dquot); - dquot = NODQUOT; - goto out; - } -#ifdef __DQUOT_PARANOIA - BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? 
*/ -#endif -out: - if (empty) - do_destroy_dquot(empty); - - return dquot; -} -EXPORT_SYMBOL(dqget); - -static int dqinit_needed(struct inode *inode, int type) -{ - int cnt; - - if (IS_NOQUOTA(inode)) - return 0; - if (type != -1) - return inode->i_dquot[type] == NODQUOT; - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - if (inode->i_dquot[cnt] == NODQUOT) - return 1; - return 0; -} - -/* This routine is guarded by dqonoff_mutex mutex */ -static void add_dquot_ref(struct super_block *sb, int type) -{ - struct inode *inode, *old_inode = NULL; - - spin_lock(&inode_lock); - list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { - if (!atomic_read(&inode->i_writecount)) - continue; - if (!dqinit_needed(inode, type)) - continue; - if (inode->i_state & (I_FREEING|I_WILL_FREE)) - continue; - - __iget(inode); - spin_unlock(&inode_lock); - - iput(old_inode); - sb->dq_op->initialize(inode, type); - /* We hold a reference to 'inode' so it couldn't have been - * removed from s_inodes list while we dropped the inode_lock. - * We cannot iput the inode now as we can be holding the last - * reference and we cannot iput it under inode_lock. So we - * keep the reference and iput it later. */ - old_inode = inode; - spin_lock(&inode_lock); - } - spin_unlock(&inode_lock); - iput(old_inode); -} - -/* Return 0 if dqput() won't block (note that 1 doesn't necessarily mean blocking) */ -static inline int dqput_blocks(struct dquot *dquot) -{ - if (atomic_read(&dquot->dq_count) <= 1) - return 1; - return 0; -} - -/* Remove references to dquots from inode - add dquot to list for freeing if needed */ -/* We can't race with anybody because we hold dqptr_sem for writing... */ -static int remove_inode_dquot_ref(struct inode *inode, int type, - struct list_head *tofree_head) -{ - struct dquot *dquot = inode->i_dquot[type]; - - inode->i_dquot[type] = NODQUOT; - if (dquot != NODQUOT) { - if (dqput_blocks(dquot)) { -#ifdef __DQUOT_PARANOIA - if (atomic_read(&dquot->dq_count) != 1) - printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count)); -#endif - spin_lock(&dq_list_lock); - list_add(&dquot->dq_free, tofree_head); /* As dquot must have currently users it can't be on the free list... */ - spin_unlock(&dq_list_lock); - return 1; - } - else - dqput(dquot); /* We have guaranteed we won't block */ - } - return 0; -} - -/* Free list of dquots - called from inode.c */ -/* dquots are removed from inodes, no new references can be got so we are the only ones holding reference */ -static void put_dquot_list(struct list_head *tofree_head) -{ - struct list_head *act_head; - struct dquot *dquot; - - act_head = tofree_head->next; - /* So now we have dquots on the list... Just free them */ - while (act_head != tofree_head) { - dquot = list_entry(act_head, struct dquot, dq_free); - act_head = act_head->next; - list_del_init(&dquot->dq_free); /* Remove dquot from the list so we won't have problems... 
*/ - dqput(dquot); - } -} - -static void remove_dquot_ref(struct super_block *sb, int type, - struct list_head *tofree_head) -{ - struct inode *inode; - - spin_lock(&inode_lock); - list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { - if (!IS_NOQUOTA(inode)) - remove_inode_dquot_ref(inode, type, tofree_head); - } - spin_unlock(&inode_lock); -} - -/* Gather all references from inodes and drop them */ -static void drop_dquot_ref(struct super_block *sb, int type) -{ - LIST_HEAD(tofree_head); - - if (sb->dq_op) { - down_write(&sb_dqopt(sb)->dqptr_sem); - remove_dquot_ref(sb, type, &tofree_head); - up_write(&sb_dqopt(sb)->dqptr_sem); - put_dquot_list(&tofree_head); - } -} - -static inline void dquot_incr_inodes(struct dquot *dquot, qsize_t number) -{ - dquot->dq_dqb.dqb_curinodes += number; -} - -static inline void dquot_incr_space(struct dquot *dquot, qsize_t number) -{ - dquot->dq_dqb.dqb_curspace += number; -} - -static inline void dquot_resv_space(struct dquot *dquot, qsize_t number) -{ - dquot->dq_dqb.dqb_rsvspace += number; -} - -/* - * Claim reserved quota space - */ -static void dquot_claim_reserved_space(struct dquot *dquot, - qsize_t number) -{ - WARN_ON(dquot->dq_dqb.dqb_rsvspace < number); - dquot->dq_dqb.dqb_curspace += number; - dquot->dq_dqb.dqb_rsvspace -= number; -} - -static inline -void dquot_free_reserved_space(struct dquot *dquot, qsize_t number) -{ - dquot->dq_dqb.dqb_rsvspace -= number; -} - -static inline void dquot_decr_inodes(struct dquot *dquot, qsize_t number) -{ - if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE || - dquot->dq_dqb.dqb_curinodes >= number) - dquot->dq_dqb.dqb_curinodes -= number; - else - dquot->dq_dqb.dqb_curinodes = 0; - if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit) - dquot->dq_dqb.dqb_itime = (time_t) 0; - clear_bit(DQ_INODES_B, &dquot->dq_flags); -} - -static inline void dquot_decr_space(struct dquot *dquot, qsize_t number) -{ - if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE || - dquot->dq_dqb.dqb_curspace >= number) - dquot->dq_dqb.dqb_curspace -= number; - else - dquot->dq_dqb.dqb_curspace = 0; - if (dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit) - dquot->dq_dqb.dqb_btime = (time_t) 0; - clear_bit(DQ_BLKS_B, &dquot->dq_flags); -} - -static int warning_issued(struct dquot *dquot, const int warntype) -{ - int flag = (warntype == QUOTA_NL_BHARDWARN || - warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B : - ((warntype == QUOTA_NL_IHARDWARN || - warntype == QUOTA_NL_ISOFTLONGWARN) ? 
DQ_INODES_B : 0); - - if (!flag) - return 0; - return test_and_set_bit(flag, &dquot->dq_flags); -} - -#ifdef CONFIG_PRINT_QUOTA_WARNING -static int flag_print_warnings = 1; - -static inline int need_print_warning(struct dquot *dquot) -{ - if (!flag_print_warnings) - return 0; - - switch (dquot->dq_type) { - case USRQUOTA: - return current_fsuid() == dquot->dq_id; - case GRPQUOTA: - return in_group_p(dquot->dq_id); - } - return 0; -} - -/* Print warning to user which exceeded quota */ -static void print_warning(struct dquot *dquot, const int warntype) -{ - char *msg = NULL; - struct tty_struct *tty; - - if (warntype == QUOTA_NL_IHARDBELOW || - warntype == QUOTA_NL_ISOFTBELOW || - warntype == QUOTA_NL_BHARDBELOW || - warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(dquot)) - return; - - tty = get_current_tty(); - if (!tty) - return; - tty_write_message(tty, dquot->dq_sb->s_id); - if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN) - tty_write_message(tty, ": warning, "); - else - tty_write_message(tty, ": write failed, "); - tty_write_message(tty, quotatypes[dquot->dq_type]); - switch (warntype) { - case QUOTA_NL_IHARDWARN: - msg = " file limit reached.\r\n"; - break; - case QUOTA_NL_ISOFTLONGWARN: - msg = " file quota exceeded too long.\r\n"; - break; - case QUOTA_NL_ISOFTWARN: - msg = " file quota exceeded.\r\n"; - break; - case QUOTA_NL_BHARDWARN: - msg = " block limit reached.\r\n"; - break; - case QUOTA_NL_BSOFTLONGWARN: - msg = " block quota exceeded too long.\r\n"; - break; - case QUOTA_NL_BSOFTWARN: - msg = " block quota exceeded.\r\n"; - break; - } - tty_write_message(tty, msg); - tty_kref_put(tty); -} -#endif - -#ifdef CONFIG_QUOTA_NETLINK_INTERFACE - -/* Netlink family structure for quota */ -static struct genl_family quota_genl_family = { - .id = GENL_ID_GENERATE, - .hdrsize = 0, - .name = "VFS_DQUOT", - .version = 1, - .maxattr = QUOTA_NL_A_MAX, -}; - -/* Send warning to userspace about user which exceeded quota */ -static void send_warning(const struct dquot *dquot, const char warntype) -{ - static atomic_t seq; - struct sk_buff *skb; - void *msg_head; - int ret; - int msg_size = 4 * nla_total_size(sizeof(u32)) + - 2 * nla_total_size(sizeof(u64)); - - /* We have to allocate using GFP_NOFS as we are called from a - * filesystem performing write and thus further recursion into - * the fs to free some data could cause deadlocks. 
*/
- skb = genlmsg_new(msg_size, GFP_NOFS);
- if (!skb) {
- printk(KERN_ERR
- "VFS: Not enough memory to send quota warning.\n");
- return;
- }
- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
- &quota_genl_family, 0, QUOTA_NL_C_WARNING);
- if (!msg_head) {
- printk(KERN_ERR
- "VFS: Cannot store netlink header in quota warning.\n");
- goto err_out;
- }
- ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, dquot->dq_type);
- if (ret)
- goto attr_err_out;
- ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, dquot->dq_id);
- if (ret)
- goto attr_err_out;
- ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
- if (ret)
- goto attr_err_out;
- ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR,
- MAJOR(dquot->dq_sb->s_dev));
- if (ret)
- goto attr_err_out;
- ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR,
- MINOR(dquot->dq_sb->s_dev));
- if (ret)
- goto attr_err_out;
- ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid());
- if (ret)
- goto attr_err_out;
- genlmsg_end(skb, msg_head);
-
- ret = genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS);
- if (ret < 0 && ret != -ESRCH)
- printk(KERN_ERR
- "VFS: Failed to send notification message: %d\n", ret);
- return;
-attr_err_out:
- printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
-err_out:
- kfree_skb(skb);
-}
-#endif
-/*
- * Write warnings to the console and send warning messages over netlink.
- *
- * Note that this function can sleep.
- */
-static inline void flush_warnings(struct dquot * const *dquots, char *warntype)
-{
- int i;
-
- for (i = 0; i < MAXQUOTAS; i++)
- if (dquots[i] != NODQUOT && warntype[i] != QUOTA_NL_NOWARN &&
- !warning_issued(dquots[i], warntype[i])) {
-#ifdef CONFIG_PRINT_QUOTA_WARNING
- print_warning(dquots[i], warntype[i]);
-#endif
-#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
- send_warning(dquots[i], warntype[i]);
-#endif
- }
-}
-
-static inline char ignore_hardlimit(struct dquot *dquot)
-{
- struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];
-
- return capable(CAP_SYS_RESOURCE) &&
- (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD || !(info->dqi_flags & V1_DQF_RSQUASH));
-}
-
-/* needs dq_data_lock */
-static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype)
-{
- *warntype = QUOTA_NL_NOWARN;
- if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) ||
- test_bit(DQ_FAKE_B, &dquot->dq_flags))
- return QUOTA_OK;
-
- if (dquot->dq_dqb.dqb_ihardlimit &&
- (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_ihardlimit &&
- !ignore_hardlimit(dquot)) {
- *warntype = QUOTA_NL_IHARDWARN;
- return NO_QUOTA;
- }
-
- if (dquot->dq_dqb.dqb_isoftlimit &&
- (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit &&
- dquot->dq_dqb.dqb_itime && get_seconds() >= dquot->dq_dqb.dqb_itime &&
- !ignore_hardlimit(dquot)) {
- *warntype = QUOTA_NL_ISOFTLONGWARN;
- return NO_QUOTA;
- }
-
- if (dquot->dq_dqb.dqb_isoftlimit &&
- (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit &&
- dquot->dq_dqb.dqb_itime == 0) {
- *warntype = QUOTA_NL_ISOFTWARN;
- dquot->dq_dqb.dqb_itime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
- }
-
- return QUOTA_OK;
-}
-
-/* needs dq_data_lock */
-static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype)
-{
- qsize_t tspace;
-
- *warntype = QUOTA_NL_NOWARN;
- if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) ||
- test_bit(DQ_FAKE_B, &dquot->dq_flags))
- return QUOTA_OK;
-
- tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
- + space;
-
- if 
(dquot->dq_dqb.dqb_bhardlimit && - tspace > dquot->dq_dqb.dqb_bhardlimit && - !ignore_hardlimit(dquot)) { - if (!prealloc) - *warntype = QUOTA_NL_BHARDWARN; - return NO_QUOTA; - } - - if (dquot->dq_dqb.dqb_bsoftlimit && - tspace > dquot->dq_dqb.dqb_bsoftlimit && - dquot->dq_dqb.dqb_btime && get_seconds() >= dquot->dq_dqb.dqb_btime && - !ignore_hardlimit(dquot)) { - if (!prealloc) - *warntype = QUOTA_NL_BSOFTLONGWARN; - return NO_QUOTA; - } - - if (dquot->dq_dqb.dqb_bsoftlimit && - tspace > dquot->dq_dqb.dqb_bsoftlimit && - dquot->dq_dqb.dqb_btime == 0) { - if (!prealloc) { - *warntype = QUOTA_NL_BSOFTWARN; - dquot->dq_dqb.dqb_btime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_bgrace; - } - else - /* - * We don't allow preallocation to exceed softlimit so exceeding will - * be always printed - */ - return NO_QUOTA; - } - - return QUOTA_OK; -} - -static int info_idq_free(struct dquot *dquot, qsize_t inodes) -{ - if (test_bit(DQ_FAKE_B, &dquot->dq_flags) || - dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit || - !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type)) - return QUOTA_NL_NOWARN; - - if (dquot->dq_dqb.dqb_curinodes - inodes <= dquot->dq_dqb.dqb_isoftlimit) - return QUOTA_NL_ISOFTBELOW; - if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit && - dquot->dq_dqb.dqb_curinodes - inodes < dquot->dq_dqb.dqb_ihardlimit) - return QUOTA_NL_IHARDBELOW; - return QUOTA_NL_NOWARN; -} - -static int info_bdq_free(struct dquot *dquot, qsize_t space) -{ - if (test_bit(DQ_FAKE_B, &dquot->dq_flags) || - dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit) - return QUOTA_NL_NOWARN; - - if (dquot->dq_dqb.dqb_curspace - space <= dquot->dq_dqb.dqb_bsoftlimit) - return QUOTA_NL_BSOFTBELOW; - if (dquot->dq_dqb.dqb_curspace >= dquot->dq_dqb.dqb_bhardlimit && - dquot->dq_dqb.dqb_curspace - space < dquot->dq_dqb.dqb_bhardlimit) - return QUOTA_NL_BHARDBELOW; - return QUOTA_NL_NOWARN; -} -/* - * Initialize quota pointers in inode - * We do things in a bit complicated way but by that we avoid calling - * dqget() and thus filesystem callbacks under dqptr_sem. - */ -int dquot_initialize(struct inode *inode, int type) -{ - unsigned int id = 0; - int cnt, ret = 0; - struct dquot *got[MAXQUOTAS] = { NODQUOT, NODQUOT }; - struct super_block *sb = inode->i_sb; - - /* First test before acquiring mutex - solves deadlocks when we - * re-enter the quota code and are already holding the mutex */ - if (IS_NOQUOTA(inode)) - return 0; - - /* First get references to structures we might need. */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (type != -1 && cnt != type) - continue; - switch (cnt) { - case USRQUOTA: - id = inode->i_uid; - break; - case GRPQUOTA: - id = inode->i_gid; - break; - } - got[cnt] = dqget(sb, id, cnt); - } - - down_write(&sb_dqopt(sb)->dqptr_sem); - /* Having dqptr_sem we know NOQUOTA flags can't be altered... 
*/ - if (IS_NOQUOTA(inode)) - goto out_err; - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (type != -1 && cnt != type) - continue; - /* Avoid races with quotaoff() */ - if (!sb_has_quota_active(sb, cnt)) - continue; - if (inode->i_dquot[cnt] == NODQUOT) { - inode->i_dquot[cnt] = got[cnt]; - got[cnt] = NODQUOT; - } - } -out_err: - up_write(&sb_dqopt(sb)->dqptr_sem); - /* Drop unused references */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - dqput(got[cnt]); - return ret; -} -EXPORT_SYMBOL(dquot_initialize); - -/* - * Release all quotas referenced by inode - */ -int dquot_drop(struct inode *inode) -{ - int cnt; - struct dquot *put[MAXQUOTAS]; - - down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - put[cnt] = inode->i_dquot[cnt]; - inode->i_dquot[cnt] = NODQUOT; - } - up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); - - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - dqput(put[cnt]); - return 0; -} -EXPORT_SYMBOL(dquot_drop); - -/* Wrapper to remove references to quota structures from inode */ -void vfs_dq_drop(struct inode *inode) -{ - /* Here we can get arbitrary inode from clear_inode() so we have - * to be careful. OTOH we don't need locking as quota operations - * are allowed to change only at mount time */ - if (!IS_NOQUOTA(inode) && inode->i_sb && inode->i_sb->dq_op - && inode->i_sb->dq_op->drop) { - int cnt; - /* Test before calling to rule out calls from proc and such - * where we are not allowed to block. Note that this is - * actually reliable test even without the lock - the caller - * must assure that nobody can come after the DQUOT_DROP and - * add quota pointers back anyway */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - if (inode->i_dquot[cnt] != NODQUOT) - break; - if (cnt < MAXQUOTAS) - inode->i_sb->dq_op->drop(inode); - } -} -EXPORT_SYMBOL(vfs_dq_drop); - -/* - * Following four functions update i_blocks+i_bytes fields and - * quota information (together with appropriate checks) - * NOTE: We absolutely rely on the fact that caller dirties - * the inode (usually macros in quotaops.h care about this) and - * holds a handle for the current transaction so that dquot write and - * inode write go into the same transaction. 
- */ - -/* - * This operation can block, but only after everything is updated - */ -int __dquot_alloc_space(struct inode *inode, qsize_t number, - int warn, int reserve) -{ - int cnt, ret = QUOTA_OK; - char warntype[MAXQUOTAS]; - - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - warntype[cnt] = QUOTA_NL_NOWARN; - - spin_lock(&dq_data_lock); - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (inode->i_dquot[cnt] == NODQUOT) - continue; - if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) - == NO_QUOTA) { - ret = NO_QUOTA; - goto out_unlock; - } - } - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (inode->i_dquot[cnt] == NODQUOT) - continue; - if (reserve) - dquot_resv_space(inode->i_dquot[cnt], number); - else - dquot_incr_space(inode->i_dquot[cnt], number); - } - if (!reserve) - inode_add_bytes(inode, number); -out_unlock: - spin_unlock(&dq_data_lock); - flush_warnings(inode->i_dquot, warntype); - return ret; -} - -int dquot_alloc_space(struct inode *inode, qsize_t number, int warn) -{ - int cnt, ret = QUOTA_OK; - - /* - * First test before acquiring mutex - solves deadlocks when we - * re-enter the quota code and are already holding the mutex - */ - if (IS_NOQUOTA(inode)) { - inode_add_bytes(inode, number); - goto out; - } - - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); - if (IS_NOQUOTA(inode)) { - inode_add_bytes(inode, number); - goto out_unlock; - } - - ret = __dquot_alloc_space(inode, number, warn, 0); - if (ret == NO_QUOTA) - goto out_unlock; - - /* Dirtify all the dquots - this can block when journalling */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - if (inode->i_dquot[cnt]) - mark_dquot_dirty(inode->i_dquot[cnt]); -out_unlock: - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); -out: - return ret; -} -EXPORT_SYMBOL(dquot_alloc_space); - -int dquot_reserve_space(struct inode *inode, qsize_t number, int warn) -{ - int ret = QUOTA_OK; - - if (IS_NOQUOTA(inode)) - goto out; - - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); - if (IS_NOQUOTA(inode)) - goto out_unlock; - - ret = __dquot_alloc_space(inode, number, warn, 1); -out_unlock: - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); -out: - return ret; -} -EXPORT_SYMBOL(dquot_reserve_space); - -/* - * This operation can block, but only after everything is updated - */ -int dquot_alloc_inode(const struct inode *inode, qsize_t number) -{ - int cnt, ret = NO_QUOTA; - char warntype[MAXQUOTAS]; - - /* First test before acquiring mutex - solves deadlocks when we - * re-enter the quota code and are already holding the mutex */ - if (IS_NOQUOTA(inode)) - return QUOTA_OK; - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - warntype[cnt] = QUOTA_NL_NOWARN; - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); - if (IS_NOQUOTA(inode)) { - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); - return QUOTA_OK; - } - spin_lock(&dq_data_lock); - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (inode->i_dquot[cnt] == NODQUOT) - continue; - if (check_idq(inode->i_dquot[cnt], number, warntype+cnt) == NO_QUOTA) - goto warn_put_all; - } - - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (inode->i_dquot[cnt] == NODQUOT) - continue; - dquot_incr_inodes(inode->i_dquot[cnt], number); - } - ret = QUOTA_OK; -warn_put_all: - spin_unlock(&dq_data_lock); - if (ret == QUOTA_OK) - /* Dirtify all the dquots - this can block when journalling */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - if (inode->i_dquot[cnt]) - mark_dquot_dirty(inode->i_dquot[cnt]); - flush_warnings(inode->i_dquot, warntype); - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); - return ret; -} -EXPORT_SYMBOL(dquot_alloc_inode); - -int 
dquot_claim_space(struct inode *inode, qsize_t number) -{ - int cnt; - int ret = QUOTA_OK; - - if (IS_NOQUOTA(inode)) { - inode_add_bytes(inode, number); - goto out; - } - - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); - if (IS_NOQUOTA(inode)) { - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); - inode_add_bytes(inode, number); - goto out; - } - - spin_lock(&dq_data_lock); - /* Claim reserved quotas to allocated quotas */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (inode->i_dquot[cnt] != NODQUOT) - dquot_claim_reserved_space(inode->i_dquot[cnt], - number); - } - /* Update inode bytes */ - inode_add_bytes(inode, number); - spin_unlock(&dq_data_lock); - /* Dirtify all the dquots - this can block when journalling */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - if (inode->i_dquot[cnt]) - mark_dquot_dirty(inode->i_dquot[cnt]); - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); -out: - return ret; -} -EXPORT_SYMBOL(dquot_claim_space); - -/* - * Release reserved quota space - */ -void dquot_release_reserved_space(struct inode *inode, qsize_t number) -{ - int cnt; - - if (IS_NOQUOTA(inode)) - goto out; - - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); - if (IS_NOQUOTA(inode)) - goto out_unlock; - - spin_lock(&dq_data_lock); - /* Release reserved dquots */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (inode->i_dquot[cnt] != NODQUOT) - dquot_free_reserved_space(inode->i_dquot[cnt], number); - } - spin_unlock(&dq_data_lock); - -out_unlock: - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); -out: - return; -} -EXPORT_SYMBOL(dquot_release_reserved_space); - -/* - * This operation can block, but only after everything is updated - */ -int dquot_free_space(struct inode *inode, qsize_t number) -{ - unsigned int cnt; - char warntype[MAXQUOTAS]; - - /* First test before acquiring mutex - solves deadlocks when we - * re-enter the quota code and are already holding the mutex */ - if (IS_NOQUOTA(inode)) { -out_sub: - inode_sub_bytes(inode, number); - return QUOTA_OK; - } - - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); - /* Now recheck reliably when holding dqptr_sem */ - if (IS_NOQUOTA(inode)) { - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); - goto out_sub; - } - spin_lock(&dq_data_lock); - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (inode->i_dquot[cnt] == NODQUOT) - continue; - warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number); - dquot_decr_space(inode->i_dquot[cnt], number); - } - inode_sub_bytes(inode, number); - spin_unlock(&dq_data_lock); - /* Dirtify all the dquots - this can block when journalling */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - if (inode->i_dquot[cnt]) - mark_dquot_dirty(inode->i_dquot[cnt]); - flush_warnings(inode->i_dquot, warntype); - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); - return QUOTA_OK; -} -EXPORT_SYMBOL(dquot_free_space); - -/* - * This operation can block, but only after everything is updated - */ -int dquot_free_inode(const struct inode *inode, qsize_t number) -{ - unsigned int cnt; - char warntype[MAXQUOTAS]; - - /* First test before acquiring mutex - solves deadlocks when we - * re-enter the quota code and are already holding the mutex */ - if (IS_NOQUOTA(inode)) - return QUOTA_OK; - - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); - /* Now recheck reliably when holding dqptr_sem */ - if (IS_NOQUOTA(inode)) { - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); - return QUOTA_OK; - } - spin_lock(&dq_data_lock); - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (inode->i_dquot[cnt] == NODQUOT) - continue; - warntype[cnt] = info_idq_free(inode->i_dquot[cnt], number); - 
dquot_decr_inodes(inode->i_dquot[cnt], number); - } - spin_unlock(&dq_data_lock); - /* Dirtify all the dquots - this can block when journalling */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - if (inode->i_dquot[cnt]) - mark_dquot_dirty(inode->i_dquot[cnt]); - flush_warnings(inode->i_dquot, warntype); - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); - return QUOTA_OK; -} -EXPORT_SYMBOL(dquot_free_inode); - -/* - * call back function, get reserved quota space from underlying fs - */ -qsize_t dquot_get_reserved_space(struct inode *inode) -{ - qsize_t reserved_space = 0; - - if (sb_any_quota_active(inode->i_sb) && - inode->i_sb->dq_op->get_reserved_space) - reserved_space = inode->i_sb->dq_op->get_reserved_space(inode); - return reserved_space; -} - -/* - * Transfer the number of inode and blocks from one diskquota to an other. - * - * This operation can block, but only after everything is updated - * A transaction must be started when entering this function. - */ -int dquot_transfer(struct inode *inode, struct iattr *iattr) -{ - qsize_t space, cur_space; - qsize_t rsv_space = 0; - struct dquot *transfer_from[MAXQUOTAS]; - struct dquot *transfer_to[MAXQUOTAS]; - int cnt, ret = QUOTA_OK; - int chuid = iattr->ia_valid & ATTR_UID && inode->i_uid != iattr->ia_uid, - chgid = iattr->ia_valid & ATTR_GID && inode->i_gid != iattr->ia_gid; - char warntype_to[MAXQUOTAS]; - char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS]; - - /* First test before acquiring mutex - solves deadlocks when we - * re-enter the quota code and are already holding the mutex */ - if (IS_NOQUOTA(inode)) - return QUOTA_OK; - /* Initialize the arrays */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - transfer_from[cnt] = NODQUOT; - transfer_to[cnt] = NODQUOT; - warntype_to[cnt] = QUOTA_NL_NOWARN; - switch (cnt) { - case USRQUOTA: - if (!chuid) - continue; - transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_uid, cnt); - break; - case GRPQUOTA: - if (!chgid) - continue; - transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_gid, cnt); - break; - } - } - - down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); - /* Now recheck reliably when holding dqptr_sem */ - if (IS_NOQUOTA(inode)) { /* File without quota accounting? */ - up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); - goto put_all; - } - spin_lock(&dq_data_lock); - cur_space = inode_get_bytes(inode); - rsv_space = dquot_get_reserved_space(inode); - space = cur_space + rsv_space; - /* Build the transfer_from list and check the limits */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (transfer_to[cnt] == NODQUOT) - continue; - transfer_from[cnt] = inode->i_dquot[cnt]; - if (check_idq(transfer_to[cnt], 1, warntype_to + cnt) == - NO_QUOTA || check_bdq(transfer_to[cnt], space, 0, - warntype_to + cnt) == NO_QUOTA) - goto over_quota; - } - - /* - * Finally perform the needed transfer from transfer_from to transfer_to - */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - /* - * Skip changes for same uid or gid or for turned off quota-type. 
- */ - if (transfer_to[cnt] == NODQUOT) - continue; - - /* Due to IO error we might not have transfer_from[] structure */ - if (transfer_from[cnt]) { - warntype_from_inodes[cnt] = - info_idq_free(transfer_from[cnt], 1); - warntype_from_space[cnt] = - info_bdq_free(transfer_from[cnt], space); - dquot_decr_inodes(transfer_from[cnt], 1); - dquot_decr_space(transfer_from[cnt], cur_space); - dquot_free_reserved_space(transfer_from[cnt], - rsv_space); - } - - dquot_incr_inodes(transfer_to[cnt], 1); - dquot_incr_space(transfer_to[cnt], cur_space); - dquot_resv_space(transfer_to[cnt], rsv_space); - - inode->i_dquot[cnt] = transfer_to[cnt]; - } - spin_unlock(&dq_data_lock); - up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); - - /* Dirtify all the dquots - this can block when journalling */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (transfer_from[cnt]) - mark_dquot_dirty(transfer_from[cnt]); - if (transfer_to[cnt]) { - mark_dquot_dirty(transfer_to[cnt]); - /* The reference we got is transferred to the inode */ - transfer_to[cnt] = NODQUOT; - } - } -warn_put_all: - flush_warnings(transfer_to, warntype_to); - flush_warnings(transfer_from, warntype_from_inodes); - flush_warnings(transfer_from, warntype_from_space); -put_all: - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - dqput(transfer_from[cnt]); - dqput(transfer_to[cnt]); - } - return ret; -over_quota: - spin_unlock(&dq_data_lock); - up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); - /* Clear dquot pointers we don't want to dqput() */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - transfer_from[cnt] = NODQUOT; - ret = NO_QUOTA; - goto warn_put_all; -} -EXPORT_SYMBOL(dquot_transfer); - -/* Wrapper for transferring ownership of an inode */ -int vfs_dq_transfer(struct inode *inode, struct iattr *iattr) -{ - if (sb_any_quota_active(inode->i_sb) && !IS_NOQUOTA(inode)) { - vfs_dq_init(inode); - if (inode->i_sb->dq_op->transfer(inode, iattr) == NO_QUOTA) - return 1; - } - return 0; -} -EXPORT_SYMBOL(vfs_dq_transfer); - -/* - * Write info of quota file to disk - */ -int dquot_commit_info(struct super_block *sb, int type) -{ - int ret; - struct quota_info *dqopt = sb_dqopt(sb); - - mutex_lock(&dqopt->dqio_mutex); - ret = dqopt->ops[type]->write_file_info(sb, type); - mutex_unlock(&dqopt->dqio_mutex); - return ret; -} -EXPORT_SYMBOL(dquot_commit_info); - -/* - * Definitions of diskquota operations. - */ -struct dquot_operations dquot_operations = { - .initialize = dquot_initialize, - .drop = dquot_drop, - .alloc_space = dquot_alloc_space, - .alloc_inode = dquot_alloc_inode, - .free_space = dquot_free_space, - .free_inode = dquot_free_inode, - .transfer = dquot_transfer, - .write_dquot = dquot_commit, - .acquire_dquot = dquot_acquire, - .release_dquot = dquot_release, - .mark_dirty = dquot_mark_dquot_dirty, - .write_info = dquot_commit_info, - .alloc_dquot = dquot_alloc, - .destroy_dquot = dquot_destroy, -}; - -/* - * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount) - */ -int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags) -{ - int cnt, ret = 0; - struct quota_info *dqopt = sb_dqopt(sb); - struct inode *toputinode[MAXQUOTAS]; - - /* Cannot turn off usage accounting without turning off limits, or - * suspend quotas and simultaneously turn quotas off. 
*/ - if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED)) - || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED | - DQUOT_USAGE_ENABLED))) - return -EINVAL; - - /* We need to serialize quota_off() for device */ - mutex_lock(&dqopt->dqonoff_mutex); - - /* - * Skip everything if there's nothing to do. We have to do this because - * sometimes we are called when fill_super() failed and calling - * sync_fs() in such cases does no good. - */ - if (!sb_any_quota_loaded(sb)) { - mutex_unlock(&dqopt->dqonoff_mutex); - return 0; - } - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - toputinode[cnt] = NULL; - if (type != -1 && cnt != type) - continue; - if (!sb_has_quota_loaded(sb, cnt)) - continue; - - if (flags & DQUOT_SUSPENDED) { - spin_lock(&dq_state_lock); - dqopt->flags |= - dquot_state_flag(DQUOT_SUSPENDED, cnt); - spin_unlock(&dq_state_lock); - } else { - spin_lock(&dq_state_lock); - dqopt->flags &= ~dquot_state_flag(flags, cnt); - /* Turning off suspended quotas? */ - if (!sb_has_quota_loaded(sb, cnt) && - sb_has_quota_suspended(sb, cnt)) { - dqopt->flags &= ~dquot_state_flag( - DQUOT_SUSPENDED, cnt); - spin_unlock(&dq_state_lock); - iput(dqopt->files[cnt]); - dqopt->files[cnt] = NULL; - continue; - } - spin_unlock(&dq_state_lock); - } - - /* We still have to keep quota loaded? */ - if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED)) - continue; - - /* Note: these are blocking operations */ - drop_dquot_ref(sb, cnt); - invalidate_dquots(sb, cnt); - /* - * Now all dquots should be invalidated, all writes done so we should be only - * users of the info. No locks needed. - */ - if (info_dirty(&dqopt->info[cnt])) - sb->dq_op->write_info(sb, cnt); - if (dqopt->ops[cnt]->free_file_info) - dqopt->ops[cnt]->free_file_info(sb, cnt); - put_quota_format(dqopt->info[cnt].dqi_format); - - toputinode[cnt] = dqopt->files[cnt]; - if (!sb_has_quota_loaded(sb, cnt)) - dqopt->files[cnt] = NULL; - dqopt->info[cnt].dqi_flags = 0; - dqopt->info[cnt].dqi_igrace = 0; - dqopt->info[cnt].dqi_bgrace = 0; - dqopt->ops[cnt] = NULL; - } - mutex_unlock(&dqopt->dqonoff_mutex); - - /* Skip syncing and setting flags if quota files are hidden */ - if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) - goto put_inodes; - - /* Sync the superblock so that buffers with quota data are written to - * disk (and so userspace sees correct data afterwards). */ - if (sb->s_op->sync_fs) - sb->s_op->sync_fs(sb, 1); - sync_blockdev(sb->s_bdev); - /* Now the quota files are just ordinary files and we can set the - * inode flags back. Moreover we discard the pagecache so that - * userspace sees the writes we did bypassing the pagecache. We - * must also discard the blockdev buffers so that we see the - * changes done by userspace on the next quotaon() */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - if (toputinode[cnt]) { - mutex_lock(&dqopt->dqonoff_mutex); - /* If quota was reenabled in the meantime, we have - * nothing to do */ - if (!sb_has_quota_loaded(sb, cnt)) { - mutex_lock_nested(&toputinode[cnt]->i_mutex, I_MUTEX_QUOTA); - toputinode[cnt]->i_flags &= ~(S_IMMUTABLE | - S_NOATIME | S_NOQUOTA); - truncate_inode_pages(&toputinode[cnt]->i_data, 0); - mutex_unlock(&toputinode[cnt]->i_mutex); - mark_inode_dirty(toputinode[cnt]); - } - mutex_unlock(&dqopt->dqonoff_mutex); - } - if (sb->s_bdev) - invalidate_bdev(sb->s_bdev); -put_inodes: - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - if (toputinode[cnt]) { - /* On remount RO, we keep the inode pointer so that we - * can reenable quota on the subsequent remount RW. 
We - * have to check 'flags' variable and not use sb_has_ - * function because another quotaon / quotaoff could - * change global state before we got here. We refuse - * to suspend quotas when there is pending delete on - * the quota file... */ - if (!(flags & DQUOT_SUSPENDED)) - iput(toputinode[cnt]); - else if (!toputinode[cnt]->i_nlink) - ret = -EBUSY; - } - return ret; -} -EXPORT_SYMBOL(vfs_quota_disable); - -int vfs_quota_off(struct super_block *sb, int type, int remount) -{ - return vfs_quota_disable(sb, type, remount ? DQUOT_SUSPENDED : - (DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED)); -} -EXPORT_SYMBOL(vfs_quota_off); -/* - * Turn quotas on on a device - */ - -/* - * Helper function to turn quotas on when we already have the inode of - * quota file and no quota information is loaded. - */ -static int vfs_load_quota_inode(struct inode *inode, int type, int format_id, - unsigned int flags) -{ - struct quota_format_type *fmt = find_quota_format(format_id); - struct super_block *sb = inode->i_sb; - struct quota_info *dqopt = sb_dqopt(sb); - int error; - int oldflags = -1; - - if (!fmt) - return -ESRCH; - if (!S_ISREG(inode->i_mode)) { - error = -EACCES; - goto out_fmt; - } - if (IS_RDONLY(inode)) { - error = -EROFS; - goto out_fmt; - } - if (!sb->s_op->quota_write || !sb->s_op->quota_read) { - error = -EINVAL; - goto out_fmt; - } - /* Usage always has to be set... */ - if (!(flags & DQUOT_USAGE_ENABLED)) { - error = -EINVAL; - goto out_fmt; - } - - if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) { - /* As we bypass the pagecache we must now flush the inode so - * that we see all the changes from userspace... */ - write_inode_now(inode, 1); - /* And now flush the block cache so that kernel sees the - * changes */ - invalidate_bdev(sb->s_bdev); - } - mutex_lock(&inode->i_mutex); - mutex_lock(&dqopt->dqonoff_mutex); - if (sb_has_quota_loaded(sb, type)) { - error = -EBUSY; - goto out_lock; - } - - if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) { - /* We don't want quota and atime on quota files (deadlocks - * possible) Also nobody should write to the file - we use - * special IO operations which ignore the immutable bit. 
*/ - down_write(&dqopt->dqptr_sem); - oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE | S_NOQUOTA); - inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE; - up_write(&dqopt->dqptr_sem); - sb->dq_op->drop(inode); - } - - error = -EIO; - dqopt->files[type] = igrab(inode); - if (!dqopt->files[type]) - goto out_lock; - error = -EINVAL; - if (!fmt->qf_ops->check_quota_file(sb, type)) - goto out_file_init; - - dqopt->ops[type] = fmt->qf_ops; - dqopt->info[type].dqi_format = fmt; - dqopt->info[type].dqi_fmt_id = format_id; - INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list); - mutex_lock(&dqopt->dqio_mutex); - if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) { - mutex_unlock(&dqopt->dqio_mutex); - goto out_file_init; - } - mutex_unlock(&dqopt->dqio_mutex); - mutex_unlock(&inode->i_mutex); - spin_lock(&dq_state_lock); - dqopt->flags |= dquot_state_flag(flags, type); - spin_unlock(&dq_state_lock); - - add_dquot_ref(sb, type); - mutex_unlock(&dqopt->dqonoff_mutex); - - return 0; - -out_file_init: - dqopt->files[type] = NULL; - iput(inode); -out_lock: - mutex_unlock(&dqopt->dqonoff_mutex); - if (oldflags != -1) { - down_write(&dqopt->dqptr_sem); - /* Set the flags back (in the case of accidental quotaon() - * on a wrong file we don't want to mess up the flags) */ - inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE); - inode->i_flags |= oldflags; - up_write(&dqopt->dqptr_sem); - } - mutex_unlock(&inode->i_mutex); -out_fmt: - put_quota_format(fmt); - - return error; -} - -/* Reenable quotas on remount RW */ -static int vfs_quota_on_remount(struct super_block *sb, int type) -{ - struct quota_info *dqopt = sb_dqopt(sb); - struct inode *inode; - int ret; - unsigned int flags; - - mutex_lock(&dqopt->dqonoff_mutex); - if (!sb_has_quota_suspended(sb, type)) { - mutex_unlock(&dqopt->dqonoff_mutex); - return 0; - } - inode = dqopt->files[type]; - dqopt->files[type] = NULL; - spin_lock(&dq_state_lock); - flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED | - DQUOT_LIMITS_ENABLED, type); - dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, type); - spin_unlock(&dq_state_lock); - mutex_unlock(&dqopt->dqonoff_mutex); - - flags = dquot_generic_flag(flags, type); - ret = vfs_load_quota_inode(inode, type, dqopt->info[type].dqi_fmt_id, - flags); - iput(inode); - - return ret; -} - -int vfs_quota_on_path(struct super_block *sb, int type, int format_id, - struct path *path) -{ - int error = security_quota_on(path->dentry); - if (error) - return error; - /* Quota file not on the same filesystem? */ - if (path->mnt->mnt_sb != sb) - error = -EXDEV; - else - error = vfs_load_quota_inode(path->dentry->d_inode, type, - format_id, DQUOT_USAGE_ENABLED | - DQUOT_LIMITS_ENABLED); - return error; -} -EXPORT_SYMBOL(vfs_quota_on_path); - -int vfs_quota_on(struct super_block *sb, int type, int format_id, char *name, - int remount) -{ - struct path path; - int error; - - if (remount) - return vfs_quota_on_remount(sb, type); - - error = kern_path(name, LOOKUP_FOLLOW, &path); - if (!error) { - error = vfs_quota_on_path(sb, type, format_id, &path); - path_put(&path); - } - return error; -} -EXPORT_SYMBOL(vfs_quota_on); - -/* - * More powerful function for turning on quotas allowing setting - * of individual quota flags - */ -int vfs_quota_enable(struct inode *inode, int type, int format_id, - unsigned int flags) -{ - int ret = 0; - struct super_block *sb = inode->i_sb; - struct quota_info *dqopt = sb_dqopt(sb); - - /* Just unsuspend quotas? 
*/ - if (flags & DQUOT_SUSPENDED) - return vfs_quota_on_remount(sb, type); - if (!flags) - return 0; - /* Just updating flags needed? */ - if (sb_has_quota_loaded(sb, type)) { - mutex_lock(&dqopt->dqonoff_mutex); - /* Now do a reliable test... */ - if (!sb_has_quota_loaded(sb, type)) { - mutex_unlock(&dqopt->dqonoff_mutex); - goto load_quota; - } - if (flags & DQUOT_USAGE_ENABLED && - sb_has_quota_usage_enabled(sb, type)) { - ret = -EBUSY; - goto out_lock; - } - if (flags & DQUOT_LIMITS_ENABLED && - sb_has_quota_limits_enabled(sb, type)) { - ret = -EBUSY; - goto out_lock; - } - spin_lock(&dq_state_lock); - sb_dqopt(sb)->flags |= dquot_state_flag(flags, type); - spin_unlock(&dq_state_lock); -out_lock: - mutex_unlock(&dqopt->dqonoff_mutex); - return ret; - } - -load_quota: - return vfs_load_quota_inode(inode, type, format_id, flags); -} -EXPORT_SYMBOL(vfs_quota_enable); - -/* - * This function is used when filesystem needs to initialize quotas - * during mount time. - */ -int vfs_quota_on_mount(struct super_block *sb, char *qf_name, - int format_id, int type) -{ - struct dentry *dentry; - int error; - - dentry = lookup_one_len(qf_name, sb->s_root, strlen(qf_name)); - if (IS_ERR(dentry)) - return PTR_ERR(dentry); - - if (!dentry->d_inode) { - error = -ENOENT; - goto out; - } - - error = security_quota_on(dentry); - if (!error) - error = vfs_load_quota_inode(dentry->d_inode, type, format_id, - DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); - -out: - dput(dentry); - return error; -} -EXPORT_SYMBOL(vfs_quota_on_mount); - -/* Wrapper to turn on quotas when remounting rw */ -int vfs_dq_quota_on_remount(struct super_block *sb) -{ - int cnt; - int ret = 0, err; - - if (!sb->s_qcop || !sb->s_qcop->quota_on) - return -ENOSYS; - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - err = sb->s_qcop->quota_on(sb, cnt, 0, NULL, 1); - if (err < 0 && !ret) - ret = err; - } - return ret; -} -EXPORT_SYMBOL(vfs_dq_quota_on_remount); - -static inline qsize_t qbtos(qsize_t blocks) -{ - return blocks << QIF_DQBLKSIZE_BITS; -} - -static inline qsize_t stoqb(qsize_t space) -{ - return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS; -} - -/* Generic routine for getting common part of quota structure */ -static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di) -{ - struct mem_dqblk *dm = &dquot->dq_dqb; - - spin_lock(&dq_data_lock); - di->dqb_bhardlimit = stoqb(dm->dqb_bhardlimit); - di->dqb_bsoftlimit = stoqb(dm->dqb_bsoftlimit); - di->dqb_curspace = dm->dqb_curspace + dm->dqb_rsvspace; - di->dqb_ihardlimit = dm->dqb_ihardlimit; - di->dqb_isoftlimit = dm->dqb_isoftlimit; - di->dqb_curinodes = dm->dqb_curinodes; - di->dqb_btime = dm->dqb_btime; - di->dqb_itime = dm->dqb_itime; - di->dqb_valid = QIF_ALL; - spin_unlock(&dq_data_lock); -} - -int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di) -{ - struct dquot *dquot; - - dquot = dqget(sb, id, type); - if (dquot == NODQUOT) - return -ESRCH; - do_get_dqblk(dquot, di); - dqput(dquot); - - return 0; -} -EXPORT_SYMBOL(vfs_get_dqblk); - -/* Generic routine for setting common part of quota structure */ -static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di) -{ - struct mem_dqblk *dm = &dquot->dq_dqb; - int check_blim = 0, check_ilim = 0; - struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type]; - - if ((di->dqb_valid & QIF_BLIMITS && - (di->dqb_bhardlimit > dqi->dqi_maxblimit || - di->dqb_bsoftlimit > dqi->dqi_maxblimit)) || - (di->dqb_valid & QIF_ILIMITS && - (di->dqb_ihardlimit > dqi->dqi_maxilimit || - 
di->dqb_isoftlimit > dqi->dqi_maxilimit))) - return -ERANGE; - - spin_lock(&dq_data_lock); - if (di->dqb_valid & QIF_SPACE) { - dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace; - check_blim = 1; - __set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags); - } - if (di->dqb_valid & QIF_BLIMITS) { - dm->dqb_bsoftlimit = qbtos(di->dqb_bsoftlimit); - dm->dqb_bhardlimit = qbtos(di->dqb_bhardlimit); - check_blim = 1; - __set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags); - } - if (di->dqb_valid & QIF_INODES) { - dm->dqb_curinodes = di->dqb_curinodes; - check_ilim = 1; - __set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags); - } - if (di->dqb_valid & QIF_ILIMITS) { - dm->dqb_isoftlimit = di->dqb_isoftlimit; - dm->dqb_ihardlimit = di->dqb_ihardlimit; - check_ilim = 1; - __set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags); - } - if (di->dqb_valid & QIF_BTIME) { - dm->dqb_btime = di->dqb_btime; - check_blim = 1; - __set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags); - } - if (di->dqb_valid & QIF_ITIME) { - dm->dqb_itime = di->dqb_itime; - check_ilim = 1; - __set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags); - } - - if (check_blim) { - if (!dm->dqb_bsoftlimit || dm->dqb_curspace < dm->dqb_bsoftlimit) { - dm->dqb_btime = 0; - clear_bit(DQ_BLKS_B, &dquot->dq_flags); - } - else if (!(di->dqb_valid & QIF_BTIME)) /* Set grace only if user hasn't provided his own... */ - dm->dqb_btime = get_seconds() + dqi->dqi_bgrace; - } - if (check_ilim) { - if (!dm->dqb_isoftlimit || dm->dqb_curinodes < dm->dqb_isoftlimit) { - dm->dqb_itime = 0; - clear_bit(DQ_INODES_B, &dquot->dq_flags); - } - else if (!(di->dqb_valid & QIF_ITIME)) /* Set grace only if user hasn't provided his own... */ - dm->dqb_itime = get_seconds() + dqi->dqi_igrace; - } - if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit || dm->dqb_isoftlimit) - clear_bit(DQ_FAKE_B, &dquot->dq_flags); - else - set_bit(DQ_FAKE_B, &dquot->dq_flags); - spin_unlock(&dq_data_lock); - mark_dquot_dirty(dquot); - - return 0; -} - -int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di) -{ - struct dquot *dquot; - int rc; - - dquot = dqget(sb, id, type); - if (!dquot) { - rc = -ESRCH; - goto out; - } - rc = do_set_dqblk(dquot, di); - dqput(dquot); -out: - return rc; -} -EXPORT_SYMBOL(vfs_set_dqblk); - -/* Generic routine for getting common part of quota file information */ -int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) -{ - struct mem_dqinfo *mi; - - mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); - if (!sb_has_quota_active(sb, type)) { - mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); - return -ESRCH; - } - mi = sb_dqopt(sb)->info + type; - spin_lock(&dq_data_lock); - ii->dqi_bgrace = mi->dqi_bgrace; - ii->dqi_igrace = mi->dqi_igrace; - ii->dqi_flags = mi->dqi_flags & DQF_MASK; - ii->dqi_valid = IIF_ALL; - spin_unlock(&dq_data_lock); - mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); - return 0; -} -EXPORT_SYMBOL(vfs_get_dqinfo); - -/* Generic routine for setting common part of quota file information */ -int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) -{ - struct mem_dqinfo *mi; - int err = 0; - - mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); - if (!sb_has_quota_active(sb, type)) { - err = -ESRCH; - goto out; - } - mi = sb_dqopt(sb)->info + type; - spin_lock(&dq_data_lock); - if (ii->dqi_valid & IIF_BGRACE) - mi->dqi_bgrace = ii->dqi_bgrace; - if (ii->dqi_valid & IIF_IGRACE) - mi->dqi_igrace = ii->dqi_igrace; - if (ii->dqi_valid & IIF_FLAGS) - mi->dqi_flags = 
(mi->dqi_flags & ~DQF_MASK) | (ii->dqi_flags & DQF_MASK); - spin_unlock(&dq_data_lock); - mark_info_dirty(sb, type); - /* Force write to disk */ - sb->dq_op->write_info(sb, type); -out: - mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); - return err; -} -EXPORT_SYMBOL(vfs_set_dqinfo); - -struct quotactl_ops vfs_quotactl_ops = { - .quota_on = vfs_quota_on, - .quota_off = vfs_quota_off, - .quota_sync = vfs_quota_sync, - .get_info = vfs_get_dqinfo, - .set_info = vfs_set_dqinfo, - .get_dqblk = vfs_get_dqblk, - .set_dqblk = vfs_set_dqblk -}; - -static ctl_table fs_dqstats_table[] = { - { - .ctl_name = FS_DQ_LOOKUPS, - .procname = "lookups", - .data = &dqstats.lookups, - .maxlen = sizeof(int), - .mode = 0444, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = FS_DQ_DROPS, - .procname = "drops", - .data = &dqstats.drops, - .maxlen = sizeof(int), - .mode = 0444, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = FS_DQ_READS, - .procname = "reads", - .data = &dqstats.reads, - .maxlen = sizeof(int), - .mode = 0444, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = FS_DQ_WRITES, - .procname = "writes", - .data = &dqstats.writes, - .maxlen = sizeof(int), - .mode = 0444, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = FS_DQ_CACHE_HITS, - .procname = "cache_hits", - .data = &dqstats.cache_hits, - .maxlen = sizeof(int), - .mode = 0444, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = FS_DQ_ALLOCATED, - .procname = "allocated_dquots", - .data = &dqstats.allocated_dquots, - .maxlen = sizeof(int), - .mode = 0444, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = FS_DQ_FREE, - .procname = "free_dquots", - .data = &dqstats.free_dquots, - .maxlen = sizeof(int), - .mode = 0444, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = FS_DQ_SYNCS, - .procname = "syncs", - .data = &dqstats.syncs, - .maxlen = sizeof(int), - .mode = 0444, - .proc_handler = &proc_dointvec, - }, -#ifdef CONFIG_PRINT_QUOTA_WARNING - { - .ctl_name = FS_DQ_WARNINGS, - .procname = "warnings", - .data = &flag_print_warnings, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec, - }, -#endif - { .ctl_name = 0 }, -}; - -static ctl_table fs_table[] = { - { - .ctl_name = FS_DQSTATS, - .procname = "quota", - .mode = 0555, - .child = fs_dqstats_table, - }, - { .ctl_name = 0 }, -}; - -static ctl_table sys_table[] = { - { - .ctl_name = CTL_FS, - .procname = "fs", - .mode = 0555, - .child = fs_table, - }, - { .ctl_name = 0 }, -}; - -static int __init dquot_init(void) -{ - int i; - unsigned long nr_hash, order; - - printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__); - - register_sysctl_table(sys_table); - - dquot_cachep = kmem_cache_create("dquot", - sizeof(struct dquot), sizeof(unsigned long) * 4, - (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| - SLAB_MEM_SPREAD|SLAB_PANIC), - NULL); - - order = 0; - dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order); - if (!dquot_hash) - panic("Cannot create dquot hash table"); - - /* Find power-of-two hlist_heads which can fit into allocation */ - nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head); - dq_hash_bits = 0; - do { - dq_hash_bits++; - } while (nr_hash >> dq_hash_bits); - dq_hash_bits--; - - nr_hash = 1UL << dq_hash_bits; - dq_hash_mask = nr_hash - 1; - for (i = 0; i < nr_hash; i++) - INIT_HLIST_HEAD(dquot_hash + i); - - printk("Dquot-cache hash table entries: %ld (order %ld, %ld bytes)\n", - nr_hash, order, (PAGE_SIZE << order)); - - register_shrinker(&dqcache_shrinker); - -#ifdef 
CONFIG_QUOTA_NETLINK_INTERFACE - if (genl_register_family("a_genl_family) != 0) - printk(KERN_ERR "VFS: Failed to create quota netlink interface.\n"); -#endif - - return 0; -} -module_init(dquot_init); diff --git a/fs/quota.c b/fs/quota.c deleted file mode 100644 index d76ada9..0000000 --- a/fs/quota.c +++ /dev/null @@ -1,513 +0,0 @@ -/* - * Quota code necessary even when VFS quota support is not compiled - * into the kernel. The interesting stuff is over in dquot.c, here - * we have symbols for initial quotactl(2) handling, the sysctl(2) - * variables, etc - things needed even when quota support disabled. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* Check validity of generic quotactl commands */ -static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id) -{ - if (type >= MAXQUOTAS) - return -EINVAL; - if (!sb && cmd != Q_SYNC) - return -ENODEV; - /* Is operation supported? */ - if (sb && !sb->s_qcop) - return -ENOSYS; - - switch (cmd) { - case Q_GETFMT: - break; - case Q_QUOTAON: - if (!sb->s_qcop->quota_on) - return -ENOSYS; - break; - case Q_QUOTAOFF: - if (!sb->s_qcop->quota_off) - return -ENOSYS; - break; - case Q_SETINFO: - if (!sb->s_qcop->set_info) - return -ENOSYS; - break; - case Q_GETINFO: - if (!sb->s_qcop->get_info) - return -ENOSYS; - break; - case Q_SETQUOTA: - if (!sb->s_qcop->set_dqblk) - return -ENOSYS; - break; - case Q_GETQUOTA: - if (!sb->s_qcop->get_dqblk) - return -ENOSYS; - break; - case Q_SYNC: - if (sb && !sb->s_qcop->quota_sync) - return -ENOSYS; - break; - default: - return -EINVAL; - } - - /* Is quota turned on for commands which need it? */ - switch (cmd) { - case Q_GETFMT: - case Q_GETINFO: - case Q_SETINFO: - case Q_SETQUOTA: - case Q_GETQUOTA: - /* This is just informative test so we are satisfied without a lock */ - if (!sb_has_quota_active(sb, type)) - return -ESRCH; - } - - /* Check privileges */ - if (cmd == Q_GETQUOTA) { - if (((type == USRQUOTA && current_euid() != id) || - (type == GRPQUOTA && !in_egroup_p(id))) && - !capable(CAP_SYS_ADMIN)) - return -EPERM; - } - else if (cmd != Q_GETFMT && cmd != Q_SYNC && cmd != Q_GETINFO) - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - return 0; -} - -/* Check validity of XFS Quota Manager commands */ -static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id) -{ - if (type >= XQM_MAXQUOTAS) - return -EINVAL; - if (!sb) - return -ENODEV; - if (!sb->s_qcop) - return -ENOSYS; - - switch (cmd) { - case Q_XQUOTAON: - case Q_XQUOTAOFF: - case Q_XQUOTARM: - if (!sb->s_qcop->set_xstate) - return -ENOSYS; - break; - case Q_XGETQSTAT: - if (!sb->s_qcop->get_xstate) - return -ENOSYS; - break; - case Q_XSETQLIM: - if (!sb->s_qcop->set_xquota) - return -ENOSYS; - break; - case Q_XGETQUOTA: - if (!sb->s_qcop->get_xquota) - return -ENOSYS; - break; - case Q_XQUOTASYNC: - if (!sb->s_qcop->quota_sync) - return -ENOSYS; - break; - default: - return -EINVAL; - } - - /* Check privileges */ - if (cmd == Q_XGETQUOTA) { - if (((type == XQM_USRQUOTA && current_euid() != id) || - (type == XQM_GRPQUOTA && !in_egroup_p(id))) && - !capable(CAP_SYS_ADMIN)) - return -EPERM; - } else if (cmd != Q_XGETQSTAT && cmd != Q_XQUOTASYNC) { - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - } - - return 0; -} - -static int check_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id) -{ - int error; - - if (XQM_COMMAND(cmd)) - error = xqm_quotactl_valid(sb, type, cmd, id); - else - error = 
generic_quotactl_valid(sb, type, cmd, id); - if (!error) - error = security_quotactl(cmd, type, id, sb); - return error; -} - -static void quota_sync_sb(struct super_block *sb, int type) -{ - int cnt; - - sb->s_qcop->quota_sync(sb, type); - - if (sb_dqopt(sb)->flags & DQUOT_QUOTA_SYS_FILE) - return; - /* This is not very clever (and fast) but currently I don't know about - * any other simple way of getting quota data to disk and we must get - * them there for userspace to be visible... */ - if (sb->s_op->sync_fs) - sb->s_op->sync_fs(sb, 1); - sync_blockdev(sb->s_bdev); - - /* - * Now when everything is written we can discard the pagecache so - * that userspace sees the changes. - */ - mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (type != -1 && cnt != type) - continue; - if (!sb_has_quota_active(sb, cnt)) - continue; - mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex, I_MUTEX_QUOTA); - truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0); - mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex); - } - mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); -} - -void sync_dquots(struct super_block *sb, int type) -{ - int cnt; - - if (sb) { - if (sb->s_qcop->quota_sync) - quota_sync_sb(sb, type); - return; - } - - spin_lock(&sb_lock); -restart: - list_for_each_entry(sb, &super_blocks, s_list) { - /* This test just improves performance so it needn't be reliable... */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (type != -1 && type != cnt) - continue; - if (!sb_has_quota_active(sb, cnt)) - continue; - if (!info_dirty(&sb_dqopt(sb)->info[cnt]) && - list_empty(&sb_dqopt(sb)->info[cnt].dqi_dirty_list)) - continue; - break; - } - if (cnt == MAXQUOTAS) - continue; - sb->s_count++; - spin_unlock(&sb_lock); - down_read(&sb->s_umount); - if (sb->s_root && sb->s_qcop->quota_sync) - quota_sync_sb(sb, type); - up_read(&sb->s_umount); - spin_lock(&sb_lock); - if (__put_super_and_need_restart(sb)) - goto restart; - } - spin_unlock(&sb_lock); -} - -/* Copy parameters and call proper function */ -static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void __user *addr) -{ - int ret; - - switch (cmd) { - case Q_QUOTAON: { - char *pathname; - - if (IS_ERR(pathname = getname(addr))) - return PTR_ERR(pathname); - ret = sb->s_qcop->quota_on(sb, type, id, pathname, 0); - putname(pathname); - return ret; - } - case Q_QUOTAOFF: - return sb->s_qcop->quota_off(sb, type, 0); - - case Q_GETFMT: { - __u32 fmt; - - down_read(&sb_dqopt(sb)->dqptr_sem); - if (!sb_has_quota_active(sb, type)) { - up_read(&sb_dqopt(sb)->dqptr_sem); - return -ESRCH; - } - fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id; - up_read(&sb_dqopt(sb)->dqptr_sem); - if (copy_to_user(addr, &fmt, sizeof(fmt))) - return -EFAULT; - return 0; - } - case Q_GETINFO: { - struct if_dqinfo info; - - if ((ret = sb->s_qcop->get_info(sb, type, &info))) - return ret; - if (copy_to_user(addr, &info, sizeof(info))) - return -EFAULT; - return 0; - } - case Q_SETINFO: { - struct if_dqinfo info; - - if (copy_from_user(&info, addr, sizeof(info))) - return -EFAULT; - return sb->s_qcop->set_info(sb, type, &info); - } - case Q_GETQUOTA: { - struct if_dqblk idq; - - if ((ret = sb->s_qcop->get_dqblk(sb, type, id, &idq))) - return ret; - if (copy_to_user(addr, &idq, sizeof(idq))) - return -EFAULT; - return 0; - } - case Q_SETQUOTA: { - struct if_dqblk idq; - - if (copy_from_user(&idq, addr, sizeof(idq))) - return -EFAULT; - return sb->s_qcop->set_dqblk(sb, type, id, &idq); - } - case Q_SYNC: - sync_dquots(sb, 
type); - return 0; - - case Q_XQUOTAON: - case Q_XQUOTAOFF: - case Q_XQUOTARM: { - __u32 flags; - - if (copy_from_user(&flags, addr, sizeof(flags))) - return -EFAULT; - return sb->s_qcop->set_xstate(sb, flags, cmd); - } - case Q_XGETQSTAT: { - struct fs_quota_stat fqs; - - if ((ret = sb->s_qcop->get_xstate(sb, &fqs))) - return ret; - if (copy_to_user(addr, &fqs, sizeof(fqs))) - return -EFAULT; - return 0; - } - case Q_XSETQLIM: { - struct fs_disk_quota fdq; - - if (copy_from_user(&fdq, addr, sizeof(fdq))) - return -EFAULT; - return sb->s_qcop->set_xquota(sb, type, id, &fdq); - } - case Q_XGETQUOTA: { - struct fs_disk_quota fdq; - - if ((ret = sb->s_qcop->get_xquota(sb, type, id, &fdq))) - return ret; - if (copy_to_user(addr, &fdq, sizeof(fdq))) - return -EFAULT; - return 0; - } - case Q_XQUOTASYNC: - return sb->s_qcop->quota_sync(sb, type); - /* We never reach here unless validity check is broken */ - default: - BUG(); - } - return 0; -} - -/* - * look up a superblock on which quota ops will be performed - * - use the name of a block device to find the superblock thereon - */ -static inline struct super_block *quotactl_block(const char __user *special) -{ -#ifdef CONFIG_BLOCK - struct block_device *bdev; - struct super_block *sb; - char *tmp = getname(special); - - if (IS_ERR(tmp)) - return ERR_CAST(tmp); - bdev = lookup_bdev(tmp); - putname(tmp); - if (IS_ERR(bdev)) - return ERR_CAST(bdev); - sb = get_super(bdev); - bdput(bdev); - if (!sb) - return ERR_PTR(-ENODEV); - - return sb; -#else - return ERR_PTR(-ENODEV); -#endif -} - -/* - * This is the system call interface. This communicates with - * the user-level programs. Currently this only supports diskquota - * calls. Maybe we need to add the process quotas etc. in the future, - * but we probably should use rlimits for that. - */ -SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special, - qid_t, id, void __user *, addr) -{ - uint cmds, type; - struct super_block *sb = NULL; - int ret; - - cmds = cmd >> SUBCMDSHIFT; - type = cmd & SUBCMDMASK; - - if (cmds != Q_SYNC || special) { - sb = quotactl_block(special); - if (IS_ERR(sb)) - return PTR_ERR(sb); - } - - ret = check_quotactl_valid(sb, type, cmds, id); - if (ret >= 0) - ret = do_quotactl(sb, type, cmds, id, addr); - if (sb) - drop_super(sb); - - return ret; -} - -#if defined(CONFIG_COMPAT_FOR_U64_ALIGNMENT) -/* - * This code works only for 32 bit quota tools over 64 bit OS (x86_64, ia64) - * and is necessary due to alignment problems. 
- */ -struct compat_if_dqblk { - compat_u64 dqb_bhardlimit; - compat_u64 dqb_bsoftlimit; - compat_u64 dqb_curspace; - compat_u64 dqb_ihardlimit; - compat_u64 dqb_isoftlimit; - compat_u64 dqb_curinodes; - compat_u64 dqb_btime; - compat_u64 dqb_itime; - compat_uint_t dqb_valid; -}; - -/* XFS structures */ -struct compat_fs_qfilestat { - compat_u64 dqb_bhardlimit; - compat_u64 qfs_nblks; - compat_uint_t qfs_nextents; -}; - -struct compat_fs_quota_stat { - __s8 qs_version; - __u16 qs_flags; - __s8 qs_pad; - struct compat_fs_qfilestat qs_uquota; - struct compat_fs_qfilestat qs_gquota; - compat_uint_t qs_incoredqs; - compat_int_t qs_btimelimit; - compat_int_t qs_itimelimit; - compat_int_t qs_rtbtimelimit; - __u16 qs_bwarnlimit; - __u16 qs_iwarnlimit; -}; - -asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special, - qid_t id, void __user *addr) -{ - unsigned int cmds; - struct if_dqblk __user *dqblk; - struct compat_if_dqblk __user *compat_dqblk; - struct fs_quota_stat __user *fsqstat; - struct compat_fs_quota_stat __user *compat_fsqstat; - compat_uint_t data; - u16 xdata; - long ret; - - cmds = cmd >> SUBCMDSHIFT; - - switch (cmds) { - case Q_GETQUOTA: - dqblk = compat_alloc_user_space(sizeof(struct if_dqblk)); - compat_dqblk = addr; - ret = sys_quotactl(cmd, special, id, dqblk); - if (ret) - break; - if (copy_in_user(compat_dqblk, dqblk, sizeof(*compat_dqblk)) || - get_user(data, &dqblk->dqb_valid) || - put_user(data, &compat_dqblk->dqb_valid)) - ret = -EFAULT; - break; - case Q_SETQUOTA: - dqblk = compat_alloc_user_space(sizeof(struct if_dqblk)); - compat_dqblk = addr; - ret = -EFAULT; - if (copy_in_user(dqblk, compat_dqblk, sizeof(*compat_dqblk)) || - get_user(data, &compat_dqblk->dqb_valid) || - put_user(data, &dqblk->dqb_valid)) - break; - ret = sys_quotactl(cmd, special, id, dqblk); - break; - case Q_XGETQSTAT: - fsqstat = compat_alloc_user_space(sizeof(struct fs_quota_stat)); - compat_fsqstat = addr; - ret = sys_quotactl(cmd, special, id, fsqstat); - if (ret) - break; - ret = -EFAULT; - /* Copying qs_version, qs_flags, qs_pad */ - if (copy_in_user(compat_fsqstat, fsqstat, - offsetof(struct compat_fs_quota_stat, qs_uquota))) - break; - /* Copying qs_uquota */ - if (copy_in_user(&compat_fsqstat->qs_uquota, - &fsqstat->qs_uquota, - sizeof(compat_fsqstat->qs_uquota)) || - get_user(data, &fsqstat->qs_uquota.qfs_nextents) || - put_user(data, &compat_fsqstat->qs_uquota.qfs_nextents)) - break; - /* Copying qs_gquota */ - if (copy_in_user(&compat_fsqstat->qs_gquota, - &fsqstat->qs_gquota, - sizeof(compat_fsqstat->qs_gquota)) || - get_user(data, &fsqstat->qs_gquota.qfs_nextents) || - put_user(data, &compat_fsqstat->qs_gquota.qfs_nextents)) - break; - /* Copying the rest */ - if (copy_in_user(&compat_fsqstat->qs_incoredqs, - &fsqstat->qs_incoredqs, - sizeof(struct compat_fs_quota_stat) - - offsetof(struct compat_fs_quota_stat, qs_incoredqs)) || - get_user(xdata, &fsqstat->qs_iwarnlimit) || - put_user(xdata, &compat_fsqstat->qs_iwarnlimit)) - break; - ret = 0; - break; - default: - ret = sys_quotactl(cmd, special, id, addr); - } - return ret; -} -#endif diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig new file mode 100644 index 0000000..d9750d8 --- /dev/null +++ b/fs/quota/Kconfig @@ -0,0 +1,59 @@ +# +# Quota configuration +# + +config QUOTA + bool "Quota support" + help + If you say Y here, you will be able to set per user limits for disk + usage (also called disk quotas). Currently, it works for the + ext2, ext3, and reiserfs file system. 
ext3 also supports journalled + quotas for which you don't need to run quotacheck(8) after an unclean + shutdown. + For further details, read the Quota mini-HOWTO, available from + , or the documentation provided + with the quota tools. Probably the quota support is only useful for + multi user systems. If unsure, say N. + +config QUOTA_NETLINK_INTERFACE + bool "Report quota messages through netlink interface" + depends on QUOTA && NET + help + If you say Y here, quota warnings (about exceeding softlimit, reaching + hardlimit, etc.) will be reported through netlink interface. If unsure, + say Y. + +config PRINT_QUOTA_WARNING + bool "Print quota warnings to console (OBSOLETE)" + depends on QUOTA + default y + help + If you say Y here, quota warnings (about exceeding softlimit, reaching + hardlimit, etc.) will be printed to the process' controlling terminal. + Note that this behavior is currently deprecated and may go away in + future. Please use notification via netlink socket instead. + +# Generic support for tree structured quota files. Seleted when needed. +config QUOTA_TREE + tristate + +config QFMT_V1 + tristate "Old quota format support" + depends on QUOTA + help + This quota format was (is) used by kernels earlier than 2.4.22. If + you have quota working and you don't want to convert to new quota + format say Y here. + +config QFMT_V2 + tristate "Quota format v2 support" + depends on QUOTA + select QUOTA_TREE + help + This quota format allows using quotas with 32-bit UIDs/GIDs. If you + need this functionality say Y here. + +config QUOTACTL + bool + depends on XFS_QUOTA || QUOTA + default y diff --git a/fs/quota/Makefile b/fs/quota/Makefile new file mode 100644 index 0000000..385a083 --- /dev/null +++ b/fs/quota/Makefile @@ -0,0 +1,14 @@ +# +# Makefile for the Linux filesystems. +# +# 14 Sep 2000, Christoph Hellwig +# Rewritten to use lists instead of if-statements. +# + +obj-y := + +obj-$(CONFIG_QUOTA) += dquot.o +obj-$(CONFIG_QFMT_V1) += quota_v1.o +obj-$(CONFIG_QFMT_V2) += quota_v2.o +obj-$(CONFIG_QUOTA_TREE) += quota_tree.o +obj-$(CONFIG_QUOTACTL) += quota.o diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c new file mode 100644 index 0000000..28aa146 --- /dev/null +++ b/fs/quota/dquot.c @@ -0,0 +1,2564 @@ +/* + * Implementation of the diskquota system for the LINUX operating system. QUOTA + * is implemented using the BSD system call interface as the means of + * communication with the user level. This file contains the generic routines + * called by the different filesystems on allocation of an inode or block. + * These routines take care of the administration needed to have a consistent + * diskquota tracking system. The ideas of both user and group quotas are based + * on the Melbourne quota system as used on BSD derived systems. The internal + * implementation is based on one of the several variants of the LINUX + * inode-subsystem with added complexity of the diskquota system. + * + * Author: Marco van Wieringen + * + * Fixes: Dmitry Gorodchanin , 11 Feb 96 + * + * Revised list management to avoid races + * -- Bill Hawes, , 9/98 + * + * Fixed races in dquot_transfer(), dqget() and dquot_alloc_...(). + * As the consequence the locking was moved from dquot_decr_...(), + * dquot_incr_...() to calling functions. + * invalidate_dquots() now writes modified dquots. + * Serialized quota_off() and quota_on() for mount point. + * Fixed a few bugs in grow_dquots(). 
+ * Fixed deadlock in write_dquot() - we no longer account quotas on + * quota files + * remove_dquot_ref() moved to inode.c - it now traverses through inodes + * add_dquot_ref() restarts after blocking + * Added check for bogus uid and fixed check for group in quotactl. + * Jan Kara, , sponsored by SuSE CR, 10-11/99 + * + * Used struct list_head instead of own list struct + * Invalidation of referenced dquots is no longer possible + * Improved free_dquots list management + * Quota and i_blocks are now updated in one place to avoid races + * Warnings are now delayed so we won't block in critical section + * Write updated not to require dquot lock + * Jan Kara, , 9/2000 + * + * Added dynamic quota structure allocation + * Jan Kara 12/2000 + * + * Rewritten quota interface. Implemented new quota format and + * formats registering. + * Jan Kara, , 2001,2002 + * + * New SMP locking. + * Jan Kara, , 10/2002 + * + * Added journalled quota support, fix lock inversion problems + * Jan Kara, , 2003,2004 + * + * (C) Copyright 1994 - 1997 Marco van Wieringen + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* for inode_lock, oddly enough.. */ +#ifdef CONFIG_QUOTA_NETLINK_INTERFACE +#include +#include +#endif + +#include + +#define __DQUOT_PARANOIA + +/* + * There are three quota SMP locks. dq_list_lock protects all lists with quotas + * and quota formats, dqstats structure containing statistics about the lists + * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures and + * also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes. + * i_blocks and i_bytes updates itself are guarded by i_lock acquired directly + * in inode_add_bytes() and inode_sub_bytes(). dq_state_lock protects + * modifications of quota state (on quotaon and quotaoff) and readers who care + * about latest values take it as well. + * + * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock, + * dq_list_lock > dq_state_lock + * + * Note that some things (eg. sb pointer, type, id) doesn't change during + * the life of the dquot structure and so needn't to be protected by a lock + * + * Any operation working on dquots via inode pointers must hold dqptr_sem. If + * operation is just reading pointers from inode (or not using them at all) the + * read lock is enough. If pointers are altered function must hold write lock + * (these locking rules also apply for S_NOQUOTA flag in the inode - note that + * for altering the flag i_mutex is also needed). + * + * Each dquot has its dq_lock mutex. Locked dquots might not be referenced + * from inodes (dquot_alloc_space() and such don't check the dq_lock). + * Currently dquot is locked only when it is being read to memory (or space for + * it is being allocated) on the first dqget() and when it is being released on + * the last dqput(). The allocation and release oparations are serialized by + * the dq_lock and by checking the use count in dquot_release(). Write + * operations on dquots don't hold dq_lock as they copy data under dq_data_lock + * spinlock to internal buffers before writing. + * + * Lock ordering (including related VFS locks) is the following: + * i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock > + * dqio_mutex + * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem > + * dqptr_sem. 
But filesystem has to count with the fact that functions such as + * dquot_alloc_space() acquire dqptr_sem and they usually have to be called + * from inside a transaction to keep filesystem consistency after a crash. Also + * filesystems usually want to do some IO on dquot from ->mark_dirty which is + * called with dqptr_sem held. + * i_mutex on quota files is special (it's below dqio_mutex) + */ + +static DEFINE_SPINLOCK(dq_list_lock); +static DEFINE_SPINLOCK(dq_state_lock); +DEFINE_SPINLOCK(dq_data_lock); +EXPORT_SYMBOL(dq_data_lock); + +static char *quotatypes[] = INITQFNAMES; +static struct quota_format_type *quota_formats; /* List of registered formats */ +static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES; + +/* SLAB cache for dquot structures */ +static struct kmem_cache *dquot_cachep; + +int register_quota_format(struct quota_format_type *fmt) +{ + spin_lock(&dq_list_lock); + fmt->qf_next = quota_formats; + quota_formats = fmt; + spin_unlock(&dq_list_lock); + return 0; +} +EXPORT_SYMBOL(register_quota_format); + +void unregister_quota_format(struct quota_format_type *fmt) +{ + struct quota_format_type **actqf; + + spin_lock(&dq_list_lock); + for (actqf = "a_formats; *actqf && *actqf != fmt; actqf = &(*actqf)->qf_next); + if (*actqf) + *actqf = (*actqf)->qf_next; + spin_unlock(&dq_list_lock); +} +EXPORT_SYMBOL(unregister_quota_format); + +static struct quota_format_type *find_quota_format(int id) +{ + struct quota_format_type *actqf; + + spin_lock(&dq_list_lock); + for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next); + if (!actqf || !try_module_get(actqf->qf_owner)) { + int qm; + + spin_unlock(&dq_list_lock); + + for (qm = 0; module_names[qm].qm_fmt_id && module_names[qm].qm_fmt_id != id; qm++); + if (!module_names[qm].qm_fmt_id || request_module(module_names[qm].qm_mod_name)) + return NULL; + + spin_lock(&dq_list_lock); + for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next); + if (actqf && !try_module_get(actqf->qf_owner)) + actqf = NULL; + } + spin_unlock(&dq_list_lock); + return actqf; +} + +static void put_quota_format(struct quota_format_type *fmt) +{ + module_put(fmt->qf_owner); +} + +/* + * Dquot List Management: + * The quota code uses three lists for dquot management: the inuse_list, + * free_dquots, and dquot_hash[] array. A single dquot structure may be + * on all three lists, depending on its current state. + * + * All dquots are placed to the end of inuse_list when first created, and this + * list is used for invalidate operation, which must look at every dquot. + * + * Unused dquots (dq_count == 0) are added to the free_dquots list when freed, + * and this list is searched whenever we need an available dquot. Dquots are + * removed from the list as soon as they are used again, and + * dqstats.free_dquots gives the number of dquots on the list. When + * dquot is invalidated it's completely released from memory. + * + * Dquots with a specific identity (device, type and id) are placed on + * one of the dquot_hash[] hash chains. The provides an efficient search + * mechanism to locate a specific dquot. 
+ */ + +static LIST_HEAD(inuse_list); +static LIST_HEAD(free_dquots); +static unsigned int dq_hash_bits, dq_hash_mask; +static struct hlist_head *dquot_hash; + +struct dqstats dqstats; +EXPORT_SYMBOL(dqstats); + +static inline unsigned int +hashfn(const struct super_block *sb, unsigned int id, int type) +{ + unsigned long tmp; + + tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type); + return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask; +} + +/* + * Following list functions expect dq_list_lock to be held + */ +static inline void insert_dquot_hash(struct dquot *dquot) +{ + struct hlist_head *head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type); + hlist_add_head(&dquot->dq_hash, head); +} + +static inline void remove_dquot_hash(struct dquot *dquot) +{ + hlist_del_init(&dquot->dq_hash); +} + +static inline struct dquot *find_dquot(unsigned int hashent, struct super_block *sb, unsigned int id, int type) +{ + struct hlist_node *node; + struct dquot *dquot; + + hlist_for_each (node, dquot_hash+hashent) { + dquot = hlist_entry(node, struct dquot, dq_hash); + if (dquot->dq_sb == sb && dquot->dq_id == id && dquot->dq_type == type) + return dquot; + } + return NODQUOT; +} + +/* Add a dquot to the tail of the free list */ +static inline void put_dquot_last(struct dquot *dquot) +{ + list_add_tail(&dquot->dq_free, &free_dquots); + dqstats.free_dquots++; +} + +static inline void remove_free_dquot(struct dquot *dquot) +{ + if (list_empty(&dquot->dq_free)) + return; + list_del_init(&dquot->dq_free); + dqstats.free_dquots--; +} + +static inline void put_inuse(struct dquot *dquot) +{ + /* We add to the back of inuse list so we don't have to restart + * when traversing this list and we block */ + list_add_tail(&dquot->dq_inuse, &inuse_list); + dqstats.allocated_dquots++; +} + +static inline void remove_inuse(struct dquot *dquot) +{ + dqstats.allocated_dquots--; + list_del(&dquot->dq_inuse); +} +/* + * End of list functions needing dq_list_lock + */ + +static void wait_on_dquot(struct dquot *dquot) +{ + mutex_lock(&dquot->dq_lock); + mutex_unlock(&dquot->dq_lock); +} + +static inline int dquot_dirty(struct dquot *dquot) +{ + return test_bit(DQ_MOD_B, &dquot->dq_flags); +} + +static inline int mark_dquot_dirty(struct dquot *dquot) +{ + return dquot->dq_sb->dq_op->mark_dirty(dquot); +} + +int dquot_mark_dquot_dirty(struct dquot *dquot) +{ + spin_lock(&dq_list_lock); + if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) + list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)-> + info[dquot->dq_type].dqi_dirty_list); + spin_unlock(&dq_list_lock); + return 0; +} +EXPORT_SYMBOL(dquot_mark_dquot_dirty); + +/* This function needs dq_list_lock */ +static inline int clear_dquot_dirty(struct dquot *dquot) +{ + if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags)) + return 0; + list_del_init(&dquot->dq_dirty); + return 1; +} + +void mark_info_dirty(struct super_block *sb, int type) +{ + set_bit(DQF_INFO_DIRTY_B, &sb_dqopt(sb)->info[type].dqi_flags); +} +EXPORT_SYMBOL(mark_info_dirty); + +/* + * Read dquot from disk and alloc space for it + */ + +int dquot_acquire(struct dquot *dquot) +{ + int ret = 0, ret2 = 0; + struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); + + mutex_lock(&dquot->dq_lock); + mutex_lock(&dqopt->dqio_mutex); + if (!test_bit(DQ_READ_B, &dquot->dq_flags)) + ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot); + if (ret < 0) + goto out_iolock; + set_bit(DQ_READ_B, &dquot->dq_flags); + /* Instantiate dquot if needed */ + if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && 
!dquot->dq_off) { + ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot); + /* Write the info if needed */ + if (info_dirty(&dqopt->info[dquot->dq_type])) + ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type); + if (ret < 0) + goto out_iolock; + if (ret2 < 0) { + ret = ret2; + goto out_iolock; + } + } + set_bit(DQ_ACTIVE_B, &dquot->dq_flags); +out_iolock: + mutex_unlock(&dqopt->dqio_mutex); + mutex_unlock(&dquot->dq_lock); + return ret; +} +EXPORT_SYMBOL(dquot_acquire); + +/* + * Write dquot to disk + */ +int dquot_commit(struct dquot *dquot) +{ + int ret = 0, ret2 = 0; + struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); + + mutex_lock(&dqopt->dqio_mutex); + spin_lock(&dq_list_lock); + if (!clear_dquot_dirty(dquot)) { + spin_unlock(&dq_list_lock); + goto out_sem; + } + spin_unlock(&dq_list_lock); + /* Inactive dquot can be only if there was error during read/init + * => we have better not writing it */ + if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { + ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot); + if (info_dirty(&dqopt->info[dquot->dq_type])) + ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type); + if (ret >= 0) + ret = ret2; + } +out_sem: + mutex_unlock(&dqopt->dqio_mutex); + return ret; +} +EXPORT_SYMBOL(dquot_commit); + +/* + * Release dquot + */ +int dquot_release(struct dquot *dquot) +{ + int ret = 0, ret2 = 0; + struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); + + mutex_lock(&dquot->dq_lock); + /* Check whether we are not racing with some other dqget() */ + if (atomic_read(&dquot->dq_count) > 1) + goto out_dqlock; + mutex_lock(&dqopt->dqio_mutex); + if (dqopt->ops[dquot->dq_type]->release_dqblk) { + ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot); + /* Write the info */ + if (info_dirty(&dqopt->info[dquot->dq_type])) + ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type); + if (ret >= 0) + ret = ret2; + } + clear_bit(DQ_ACTIVE_B, &dquot->dq_flags); + mutex_unlock(&dqopt->dqio_mutex); +out_dqlock: + mutex_unlock(&dquot->dq_lock); + return ret; +} +EXPORT_SYMBOL(dquot_release); + +void dquot_destroy(struct dquot *dquot) +{ + kmem_cache_free(dquot_cachep, dquot); +} +EXPORT_SYMBOL(dquot_destroy); + +static inline void do_destroy_dquot(struct dquot *dquot) +{ + dquot->dq_sb->dq_op->destroy_dquot(dquot); +} + +/* Invalidate all dquots on the list. Note that this function is called after + * quota is disabled and pointers from inodes removed so there cannot be new + * quota users. There can still be some users of quotas due to inodes being + * just deleted or pruned by prune_icache() (those are not attached to any + * list) or parallel quotactl call. We have to wait for such users. + */ +static void invalidate_dquots(struct super_block *sb, int type) +{ + struct dquot *dquot, *tmp; + +restart: + spin_lock(&dq_list_lock); + list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) { + if (dquot->dq_sb != sb) + continue; + if (dquot->dq_type != type) + continue; + /* Wait for dquot users */ + if (atomic_read(&dquot->dq_count)) { + DEFINE_WAIT(wait); + + atomic_inc(&dquot->dq_count); + prepare_to_wait(&dquot->dq_wait_unused, &wait, + TASK_UNINTERRUPTIBLE); + spin_unlock(&dq_list_lock); + /* Once dqput() wakes us up, we know it's time to free + * the dquot. + * IMPORTANT: we rely on the fact that there is always + * at most one process waiting for dquot to free. + * Otherwise dq_count would be > 1 and we would never + * wake up. 
+ */ + if (atomic_read(&dquot->dq_count) > 1) + schedule(); + finish_wait(&dquot->dq_wait_unused, &wait); + dqput(dquot); + /* At this moment dquot() need not exist (it could be + * reclaimed by prune_dqcache(). Hence we must + * restart. */ + goto restart; + } + /* + * Quota now has no users and it has been written on last + * dqput() + */ + remove_dquot_hash(dquot); + remove_free_dquot(dquot); + remove_inuse(dquot); + do_destroy_dquot(dquot); + } + spin_unlock(&dq_list_lock); +} + +/* Call callback for every active dquot on given filesystem */ +int dquot_scan_active(struct super_block *sb, + int (*fn)(struct dquot *dquot, unsigned long priv), + unsigned long priv) +{ + struct dquot *dquot, *old_dquot = NULL; + int ret = 0; + + mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); + spin_lock(&dq_list_lock); + list_for_each_entry(dquot, &inuse_list, dq_inuse) { + if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) + continue; + if (dquot->dq_sb != sb) + continue; + /* Now we have active dquot so we can just increase use count */ + atomic_inc(&dquot->dq_count); + dqstats.lookups++; + spin_unlock(&dq_list_lock); + dqput(old_dquot); + old_dquot = dquot; + ret = fn(dquot, priv); + if (ret < 0) + goto out; + spin_lock(&dq_list_lock); + /* We are safe to continue now because our dquot could not + * be moved out of the inuse list while we hold the reference */ + } + spin_unlock(&dq_list_lock); +out: + dqput(old_dquot); + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); + return ret; +} +EXPORT_SYMBOL(dquot_scan_active); + +int vfs_quota_sync(struct super_block *sb, int type) +{ + struct list_head *dirty; + struct dquot *dquot; + struct quota_info *dqopt = sb_dqopt(sb); + int cnt; + + mutex_lock(&dqopt->dqonoff_mutex); + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (type != -1 && cnt != type) + continue; + if (!sb_has_quota_active(sb, cnt)) + continue; + spin_lock(&dq_list_lock); + dirty = &dqopt->info[cnt].dqi_dirty_list; + while (!list_empty(dirty)) { + dquot = list_first_entry(dirty, struct dquot, dq_dirty); + /* Dirty and inactive can be only bad dquot... 
*/ + if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { + clear_dquot_dirty(dquot); + continue; + } + /* Now we have active dquot from which someone is + * holding reference so we can safely just increase + * use count */ + atomic_inc(&dquot->dq_count); + dqstats.lookups++; + spin_unlock(&dq_list_lock); + sb->dq_op->write_dquot(dquot); + dqput(dquot); + spin_lock(&dq_list_lock); + } + spin_unlock(&dq_list_lock); + } + + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt) + && info_dirty(&dqopt->info[cnt])) + sb->dq_op->write_info(sb, cnt); + spin_lock(&dq_list_lock); + dqstats.syncs++; + spin_unlock(&dq_list_lock); + mutex_unlock(&dqopt->dqonoff_mutex); + + return 0; +} +EXPORT_SYMBOL(vfs_quota_sync); + +/* Free unused dquots from cache */ +static void prune_dqcache(int count) +{ + struct list_head *head; + struct dquot *dquot; + + head = free_dquots.prev; + while (head != &free_dquots && count) { + dquot = list_entry(head, struct dquot, dq_free); + remove_dquot_hash(dquot); + remove_free_dquot(dquot); + remove_inuse(dquot); + do_destroy_dquot(dquot); + count--; + head = free_dquots.prev; + } +} + +/* + * This is called from kswapd when we think we need some + * more memory + */ + +static int shrink_dqcache_memory(int nr, gfp_t gfp_mask) +{ + if (nr) { + spin_lock(&dq_list_lock); + prune_dqcache(nr); + spin_unlock(&dq_list_lock); + } + return (dqstats.free_dquots / 100) * sysctl_vfs_cache_pressure; +} + +static struct shrinker dqcache_shrinker = { + .shrink = shrink_dqcache_memory, + .seeks = DEFAULT_SEEKS, +}; + +/* + * Put reference to dquot + * NOTE: If you change this function please check whether dqput_blocks() works right... + */ +void dqput(struct dquot *dquot) +{ + int ret; + + if (!dquot) + return; +#ifdef __DQUOT_PARANOIA + if (!atomic_read(&dquot->dq_count)) { + printk("VFS: dqput: trying to free free dquot\n"); + printk("VFS: device %s, dquot of %s %d\n", + dquot->dq_sb->s_id, + quotatypes[dquot->dq_type], + dquot->dq_id); + BUG(); + } +#endif + + spin_lock(&dq_list_lock); + dqstats.drops++; + spin_unlock(&dq_list_lock); +we_slept: + spin_lock(&dq_list_lock); + if (atomic_read(&dquot->dq_count) > 1) { + /* We have more than one user... nothing to do */ + atomic_dec(&dquot->dq_count); + /* Releasing dquot during quotaoff phase? */ + if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_type) && + atomic_read(&dquot->dq_count) == 1) + wake_up(&dquot->dq_wait_unused); + spin_unlock(&dq_list_lock); + return; + } + /* Need to release dquot? */ + if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && dquot_dirty(dquot)) { + spin_unlock(&dq_list_lock); + /* Commit dquot before releasing */ + ret = dquot->dq_sb->dq_op->write_dquot(dquot); + if (ret < 0) { + printk(KERN_ERR "VFS: cannot write quota structure on " + "device %s (error %d). 
Quota may get out of " + "sync!\n", dquot->dq_sb->s_id, ret); + /* + * We clear dirty bit anyway, so that we avoid + * infinite loop here + */ + spin_lock(&dq_list_lock); + clear_dquot_dirty(dquot); + spin_unlock(&dq_list_lock); + } + goto we_slept; + } + /* Clear flag in case dquot was inactive (something bad happened) */ + clear_dquot_dirty(dquot); + if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { + spin_unlock(&dq_list_lock); + dquot->dq_sb->dq_op->release_dquot(dquot); + goto we_slept; + } + atomic_dec(&dquot->dq_count); +#ifdef __DQUOT_PARANOIA + /* sanity check */ + BUG_ON(!list_empty(&dquot->dq_free)); +#endif + put_dquot_last(dquot); + spin_unlock(&dq_list_lock); +} +EXPORT_SYMBOL(dqput); + +struct dquot *dquot_alloc(struct super_block *sb, int type) +{ + return kmem_cache_zalloc(dquot_cachep, GFP_NOFS); +} +EXPORT_SYMBOL(dquot_alloc); + +static struct dquot *get_empty_dquot(struct super_block *sb, int type) +{ + struct dquot *dquot; + + dquot = sb->dq_op->alloc_dquot(sb, type); + if(!dquot) + return NODQUOT; + + mutex_init(&dquot->dq_lock); + INIT_LIST_HEAD(&dquot->dq_free); + INIT_LIST_HEAD(&dquot->dq_inuse); + INIT_HLIST_NODE(&dquot->dq_hash); + INIT_LIST_HEAD(&dquot->dq_dirty); + init_waitqueue_head(&dquot->dq_wait_unused); + dquot->dq_sb = sb; + dquot->dq_type = type; + atomic_set(&dquot->dq_count, 1); + + return dquot; +} + +/* + * Get reference to dquot + * + * Locking is slightly tricky here. We are guarded from parallel quotaoff() + * destroying our dquot by: + * a) checking for quota flags under dq_list_lock and + * b) getting a reference to dquot before we release dq_list_lock + */ +struct dquot *dqget(struct super_block *sb, unsigned int id, int type) +{ + unsigned int hashent = hashfn(sb, id, type); + struct dquot *dquot = NODQUOT, *empty = NODQUOT; + + if (!sb_has_quota_active(sb, type)) + return NODQUOT; +we_slept: + spin_lock(&dq_list_lock); + spin_lock(&dq_state_lock); + if (!sb_has_quota_active(sb, type)) { + spin_unlock(&dq_state_lock); + spin_unlock(&dq_list_lock); + goto out; + } + spin_unlock(&dq_state_lock); + + if ((dquot = find_dquot(hashent, sb, id, type)) == NODQUOT) { + if (empty == NODQUOT) { + spin_unlock(&dq_list_lock); + if ((empty = get_empty_dquot(sb, type)) == NODQUOT) + schedule(); /* Try to wait for a moment... */ + goto we_slept; + } + dquot = empty; + empty = NODQUOT; + dquot->dq_id = id; + /* all dquots go on the inuse_list */ + put_inuse(dquot); + /* hash it first so it can be found */ + insert_dquot_hash(dquot); + dqstats.lookups++; + spin_unlock(&dq_list_lock); + } else { + if (!atomic_read(&dquot->dq_count)) + remove_free_dquot(dquot); + atomic_inc(&dquot->dq_count); + dqstats.cache_hits++; + dqstats.lookups++; + spin_unlock(&dq_list_lock); + } + /* Wait for dq_lock - after this we know that either dquot_release() is already + * finished or it will be canceled due to dq_count > 1 test */ + wait_on_dquot(dquot); + /* Read the dquot and instantiate it (everything done only if needed) */ + if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && sb->dq_op->acquire_dquot(dquot) < 0) { + dqput(dquot); + dquot = NODQUOT; + goto out; + } +#ifdef __DQUOT_PARANOIA + BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? 
*/ +#endif +out: + if (empty) + do_destroy_dquot(empty); + + return dquot; +} +EXPORT_SYMBOL(dqget); + +static int dqinit_needed(struct inode *inode, int type) +{ + int cnt; + + if (IS_NOQUOTA(inode)) + return 0; + if (type != -1) + return inode->i_dquot[type] == NODQUOT; + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + if (inode->i_dquot[cnt] == NODQUOT) + return 1; + return 0; +} + +/* This routine is guarded by dqonoff_mutex mutex */ +static void add_dquot_ref(struct super_block *sb, int type) +{ + struct inode *inode, *old_inode = NULL; + + spin_lock(&inode_lock); + list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { + if (!atomic_read(&inode->i_writecount)) + continue; + if (!dqinit_needed(inode, type)) + continue; + if (inode->i_state & (I_FREEING|I_WILL_FREE)) + continue; + + __iget(inode); + spin_unlock(&inode_lock); + + iput(old_inode); + sb->dq_op->initialize(inode, type); + /* We hold a reference to 'inode' so it couldn't have been + * removed from s_inodes list while we dropped the inode_lock. + * We cannot iput the inode now as we can be holding the last + * reference and we cannot iput it under inode_lock. So we + * keep the reference and iput it later. */ + old_inode = inode; + spin_lock(&inode_lock); + } + spin_unlock(&inode_lock); + iput(old_inode); +} + +/* Return 0 if dqput() won't block (note that 1 doesn't necessarily mean blocking) */ +static inline int dqput_blocks(struct dquot *dquot) +{ + if (atomic_read(&dquot->dq_count) <= 1) + return 1; + return 0; +} + +/* Remove references to dquots from inode - add dquot to list for freeing if needed */ +/* We can't race with anybody because we hold dqptr_sem for writing... */ +static int remove_inode_dquot_ref(struct inode *inode, int type, + struct list_head *tofree_head) +{ + struct dquot *dquot = inode->i_dquot[type]; + + inode->i_dquot[type] = NODQUOT; + if (dquot != NODQUOT) { + if (dqput_blocks(dquot)) { +#ifdef __DQUOT_PARANOIA + if (atomic_read(&dquot->dq_count) != 1) + printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count)); +#endif + spin_lock(&dq_list_lock); + list_add(&dquot->dq_free, tofree_head); /* As dquot must have currently users it can't be on the free list... */ + spin_unlock(&dq_list_lock); + return 1; + } + else + dqput(dquot); /* We have guaranteed we won't block */ + } + return 0; +} + +/* Free list of dquots - called from inode.c */ +/* dquots are removed from inodes, no new references can be got so we are the only ones holding reference */ +static void put_dquot_list(struct list_head *tofree_head) +{ + struct list_head *act_head; + struct dquot *dquot; + + act_head = tofree_head->next; + /* So now we have dquots on the list... Just free them */ + while (act_head != tofree_head) { + dquot = list_entry(act_head, struct dquot, dq_free); + act_head = act_head->next; + list_del_init(&dquot->dq_free); /* Remove dquot from the list so we won't have problems... 
*/ + dqput(dquot); + } +} + +static void remove_dquot_ref(struct super_block *sb, int type, + struct list_head *tofree_head) +{ + struct inode *inode; + + spin_lock(&inode_lock); + list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { + if (!IS_NOQUOTA(inode)) + remove_inode_dquot_ref(inode, type, tofree_head); + } + spin_unlock(&inode_lock); +} + +/* Gather all references from inodes and drop them */ +static void drop_dquot_ref(struct super_block *sb, int type) +{ + LIST_HEAD(tofree_head); + + if (sb->dq_op) { + down_write(&sb_dqopt(sb)->dqptr_sem); + remove_dquot_ref(sb, type, &tofree_head); + up_write(&sb_dqopt(sb)->dqptr_sem); + put_dquot_list(&tofree_head); + } +} + +static inline void dquot_incr_inodes(struct dquot *dquot, qsize_t number) +{ + dquot->dq_dqb.dqb_curinodes += number; +} + +static inline void dquot_incr_space(struct dquot *dquot, qsize_t number) +{ + dquot->dq_dqb.dqb_curspace += number; +} + +static inline void dquot_resv_space(struct dquot *dquot, qsize_t number) +{ + dquot->dq_dqb.dqb_rsvspace += number; +} + +/* + * Claim reserved quota space + */ +static void dquot_claim_reserved_space(struct dquot *dquot, + qsize_t number) +{ + WARN_ON(dquot->dq_dqb.dqb_rsvspace < number); + dquot->dq_dqb.dqb_curspace += number; + dquot->dq_dqb.dqb_rsvspace -= number; +} + +static inline +void dquot_free_reserved_space(struct dquot *dquot, qsize_t number) +{ + dquot->dq_dqb.dqb_rsvspace -= number; +} + +static inline void dquot_decr_inodes(struct dquot *dquot, qsize_t number) +{ + if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE || + dquot->dq_dqb.dqb_curinodes >= number) + dquot->dq_dqb.dqb_curinodes -= number; + else + dquot->dq_dqb.dqb_curinodes = 0; + if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit) + dquot->dq_dqb.dqb_itime = (time_t) 0; + clear_bit(DQ_INODES_B, &dquot->dq_flags); +} + +static inline void dquot_decr_space(struct dquot *dquot, qsize_t number) +{ + if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE || + dquot->dq_dqb.dqb_curspace >= number) + dquot->dq_dqb.dqb_curspace -= number; + else + dquot->dq_dqb.dqb_curspace = 0; + if (dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit) + dquot->dq_dqb.dqb_btime = (time_t) 0; + clear_bit(DQ_BLKS_B, &dquot->dq_flags); +} + +static int warning_issued(struct dquot *dquot, const int warntype) +{ + int flag = (warntype == QUOTA_NL_BHARDWARN || + warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B : + ((warntype == QUOTA_NL_IHARDWARN || + warntype == QUOTA_NL_ISOFTLONGWARN) ? 
DQ_INODES_B : 0); + + if (!flag) + return 0; + return test_and_set_bit(flag, &dquot->dq_flags); +} + +#ifdef CONFIG_PRINT_QUOTA_WARNING +static int flag_print_warnings = 1; + +static inline int need_print_warning(struct dquot *dquot) +{ + if (!flag_print_warnings) + return 0; + + switch (dquot->dq_type) { + case USRQUOTA: + return current_fsuid() == dquot->dq_id; + case GRPQUOTA: + return in_group_p(dquot->dq_id); + } + return 0; +} + +/* Print warning to user which exceeded quota */ +static void print_warning(struct dquot *dquot, const int warntype) +{ + char *msg = NULL; + struct tty_struct *tty; + + if (warntype == QUOTA_NL_IHARDBELOW || + warntype == QUOTA_NL_ISOFTBELOW || + warntype == QUOTA_NL_BHARDBELOW || + warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(dquot)) + return; + + tty = get_current_tty(); + if (!tty) + return; + tty_write_message(tty, dquot->dq_sb->s_id); + if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN) + tty_write_message(tty, ": warning, "); + else + tty_write_message(tty, ": write failed, "); + tty_write_message(tty, quotatypes[dquot->dq_type]); + switch (warntype) { + case QUOTA_NL_IHARDWARN: + msg = " file limit reached.\r\n"; + break; + case QUOTA_NL_ISOFTLONGWARN: + msg = " file quota exceeded too long.\r\n"; + break; + case QUOTA_NL_ISOFTWARN: + msg = " file quota exceeded.\r\n"; + break; + case QUOTA_NL_BHARDWARN: + msg = " block limit reached.\r\n"; + break; + case QUOTA_NL_BSOFTLONGWARN: + msg = " block quota exceeded too long.\r\n"; + break; + case QUOTA_NL_BSOFTWARN: + msg = " block quota exceeded.\r\n"; + break; + } + tty_write_message(tty, msg); + tty_kref_put(tty); +} +#endif + +#ifdef CONFIG_QUOTA_NETLINK_INTERFACE + +/* Netlink family structure for quota */ +static struct genl_family quota_genl_family = { + .id = GENL_ID_GENERATE, + .hdrsize = 0, + .name = "VFS_DQUOT", + .version = 1, + .maxattr = QUOTA_NL_A_MAX, +}; + +/* Send warning to userspace about user which exceeded quota */ +static void send_warning(const struct dquot *dquot, const char warntype) +{ + static atomic_t seq; + struct sk_buff *skb; + void *msg_head; + int ret; + int msg_size = 4 * nla_total_size(sizeof(u32)) + + 2 * nla_total_size(sizeof(u64)); + + /* We have to allocate using GFP_NOFS as we are called from a + * filesystem performing write and thus further recursion into + * the fs to free some data could cause deadlocks. 
*/ + skb = genlmsg_new(msg_size, GFP_NOFS); + if (!skb) { + printk(KERN_ERR + "VFS: Not enough memory to send quota warning.\n"); + return; + } + msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq), + "a_genl_family, 0, QUOTA_NL_C_WARNING); + if (!msg_head) { + printk(KERN_ERR + "VFS: Cannot store netlink header in quota warning.\n"); + goto err_out; + } + ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, dquot->dq_type); + if (ret) + goto attr_err_out; + ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, dquot->dq_id); + if (ret) + goto attr_err_out; + ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype); + if (ret) + goto attr_err_out; + ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR, + MAJOR(dquot->dq_sb->s_dev)); + if (ret) + goto attr_err_out; + ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, + MINOR(dquot->dq_sb->s_dev)); + if (ret) + goto attr_err_out; + ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid()); + if (ret) + goto attr_err_out; + genlmsg_end(skb, msg_head); + + ret = genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS); + if (ret < 0 && ret != -ESRCH) + printk(KERN_ERR + "VFS: Failed to send notification message: %d\n", ret); + return; +attr_err_out: + printk(KERN_ERR "VFS: Not enough space to compose quota message!\n"); +err_out: + kfree_skb(skb); +} +#endif +/* + * Write warnings to the console and send warning messages over netlink. + * + * Note that this function can sleep. + */ +static inline void flush_warnings(struct dquot * const *dquots, char *warntype) +{ + int i; + + for (i = 0; i < MAXQUOTAS; i++) + if (dquots[i] != NODQUOT && warntype[i] != QUOTA_NL_NOWARN && + !warning_issued(dquots[i], warntype[i])) { +#ifdef CONFIG_PRINT_QUOTA_WARNING + print_warning(dquots[i], warntype[i]); +#endif +#ifdef CONFIG_QUOTA_NETLINK_INTERFACE + send_warning(dquots[i], warntype[i]); +#endif + } +} + +static inline char ignore_hardlimit(struct dquot *dquot) +{ + struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type]; + + return capable(CAP_SYS_RESOURCE) && + (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD || !(info->dqi_flags & V1_DQF_RSQUASH)); +} + +/* needs dq_data_lock */ +static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype) +{ + *warntype = QUOTA_NL_NOWARN; + if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) || + test_bit(DQ_FAKE_B, &dquot->dq_flags)) + return QUOTA_OK; + + if (dquot->dq_dqb.dqb_ihardlimit && + (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_ihardlimit && + !ignore_hardlimit(dquot)) { + *warntype = QUOTA_NL_IHARDWARN; + return NO_QUOTA; + } + + if (dquot->dq_dqb.dqb_isoftlimit && + (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit && + dquot->dq_dqb.dqb_itime && get_seconds() >= dquot->dq_dqb.dqb_itime && + !ignore_hardlimit(dquot)) { + *warntype = QUOTA_NL_ISOFTLONGWARN; + return NO_QUOTA; + } + + if (dquot->dq_dqb.dqb_isoftlimit && + (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit && + dquot->dq_dqb.dqb_itime == 0) { + *warntype = QUOTA_NL_ISOFTWARN; + dquot->dq_dqb.dqb_itime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace; + } + + return QUOTA_OK; +} + +/* needs dq_data_lock */ +static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype) +{ + qsize_t tspace; + + *warntype = QUOTA_NL_NOWARN; + if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) || + test_bit(DQ_FAKE_B, &dquot->dq_flags)) + return QUOTA_OK; + + tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace + + space; + + if 
(dquot->dq_dqb.dqb_bhardlimit && + tspace > dquot->dq_dqb.dqb_bhardlimit && + !ignore_hardlimit(dquot)) { + if (!prealloc) + *warntype = QUOTA_NL_BHARDWARN; + return NO_QUOTA; + } + + if (dquot->dq_dqb.dqb_bsoftlimit && + tspace > dquot->dq_dqb.dqb_bsoftlimit && + dquot->dq_dqb.dqb_btime && get_seconds() >= dquot->dq_dqb.dqb_btime && + !ignore_hardlimit(dquot)) { + if (!prealloc) + *warntype = QUOTA_NL_BSOFTLONGWARN; + return NO_QUOTA; + } + + if (dquot->dq_dqb.dqb_bsoftlimit && + tspace > dquot->dq_dqb.dqb_bsoftlimit && + dquot->dq_dqb.dqb_btime == 0) { + if (!prealloc) { + *warntype = QUOTA_NL_BSOFTWARN; + dquot->dq_dqb.dqb_btime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_bgrace; + } + else + /* + * We don't allow preallocation to exceed softlimit so exceeding will + * be always printed + */ + return NO_QUOTA; + } + + return QUOTA_OK; +} + +static int info_idq_free(struct dquot *dquot, qsize_t inodes) +{ + if (test_bit(DQ_FAKE_B, &dquot->dq_flags) || + dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit || + !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type)) + return QUOTA_NL_NOWARN; + + if (dquot->dq_dqb.dqb_curinodes - inodes <= dquot->dq_dqb.dqb_isoftlimit) + return QUOTA_NL_ISOFTBELOW; + if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit && + dquot->dq_dqb.dqb_curinodes - inodes < dquot->dq_dqb.dqb_ihardlimit) + return QUOTA_NL_IHARDBELOW; + return QUOTA_NL_NOWARN; +} + +static int info_bdq_free(struct dquot *dquot, qsize_t space) +{ + if (test_bit(DQ_FAKE_B, &dquot->dq_flags) || + dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit) + return QUOTA_NL_NOWARN; + + if (dquot->dq_dqb.dqb_curspace - space <= dquot->dq_dqb.dqb_bsoftlimit) + return QUOTA_NL_BSOFTBELOW; + if (dquot->dq_dqb.dqb_curspace >= dquot->dq_dqb.dqb_bhardlimit && + dquot->dq_dqb.dqb_curspace - space < dquot->dq_dqb.dqb_bhardlimit) + return QUOTA_NL_BHARDBELOW; + return QUOTA_NL_NOWARN; +} +/* + * Initialize quota pointers in inode + * We do things in a bit complicated way but by that we avoid calling + * dqget() and thus filesystem callbacks under dqptr_sem. + */ +int dquot_initialize(struct inode *inode, int type) +{ + unsigned int id = 0; + int cnt, ret = 0; + struct dquot *got[MAXQUOTAS] = { NODQUOT, NODQUOT }; + struct super_block *sb = inode->i_sb; + + /* First test before acquiring mutex - solves deadlocks when we + * re-enter the quota code and are already holding the mutex */ + if (IS_NOQUOTA(inode)) + return 0; + + /* First get references to structures we might need. */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (type != -1 && cnt != type) + continue; + switch (cnt) { + case USRQUOTA: + id = inode->i_uid; + break; + case GRPQUOTA: + id = inode->i_gid; + break; + } + got[cnt] = dqget(sb, id, cnt); + } + + down_write(&sb_dqopt(sb)->dqptr_sem); + /* Having dqptr_sem we know NOQUOTA flags can't be altered... 
*/ + if (IS_NOQUOTA(inode)) + goto out_err; + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (type != -1 && cnt != type) + continue; + /* Avoid races with quotaoff() */ + if (!sb_has_quota_active(sb, cnt)) + continue; + if (inode->i_dquot[cnt] == NODQUOT) { + inode->i_dquot[cnt] = got[cnt]; + got[cnt] = NODQUOT; + } + } +out_err: + up_write(&sb_dqopt(sb)->dqptr_sem); + /* Drop unused references */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + dqput(got[cnt]); + return ret; +} +EXPORT_SYMBOL(dquot_initialize); + +/* + * Release all quotas referenced by inode + */ +int dquot_drop(struct inode *inode) +{ + int cnt; + struct dquot *put[MAXQUOTAS]; + + down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + put[cnt] = inode->i_dquot[cnt]; + inode->i_dquot[cnt] = NODQUOT; + } + up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); + + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + dqput(put[cnt]); + return 0; +} +EXPORT_SYMBOL(dquot_drop); + +/* Wrapper to remove references to quota structures from inode */ +void vfs_dq_drop(struct inode *inode) +{ + /* Here we can get arbitrary inode from clear_inode() so we have + * to be careful. OTOH we don't need locking as quota operations + * are allowed to change only at mount time */ + if (!IS_NOQUOTA(inode) && inode->i_sb && inode->i_sb->dq_op + && inode->i_sb->dq_op->drop) { + int cnt; + /* Test before calling to rule out calls from proc and such + * where we are not allowed to block. Note that this is + * actually reliable test even without the lock - the caller + * must assure that nobody can come after the DQUOT_DROP and + * add quota pointers back anyway */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + if (inode->i_dquot[cnt] != NODQUOT) + break; + if (cnt < MAXQUOTAS) + inode->i_sb->dq_op->drop(inode); + } +} +EXPORT_SYMBOL(vfs_dq_drop); + +/* + * Following four functions update i_blocks+i_bytes fields and + * quota information (together with appropriate checks) + * NOTE: We absolutely rely on the fact that caller dirties + * the inode (usually macros in quotaops.h care about this) and + * holds a handle for the current transaction so that dquot write and + * inode write go into the same transaction. 
+ */ + +/* + * This operation can block, but only after everything is updated + */ +int __dquot_alloc_space(struct inode *inode, qsize_t number, + int warn, int reserve) +{ + int cnt, ret = QUOTA_OK; + char warntype[MAXQUOTAS]; + + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + warntype[cnt] = QUOTA_NL_NOWARN; + + spin_lock(&dq_data_lock); + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (inode->i_dquot[cnt] == NODQUOT) + continue; + if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) + == NO_QUOTA) { + ret = NO_QUOTA; + goto out_unlock; + } + } + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (inode->i_dquot[cnt] == NODQUOT) + continue; + if (reserve) + dquot_resv_space(inode->i_dquot[cnt], number); + else + dquot_incr_space(inode->i_dquot[cnt], number); + } + if (!reserve) + inode_add_bytes(inode, number); +out_unlock: + spin_unlock(&dq_data_lock); + flush_warnings(inode->i_dquot, warntype); + return ret; +} + +int dquot_alloc_space(struct inode *inode, qsize_t number, int warn) +{ + int cnt, ret = QUOTA_OK; + + /* + * First test before acquiring mutex - solves deadlocks when we + * re-enter the quota code and are already holding the mutex + */ + if (IS_NOQUOTA(inode)) { + inode_add_bytes(inode, number); + goto out; + } + + down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + if (IS_NOQUOTA(inode)) { + inode_add_bytes(inode, number); + goto out_unlock; + } + + ret = __dquot_alloc_space(inode, number, warn, 0); + if (ret == NO_QUOTA) + goto out_unlock; + + /* Dirtify all the dquots - this can block when journalling */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + if (inode->i_dquot[cnt]) + mark_dquot_dirty(inode->i_dquot[cnt]); +out_unlock: + up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); +out: + return ret; +} +EXPORT_SYMBOL(dquot_alloc_space); + +int dquot_reserve_space(struct inode *inode, qsize_t number, int warn) +{ + int ret = QUOTA_OK; + + if (IS_NOQUOTA(inode)) + goto out; + + down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + if (IS_NOQUOTA(inode)) + goto out_unlock; + + ret = __dquot_alloc_space(inode, number, warn, 1); +out_unlock: + up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); +out: + return ret; +} +EXPORT_SYMBOL(dquot_reserve_space); + +/* + * This operation can block, but only after everything is updated + */ +int dquot_alloc_inode(const struct inode *inode, qsize_t number) +{ + int cnt, ret = NO_QUOTA; + char warntype[MAXQUOTAS]; + + /* First test before acquiring mutex - solves deadlocks when we + * re-enter the quota code and are already holding the mutex */ + if (IS_NOQUOTA(inode)) + return QUOTA_OK; + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + warntype[cnt] = QUOTA_NL_NOWARN; + down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + if (IS_NOQUOTA(inode)) { + up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + return QUOTA_OK; + } + spin_lock(&dq_data_lock); + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (inode->i_dquot[cnt] == NODQUOT) + continue; + if (check_idq(inode->i_dquot[cnt], number, warntype+cnt) == NO_QUOTA) + goto warn_put_all; + } + + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (inode->i_dquot[cnt] == NODQUOT) + continue; + dquot_incr_inodes(inode->i_dquot[cnt], number); + } + ret = QUOTA_OK; +warn_put_all: + spin_unlock(&dq_data_lock); + if (ret == QUOTA_OK) + /* Dirtify all the dquots - this can block when journalling */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + if (inode->i_dquot[cnt]) + mark_dquot_dirty(inode->i_dquot[cnt]); + flush_warnings(inode->i_dquot, warntype); + up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + return ret; +} +EXPORT_SYMBOL(dquot_alloc_inode); + +int 
dquot_claim_space(struct inode *inode, qsize_t number) +{ + int cnt; + int ret = QUOTA_OK; + + if (IS_NOQUOTA(inode)) { + inode_add_bytes(inode, number); + goto out; + } + + down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + if (IS_NOQUOTA(inode)) { + up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + inode_add_bytes(inode, number); + goto out; + } + + spin_lock(&dq_data_lock); + /* Claim reserved quotas to allocated quotas */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (inode->i_dquot[cnt] != NODQUOT) + dquot_claim_reserved_space(inode->i_dquot[cnt], + number); + } + /* Update inode bytes */ + inode_add_bytes(inode, number); + spin_unlock(&dq_data_lock); + /* Dirtify all the dquots - this can block when journalling */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + if (inode->i_dquot[cnt]) + mark_dquot_dirty(inode->i_dquot[cnt]); + up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); +out: + return ret; +} +EXPORT_SYMBOL(dquot_claim_space); + +/* + * Release reserved quota space + */ +void dquot_release_reserved_space(struct inode *inode, qsize_t number) +{ + int cnt; + + if (IS_NOQUOTA(inode)) + goto out; + + down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + if (IS_NOQUOTA(inode)) + goto out_unlock; + + spin_lock(&dq_data_lock); + /* Release reserved dquots */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (inode->i_dquot[cnt] != NODQUOT) + dquot_free_reserved_space(inode->i_dquot[cnt], number); + } + spin_unlock(&dq_data_lock); + +out_unlock: + up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); +out: + return; +} +EXPORT_SYMBOL(dquot_release_reserved_space); + +/* + * This operation can block, but only after everything is updated + */ +int dquot_free_space(struct inode *inode, qsize_t number) +{ + unsigned int cnt; + char warntype[MAXQUOTAS]; + + /* First test before acquiring mutex - solves deadlocks when we + * re-enter the quota code and are already holding the mutex */ + if (IS_NOQUOTA(inode)) { +out_sub: + inode_sub_bytes(inode, number); + return QUOTA_OK; + } + + down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + /* Now recheck reliably when holding dqptr_sem */ + if (IS_NOQUOTA(inode)) { + up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + goto out_sub; + } + spin_lock(&dq_data_lock); + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (inode->i_dquot[cnt] == NODQUOT) + continue; + warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number); + dquot_decr_space(inode->i_dquot[cnt], number); + } + inode_sub_bytes(inode, number); + spin_unlock(&dq_data_lock); + /* Dirtify all the dquots - this can block when journalling */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + if (inode->i_dquot[cnt]) + mark_dquot_dirty(inode->i_dquot[cnt]); + flush_warnings(inode->i_dquot, warntype); + up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + return QUOTA_OK; +} +EXPORT_SYMBOL(dquot_free_space); + +/* + * This operation can block, but only after everything is updated + */ +int dquot_free_inode(const struct inode *inode, qsize_t number) +{ + unsigned int cnt; + char warntype[MAXQUOTAS]; + + /* First test before acquiring mutex - solves deadlocks when we + * re-enter the quota code and are already holding the mutex */ + if (IS_NOQUOTA(inode)) + return QUOTA_OK; + + down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + /* Now recheck reliably when holding dqptr_sem */ + if (IS_NOQUOTA(inode)) { + up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + return QUOTA_OK; + } + spin_lock(&dq_data_lock); + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (inode->i_dquot[cnt] == NODQUOT) + continue; + warntype[cnt] = info_idq_free(inode->i_dquot[cnt], number); + 
dquot_decr_inodes(inode->i_dquot[cnt], number); + } + spin_unlock(&dq_data_lock); + /* Dirtify all the dquots - this can block when journalling */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + if (inode->i_dquot[cnt]) + mark_dquot_dirty(inode->i_dquot[cnt]); + flush_warnings(inode->i_dquot, warntype); + up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + return QUOTA_OK; +} +EXPORT_SYMBOL(dquot_free_inode); + +/* + * call back function, get reserved quota space from underlying fs + */ +qsize_t dquot_get_reserved_space(struct inode *inode) +{ + qsize_t reserved_space = 0; + + if (sb_any_quota_active(inode->i_sb) && + inode->i_sb->dq_op->get_reserved_space) + reserved_space = inode->i_sb->dq_op->get_reserved_space(inode); + return reserved_space; +} + +/* + * Transfer the number of inode and blocks from one diskquota to an other. + * + * This operation can block, but only after everything is updated + * A transaction must be started when entering this function. + */ +int dquot_transfer(struct inode *inode, struct iattr *iattr) +{ + qsize_t space, cur_space; + qsize_t rsv_space = 0; + struct dquot *transfer_from[MAXQUOTAS]; + struct dquot *transfer_to[MAXQUOTAS]; + int cnt, ret = QUOTA_OK; + int chuid = iattr->ia_valid & ATTR_UID && inode->i_uid != iattr->ia_uid, + chgid = iattr->ia_valid & ATTR_GID && inode->i_gid != iattr->ia_gid; + char warntype_to[MAXQUOTAS]; + char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS]; + + /* First test before acquiring mutex - solves deadlocks when we + * re-enter the quota code and are already holding the mutex */ + if (IS_NOQUOTA(inode)) + return QUOTA_OK; + /* Initialize the arrays */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + transfer_from[cnt] = NODQUOT; + transfer_to[cnt] = NODQUOT; + warntype_to[cnt] = QUOTA_NL_NOWARN; + switch (cnt) { + case USRQUOTA: + if (!chuid) + continue; + transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_uid, cnt); + break; + case GRPQUOTA: + if (!chgid) + continue; + transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_gid, cnt); + break; + } + } + + down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); + /* Now recheck reliably when holding dqptr_sem */ + if (IS_NOQUOTA(inode)) { /* File without quota accounting? */ + up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); + goto put_all; + } + spin_lock(&dq_data_lock); + cur_space = inode_get_bytes(inode); + rsv_space = dquot_get_reserved_space(inode); + space = cur_space + rsv_space; + /* Build the transfer_from list and check the limits */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (transfer_to[cnt] == NODQUOT) + continue; + transfer_from[cnt] = inode->i_dquot[cnt]; + if (check_idq(transfer_to[cnt], 1, warntype_to + cnt) == + NO_QUOTA || check_bdq(transfer_to[cnt], space, 0, + warntype_to + cnt) == NO_QUOTA) + goto over_quota; + } + + /* + * Finally perform the needed transfer from transfer_from to transfer_to + */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + /* + * Skip changes for same uid or gid or for turned off quota-type. 
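The loop below finishes the actual transfer; the arithmetic is easiest to see in isolation. Here is a minimal, self-contained userspace model of what dquot_transfer() does under dq_data_lock for a single quota type: the currently charged space, the reserved space and one inode all move from the old owner's dquot to the new owner's. The toy_* names are illustrative only, not kernel code.

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for the usage fields of struct mem_dqblk. */
struct toy_dquot {
	uint64_t curspace;	/* bytes currently charged */
	uint64_t rsvspace;	/* bytes reserved but not yet claimed */
	uint64_t curinodes;	/* inodes charged */
};

/* Mirrors the core of dquot_transfer(): move cur_space, rsv_space and
 * one inode from the old owner to the new one. */
static void toy_transfer(struct toy_dquot *from, struct toy_dquot *to,
			 uint64_t cur_space, uint64_t rsv_space)
{
	from->curinodes -= 1;
	from->curspace  -= cur_space;
	from->rsvspace  -= rsv_space;

	to->curinodes += 1;
	to->curspace  += cur_space;
	to->rsvspace  += rsv_space;
}

int main(void)
{
	struct toy_dquot old_owner = { 8192, 4096, 3 };
	struct toy_dquot new_owner = { 0, 0, 0 };

	/* space = cur_space + rsv_space = 12288 is what check_bdq() tests
	 * against the new owner's block limits before the move happens. */
	toy_transfer(&old_owner, &new_owner, 8192, 4096);
	printf("new owner now holds %llu + %llu reserved bytes, %llu inodes\n",
	       (unsigned long long)new_owner.curspace,
	       (unsigned long long)new_owner.rsvspace,
	       (unsigned long long)new_owner.curinodes);
	return 0;
}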
+ */ + if (transfer_to[cnt] == NODQUOT) + continue; + + /* Due to IO error we might not have transfer_from[] structure */ + if (transfer_from[cnt]) { + warntype_from_inodes[cnt] = + info_idq_free(transfer_from[cnt], 1); + warntype_from_space[cnt] = + info_bdq_free(transfer_from[cnt], space); + dquot_decr_inodes(transfer_from[cnt], 1); + dquot_decr_space(transfer_from[cnt], cur_space); + dquot_free_reserved_space(transfer_from[cnt], + rsv_space); + } + + dquot_incr_inodes(transfer_to[cnt], 1); + dquot_incr_space(transfer_to[cnt], cur_space); + dquot_resv_space(transfer_to[cnt], rsv_space); + + inode->i_dquot[cnt] = transfer_to[cnt]; + } + spin_unlock(&dq_data_lock); + up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); + + /* Dirtify all the dquots - this can block when journalling */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (transfer_from[cnt]) + mark_dquot_dirty(transfer_from[cnt]); + if (transfer_to[cnt]) { + mark_dquot_dirty(transfer_to[cnt]); + /* The reference we got is transferred to the inode */ + transfer_to[cnt] = NODQUOT; + } + } +warn_put_all: + flush_warnings(transfer_to, warntype_to); + flush_warnings(transfer_from, warntype_from_inodes); + flush_warnings(transfer_from, warntype_from_space); +put_all: + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + dqput(transfer_from[cnt]); + dqput(transfer_to[cnt]); + } + return ret; +over_quota: + spin_unlock(&dq_data_lock); + up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); + /* Clear dquot pointers we don't want to dqput() */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + transfer_from[cnt] = NODQUOT; + ret = NO_QUOTA; + goto warn_put_all; +} +EXPORT_SYMBOL(dquot_transfer); + +/* Wrapper for transferring ownership of an inode */ +int vfs_dq_transfer(struct inode *inode, struct iattr *iattr) +{ + if (sb_any_quota_active(inode->i_sb) && !IS_NOQUOTA(inode)) { + vfs_dq_init(inode); + if (inode->i_sb->dq_op->transfer(inode, iattr) == NO_QUOTA) + return 1; + } + return 0; +} +EXPORT_SYMBOL(vfs_dq_transfer); + +/* + * Write info of quota file to disk + */ +int dquot_commit_info(struct super_block *sb, int type) +{ + int ret; + struct quota_info *dqopt = sb_dqopt(sb); + + mutex_lock(&dqopt->dqio_mutex); + ret = dqopt->ops[type]->write_file_info(sb, type); + mutex_unlock(&dqopt->dqio_mutex); + return ret; +} +EXPORT_SYMBOL(dquot_commit_info); + +/* + * Definitions of diskquota operations. + */ +struct dquot_operations dquot_operations = { + .initialize = dquot_initialize, + .drop = dquot_drop, + .alloc_space = dquot_alloc_space, + .alloc_inode = dquot_alloc_inode, + .free_space = dquot_free_space, + .free_inode = dquot_free_inode, + .transfer = dquot_transfer, + .write_dquot = dquot_commit, + .acquire_dquot = dquot_acquire, + .release_dquot = dquot_release, + .mark_dirty = dquot_mark_dquot_dirty, + .write_info = dquot_commit_info, + .alloc_dquot = dquot_alloc, + .destroy_dquot = dquot_destroy, +}; + +/* + * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount) + */ +int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags) +{ + int cnt, ret = 0; + struct quota_info *dqopt = sb_dqopt(sb); + struct inode *toputinode[MAXQUOTAS]; + + /* Cannot turn off usage accounting without turning off limits, or + * suspend quotas and simultaneously turn quotas off. 
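The dquot_operations table above is the generic accounting a filesystem can adopt wholesale. A rough sketch of the wiring, in the style ext2 used at the time; examplefs_fill_super() is hypothetical, and vfs_quotactl_ops is the quotactl_ops table defined further down in this file.

#include <linux/fs.h>
#include <linux/quotaops.h>

/* Hypothetical fill_super fragment: hand quota accounting to the generic code. */
static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
{
	/* ... usual superblock setup elided ... */
	sb->dq_op = &dquot_operations;		/* per-inode accounting hooks */
	sb->s_qcop = &vfs_quotactl_ops;		/* quotactl(2) entry points */
	/* Enforcement begins once quotaon(8), or vfs_quota_on_mount() for
	 * journalled quota, enables the wanted quota types. */
	return 0;
}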
*/ + if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED)) + || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED | + DQUOT_USAGE_ENABLED))) + return -EINVAL; + + /* We need to serialize quota_off() for device */ + mutex_lock(&dqopt->dqonoff_mutex); + + /* + * Skip everything if there's nothing to do. We have to do this because + * sometimes we are called when fill_super() failed and calling + * sync_fs() in such cases does no good. + */ + if (!sb_any_quota_loaded(sb)) { + mutex_unlock(&dqopt->dqonoff_mutex); + return 0; + } + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + toputinode[cnt] = NULL; + if (type != -1 && cnt != type) + continue; + if (!sb_has_quota_loaded(sb, cnt)) + continue; + + if (flags & DQUOT_SUSPENDED) { + spin_lock(&dq_state_lock); + dqopt->flags |= + dquot_state_flag(DQUOT_SUSPENDED, cnt); + spin_unlock(&dq_state_lock); + } else { + spin_lock(&dq_state_lock); + dqopt->flags &= ~dquot_state_flag(flags, cnt); + /* Turning off suspended quotas? */ + if (!sb_has_quota_loaded(sb, cnt) && + sb_has_quota_suspended(sb, cnt)) { + dqopt->flags &= ~dquot_state_flag( + DQUOT_SUSPENDED, cnt); + spin_unlock(&dq_state_lock); + iput(dqopt->files[cnt]); + dqopt->files[cnt] = NULL; + continue; + } + spin_unlock(&dq_state_lock); + } + + /* We still have to keep quota loaded? */ + if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED)) + continue; + + /* Note: these are blocking operations */ + drop_dquot_ref(sb, cnt); + invalidate_dquots(sb, cnt); + /* + * Now all dquots should be invalidated, all writes done so we should be only + * users of the info. No locks needed. + */ + if (info_dirty(&dqopt->info[cnt])) + sb->dq_op->write_info(sb, cnt); + if (dqopt->ops[cnt]->free_file_info) + dqopt->ops[cnt]->free_file_info(sb, cnt); + put_quota_format(dqopt->info[cnt].dqi_format); + + toputinode[cnt] = dqopt->files[cnt]; + if (!sb_has_quota_loaded(sb, cnt)) + dqopt->files[cnt] = NULL; + dqopt->info[cnt].dqi_flags = 0; + dqopt->info[cnt].dqi_igrace = 0; + dqopt->info[cnt].dqi_bgrace = 0; + dqopt->ops[cnt] = NULL; + } + mutex_unlock(&dqopt->dqonoff_mutex); + + /* Skip syncing and setting flags if quota files are hidden */ + if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) + goto put_inodes; + + /* Sync the superblock so that buffers with quota data are written to + * disk (and so userspace sees correct data afterwards). */ + if (sb->s_op->sync_fs) + sb->s_op->sync_fs(sb, 1); + sync_blockdev(sb->s_bdev); + /* Now the quota files are just ordinary files and we can set the + * inode flags back. Moreover we discard the pagecache so that + * userspace sees the writes we did bypassing the pagecache. We + * must also discard the blockdev buffers so that we see the + * changes done by userspace on the next quotaon() */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + if (toputinode[cnt]) { + mutex_lock(&dqopt->dqonoff_mutex); + /* If quota was reenabled in the meantime, we have + * nothing to do */ + if (!sb_has_quota_loaded(sb, cnt)) { + mutex_lock_nested(&toputinode[cnt]->i_mutex, I_MUTEX_QUOTA); + toputinode[cnt]->i_flags &= ~(S_IMMUTABLE | + S_NOATIME | S_NOQUOTA); + truncate_inode_pages(&toputinode[cnt]->i_data, 0); + mutex_unlock(&toputinode[cnt]->i_mutex); + mark_inode_dirty(toputinode[cnt]); + } + mutex_unlock(&dqopt->dqonoff_mutex); + } + if (sb->s_bdev) + invalidate_bdev(sb->s_bdev); +put_inodes: + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + if (toputinode[cnt]) { + /* On remount RO, we keep the inode pointer so that we + * can reenable quota on the subsequent remount RW. 
We + * have to check 'flags' variable and not use sb_has_ + * function because another quotaon / quotaoff could + * change global state before we got here. We refuse + * to suspend quotas when there is pending delete on + * the quota file... */ + if (!(flags & DQUOT_SUSPENDED)) + iput(toputinode[cnt]); + else if (!toputinode[cnt]->i_nlink) + ret = -EBUSY; + } + return ret; +} +EXPORT_SYMBOL(vfs_quota_disable); + +int vfs_quota_off(struct super_block *sb, int type, int remount) +{ + return vfs_quota_disable(sb, type, remount ? DQUOT_SUSPENDED : + (DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED)); +} +EXPORT_SYMBOL(vfs_quota_off); +/* + * Turn quotas on on a device + */ + +/* + * Helper function to turn quotas on when we already have the inode of + * quota file and no quota information is loaded. + */ +static int vfs_load_quota_inode(struct inode *inode, int type, int format_id, + unsigned int flags) +{ + struct quota_format_type *fmt = find_quota_format(format_id); + struct super_block *sb = inode->i_sb; + struct quota_info *dqopt = sb_dqopt(sb); + int error; + int oldflags = -1; + + if (!fmt) + return -ESRCH; + if (!S_ISREG(inode->i_mode)) { + error = -EACCES; + goto out_fmt; + } + if (IS_RDONLY(inode)) { + error = -EROFS; + goto out_fmt; + } + if (!sb->s_op->quota_write || !sb->s_op->quota_read) { + error = -EINVAL; + goto out_fmt; + } + /* Usage always has to be set... */ + if (!(flags & DQUOT_USAGE_ENABLED)) { + error = -EINVAL; + goto out_fmt; + } + + if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) { + /* As we bypass the pagecache we must now flush the inode so + * that we see all the changes from userspace... */ + write_inode_now(inode, 1); + /* And now flush the block cache so that kernel sees the + * changes */ + invalidate_bdev(sb->s_bdev); + } + mutex_lock(&inode->i_mutex); + mutex_lock(&dqopt->dqonoff_mutex); + if (sb_has_quota_loaded(sb, type)) { + error = -EBUSY; + goto out_lock; + } + + if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) { + /* We don't want quota and atime on quota files (deadlocks + * possible) Also nobody should write to the file - we use + * special IO operations which ignore the immutable bit. 
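vfs_load_quota_inode() above is the common path behind quotaon(8) and the journalled-quota mount options. For reference, the userspace trigger is a single quotactl(2) call naming the format and the quota file. The device and file paths below are only examples, and QFMT_VFS_V0 is defined locally in case the libc headers do not expose it.

#include <stdio.h>
#include <sys/types.h>
#include <sys/quota.h>

#ifndef QFMT_VFS_V0
#define QFMT_VFS_V0 2	/* vfsv0 format id, as in linux/quota.h */
#endif

int main(void)
{
	const char *dev = "/dev/sda1";		/* example block device */
	const char *qfile = "/mnt/aquota.user";	/* example quota file */

	/* For Q_QUOTAON the 'id' argument carries the format id and
	 * 'addr' the path of the quota file on that filesystem. */
	if (quotactl(QCMD(Q_QUOTAON, USRQUOTA), dev, QFMT_VFS_V0,
		     (caddr_t)qfile) != 0) {
		perror("quotactl(Q_QUOTAON)");
		return 1;
	}
	printf("user quota enabled on %s\n", dev);
	return 0;
}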
*/ + down_write(&dqopt->dqptr_sem); + oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE | S_NOQUOTA); + inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE; + up_write(&dqopt->dqptr_sem); + sb->dq_op->drop(inode); + } + + error = -EIO; + dqopt->files[type] = igrab(inode); + if (!dqopt->files[type]) + goto out_lock; + error = -EINVAL; + if (!fmt->qf_ops->check_quota_file(sb, type)) + goto out_file_init; + + dqopt->ops[type] = fmt->qf_ops; + dqopt->info[type].dqi_format = fmt; + dqopt->info[type].dqi_fmt_id = format_id; + INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list); + mutex_lock(&dqopt->dqio_mutex); + if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) { + mutex_unlock(&dqopt->dqio_mutex); + goto out_file_init; + } + mutex_unlock(&dqopt->dqio_mutex); + mutex_unlock(&inode->i_mutex); + spin_lock(&dq_state_lock); + dqopt->flags |= dquot_state_flag(flags, type); + spin_unlock(&dq_state_lock); + + add_dquot_ref(sb, type); + mutex_unlock(&dqopt->dqonoff_mutex); + + return 0; + +out_file_init: + dqopt->files[type] = NULL; + iput(inode); +out_lock: + mutex_unlock(&dqopt->dqonoff_mutex); + if (oldflags != -1) { + down_write(&dqopt->dqptr_sem); + /* Set the flags back (in the case of accidental quotaon() + * on a wrong file we don't want to mess up the flags) */ + inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE); + inode->i_flags |= oldflags; + up_write(&dqopt->dqptr_sem); + } + mutex_unlock(&inode->i_mutex); +out_fmt: + put_quota_format(fmt); + + return error; +} + +/* Reenable quotas on remount RW */ +static int vfs_quota_on_remount(struct super_block *sb, int type) +{ + struct quota_info *dqopt = sb_dqopt(sb); + struct inode *inode; + int ret; + unsigned int flags; + + mutex_lock(&dqopt->dqonoff_mutex); + if (!sb_has_quota_suspended(sb, type)) { + mutex_unlock(&dqopt->dqonoff_mutex); + return 0; + } + inode = dqopt->files[type]; + dqopt->files[type] = NULL; + spin_lock(&dq_state_lock); + flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED | + DQUOT_LIMITS_ENABLED, type); + dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, type); + spin_unlock(&dq_state_lock); + mutex_unlock(&dqopt->dqonoff_mutex); + + flags = dquot_generic_flag(flags, type); + ret = vfs_load_quota_inode(inode, type, dqopt->info[type].dqi_fmt_id, + flags); + iput(inode); + + return ret; +} + +int vfs_quota_on_path(struct super_block *sb, int type, int format_id, + struct path *path) +{ + int error = security_quota_on(path->dentry); + if (error) + return error; + /* Quota file not on the same filesystem? */ + if (path->mnt->mnt_sb != sb) + error = -EXDEV; + else + error = vfs_load_quota_inode(path->dentry->d_inode, type, + format_id, DQUOT_USAGE_ENABLED | + DQUOT_LIMITS_ENABLED); + return error; +} +EXPORT_SYMBOL(vfs_quota_on_path); + +int vfs_quota_on(struct super_block *sb, int type, int format_id, char *name, + int remount) +{ + struct path path; + int error; + + if (remount) + return vfs_quota_on_remount(sb, type); + + error = kern_path(name, LOOKUP_FOLLOW, &path); + if (!error) { + error = vfs_quota_on_path(sb, type, format_id, &path); + path_put(&path); + } + return error; +} +EXPORT_SYMBOL(vfs_quota_on); + +/* + * More powerful function for turning on quotas allowing setting + * of individual quota flags + */ +int vfs_quota_enable(struct inode *inode, int type, int format_id, + unsigned int flags) +{ + int ret = 0; + struct super_block *sb = inode->i_sb; + struct quota_info *dqopt = sb_dqopt(sb); + + /* Just unsuspend quotas? 
*/ + if (flags & DQUOT_SUSPENDED) + return vfs_quota_on_remount(sb, type); + if (!flags) + return 0; + /* Just updating flags needed? */ + if (sb_has_quota_loaded(sb, type)) { + mutex_lock(&dqopt->dqonoff_mutex); + /* Now do a reliable test... */ + if (!sb_has_quota_loaded(sb, type)) { + mutex_unlock(&dqopt->dqonoff_mutex); + goto load_quota; + } + if (flags & DQUOT_USAGE_ENABLED && + sb_has_quota_usage_enabled(sb, type)) { + ret = -EBUSY; + goto out_lock; + } + if (flags & DQUOT_LIMITS_ENABLED && + sb_has_quota_limits_enabled(sb, type)) { + ret = -EBUSY; + goto out_lock; + } + spin_lock(&dq_state_lock); + sb_dqopt(sb)->flags |= dquot_state_flag(flags, type); + spin_unlock(&dq_state_lock); +out_lock: + mutex_unlock(&dqopt->dqonoff_mutex); + return ret; + } + +load_quota: + return vfs_load_quota_inode(inode, type, format_id, flags); +} +EXPORT_SYMBOL(vfs_quota_enable); + +/* + * This function is used when filesystem needs to initialize quotas + * during mount time. + */ +int vfs_quota_on_mount(struct super_block *sb, char *qf_name, + int format_id, int type) +{ + struct dentry *dentry; + int error; + + dentry = lookup_one_len(qf_name, sb->s_root, strlen(qf_name)); + if (IS_ERR(dentry)) + return PTR_ERR(dentry); + + if (!dentry->d_inode) { + error = -ENOENT; + goto out; + } + + error = security_quota_on(dentry); + if (!error) + error = vfs_load_quota_inode(dentry->d_inode, type, format_id, + DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); + +out: + dput(dentry); + return error; +} +EXPORT_SYMBOL(vfs_quota_on_mount); + +/* Wrapper to turn on quotas when remounting rw */ +int vfs_dq_quota_on_remount(struct super_block *sb) +{ + int cnt; + int ret = 0, err; + + if (!sb->s_qcop || !sb->s_qcop->quota_on) + return -ENOSYS; + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + err = sb->s_qcop->quota_on(sb, cnt, 0, NULL, 1); + if (err < 0 && !ret) + ret = err; + } + return ret; +} +EXPORT_SYMBOL(vfs_dq_quota_on_remount); + +static inline qsize_t qbtos(qsize_t blocks) +{ + return blocks << QIF_DQBLKSIZE_BITS; +} + +static inline qsize_t stoqb(qsize_t space) +{ + return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS; +} + +/* Generic routine for getting common part of quota structure */ +static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di) +{ + struct mem_dqblk *dm = &dquot->dq_dqb; + + spin_lock(&dq_data_lock); + di->dqb_bhardlimit = stoqb(dm->dqb_bhardlimit); + di->dqb_bsoftlimit = stoqb(dm->dqb_bsoftlimit); + di->dqb_curspace = dm->dqb_curspace + dm->dqb_rsvspace; + di->dqb_ihardlimit = dm->dqb_ihardlimit; + di->dqb_isoftlimit = dm->dqb_isoftlimit; + di->dqb_curinodes = dm->dqb_curinodes; + di->dqb_btime = dm->dqb_btime; + di->dqb_itime = dm->dqb_itime; + di->dqb_valid = QIF_ALL; + spin_unlock(&dq_data_lock); +} + +int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di) +{ + struct dquot *dquot; + + dquot = dqget(sb, id, type); + if (dquot == NODQUOT) + return -ESRCH; + do_get_dqblk(dquot, di); + dqput(dquot); + + return 0; +} +EXPORT_SYMBOL(vfs_get_dqblk); + +/* Generic routine for setting common part of quota structure */ +static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di) +{ + struct mem_dqblk *dm = &dquot->dq_dqb; + int check_blim = 0, check_ilim = 0; + struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type]; + + if ((di->dqb_valid & QIF_BLIMITS && + (di->dqb_bhardlimit > dqi->dqi_maxblimit || + di->dqb_bsoftlimit > dqi->dqi_maxblimit)) || + (di->dqb_valid & QIF_ILIMITS && + (di->dqb_ihardlimit > dqi->dqi_maxilimit || + 
di->dqb_isoftlimit > dqi->dqi_maxilimit))) + return -ERANGE; + + spin_lock(&dq_data_lock); + if (di->dqb_valid & QIF_SPACE) { + dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace; + check_blim = 1; + __set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags); + } + if (di->dqb_valid & QIF_BLIMITS) { + dm->dqb_bsoftlimit = qbtos(di->dqb_bsoftlimit); + dm->dqb_bhardlimit = qbtos(di->dqb_bhardlimit); + check_blim = 1; + __set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags); + } + if (di->dqb_valid & QIF_INODES) { + dm->dqb_curinodes = di->dqb_curinodes; + check_ilim = 1; + __set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags); + } + if (di->dqb_valid & QIF_ILIMITS) { + dm->dqb_isoftlimit = di->dqb_isoftlimit; + dm->dqb_ihardlimit = di->dqb_ihardlimit; + check_ilim = 1; + __set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags); + } + if (di->dqb_valid & QIF_BTIME) { + dm->dqb_btime = di->dqb_btime; + check_blim = 1; + __set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags); + } + if (di->dqb_valid & QIF_ITIME) { + dm->dqb_itime = di->dqb_itime; + check_ilim = 1; + __set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags); + } + + if (check_blim) { + if (!dm->dqb_bsoftlimit || dm->dqb_curspace < dm->dqb_bsoftlimit) { + dm->dqb_btime = 0; + clear_bit(DQ_BLKS_B, &dquot->dq_flags); + } + else if (!(di->dqb_valid & QIF_BTIME)) /* Set grace only if user hasn't provided his own... */ + dm->dqb_btime = get_seconds() + dqi->dqi_bgrace; + } + if (check_ilim) { + if (!dm->dqb_isoftlimit || dm->dqb_curinodes < dm->dqb_isoftlimit) { + dm->dqb_itime = 0; + clear_bit(DQ_INODES_B, &dquot->dq_flags); + } + else if (!(di->dqb_valid & QIF_ITIME)) /* Set grace only if user hasn't provided his own... */ + dm->dqb_itime = get_seconds() + dqi->dqi_igrace; + } + if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit || dm->dqb_isoftlimit) + clear_bit(DQ_FAKE_B, &dquot->dq_flags); + else + set_bit(DQ_FAKE_B, &dquot->dq_flags); + spin_unlock(&dq_data_lock); + mark_dquot_dirty(dquot); + + return 0; +} + +int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di) +{ + struct dquot *dquot; + int rc; + + dquot = dqget(sb, id, type); + if (!dquot) { + rc = -ESRCH; + goto out; + } + rc = do_set_dqblk(dquot, di); + dqput(dquot); +out: + return rc; +} +EXPORT_SYMBOL(vfs_set_dqblk); + +/* Generic routine for getting common part of quota file information */ +int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) +{ + struct mem_dqinfo *mi; + + mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); + if (!sb_has_quota_active(sb, type)) { + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); + return -ESRCH; + } + mi = sb_dqopt(sb)->info + type; + spin_lock(&dq_data_lock); + ii->dqi_bgrace = mi->dqi_bgrace; + ii->dqi_igrace = mi->dqi_igrace; + ii->dqi_flags = mi->dqi_flags & DQF_MASK; + ii->dqi_valid = IIF_ALL; + spin_unlock(&dq_data_lock); + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); + return 0; +} +EXPORT_SYMBOL(vfs_get_dqinfo); + +/* Generic routine for setting common part of quota file information */ +int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) +{ + struct mem_dqinfo *mi; + int err = 0; + + mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); + if (!sb_has_quota_active(sb, type)) { + err = -ESRCH; + goto out; + } + mi = sb_dqopt(sb)->info + type; + spin_lock(&dq_data_lock); + if (ii->dqi_valid & IIF_BGRACE) + mi->dqi_bgrace = ii->dqi_bgrace; + if (ii->dqi_valid & IIF_IGRACE) + mi->dqi_igrace = ii->dqi_igrace; + if (ii->dqi_valid & IIF_FLAGS) + mi->dqi_flags = 
(mi->dqi_flags & ~DQF_MASK) | (ii->dqi_flags & DQF_MASK); + spin_unlock(&dq_data_lock); + mark_info_dirty(sb, type); + /* Force write to disk */ + sb->dq_op->write_info(sb, type); +out: + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); + return err; +} +EXPORT_SYMBOL(vfs_set_dqinfo); + +struct quotactl_ops vfs_quotactl_ops = { + .quota_on = vfs_quota_on, + .quota_off = vfs_quota_off, + .quota_sync = vfs_quota_sync, + .get_info = vfs_get_dqinfo, + .set_info = vfs_set_dqinfo, + .get_dqblk = vfs_get_dqblk, + .set_dqblk = vfs_set_dqblk +}; + +static ctl_table fs_dqstats_table[] = { + { + .ctl_name = FS_DQ_LOOKUPS, + .procname = "lookups", + .data = &dqstats.lookups, + .maxlen = sizeof(int), + .mode = 0444, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = FS_DQ_DROPS, + .procname = "drops", + .data = &dqstats.drops, + .maxlen = sizeof(int), + .mode = 0444, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = FS_DQ_READS, + .procname = "reads", + .data = &dqstats.reads, + .maxlen = sizeof(int), + .mode = 0444, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = FS_DQ_WRITES, + .procname = "writes", + .data = &dqstats.writes, + .maxlen = sizeof(int), + .mode = 0444, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = FS_DQ_CACHE_HITS, + .procname = "cache_hits", + .data = &dqstats.cache_hits, + .maxlen = sizeof(int), + .mode = 0444, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = FS_DQ_ALLOCATED, + .procname = "allocated_dquots", + .data = &dqstats.allocated_dquots, + .maxlen = sizeof(int), + .mode = 0444, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = FS_DQ_FREE, + .procname = "free_dquots", + .data = &dqstats.free_dquots, + .maxlen = sizeof(int), + .mode = 0444, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = FS_DQ_SYNCS, + .procname = "syncs", + .data = &dqstats.syncs, + .maxlen = sizeof(int), + .mode = 0444, + .proc_handler = &proc_dointvec, + }, +#ifdef CONFIG_PRINT_QUOTA_WARNING + { + .ctl_name = FS_DQ_WARNINGS, + .procname = "warnings", + .data = &flag_print_warnings, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, +#endif + { .ctl_name = 0 }, +}; + +static ctl_table fs_table[] = { + { + .ctl_name = FS_DQSTATS, + .procname = "quota", + .mode = 0555, + .child = fs_dqstats_table, + }, + { .ctl_name = 0 }, +}; + +static ctl_table sys_table[] = { + { + .ctl_name = CTL_FS, + .procname = "fs", + .mode = 0555, + .child = fs_table, + }, + { .ctl_name = 0 }, +}; + +static int __init dquot_init(void) +{ + int i; + unsigned long nr_hash, order; + + printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__); + + register_sysctl_table(sys_table); + + dquot_cachep = kmem_cache_create("dquot", + sizeof(struct dquot), sizeof(unsigned long) * 4, + (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| + SLAB_MEM_SPREAD|SLAB_PANIC), + NULL); + + order = 0; + dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order); + if (!dquot_hash) + panic("Cannot create dquot hash table"); + + /* Find power-of-two hlist_heads which can fit into allocation */ + nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head); + dq_hash_bits = 0; + do { + dq_hash_bits++; + } while (nr_hash >> dq_hash_bits); + dq_hash_bits--; + + nr_hash = 1UL << dq_hash_bits; + dq_hash_mask = nr_hash - 1; + for (i = 0; i < nr_hash; i++) + INIT_HLIST_HEAD(dquot_hash + i); + + printk("Dquot-cache hash table entries: %ld (order %ld, %ld bytes)\n", + nr_hash, order, (PAGE_SIZE << order)); + + register_shrinker(&dqcache_shrinker); + +#ifdef 
CONFIG_QUOTA_NETLINK_INTERFACE + if (genl_register_family("a_genl_family) != 0) + printk(KERN_ERR "VFS: Failed to create quota netlink interface.\n"); +#endif + + return 0; +} +module_init(dquot_init); diff --git a/fs/quota/quota.c b/fs/quota/quota.c new file mode 100644 index 0000000..d76ada9 --- /dev/null +++ b/fs/quota/quota.c @@ -0,0 +1,513 @@ +/* + * Quota code necessary even when VFS quota support is not compiled + * into the kernel. The interesting stuff is over in dquot.c, here + * we have symbols for initial quotactl(2) handling, the sysctl(2) + * variables, etc - things needed even when quota support disabled. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Check validity of generic quotactl commands */ +static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id) +{ + if (type >= MAXQUOTAS) + return -EINVAL; + if (!sb && cmd != Q_SYNC) + return -ENODEV; + /* Is operation supported? */ + if (sb && !sb->s_qcop) + return -ENOSYS; + + switch (cmd) { + case Q_GETFMT: + break; + case Q_QUOTAON: + if (!sb->s_qcop->quota_on) + return -ENOSYS; + break; + case Q_QUOTAOFF: + if (!sb->s_qcop->quota_off) + return -ENOSYS; + break; + case Q_SETINFO: + if (!sb->s_qcop->set_info) + return -ENOSYS; + break; + case Q_GETINFO: + if (!sb->s_qcop->get_info) + return -ENOSYS; + break; + case Q_SETQUOTA: + if (!sb->s_qcop->set_dqblk) + return -ENOSYS; + break; + case Q_GETQUOTA: + if (!sb->s_qcop->get_dqblk) + return -ENOSYS; + break; + case Q_SYNC: + if (sb && !sb->s_qcop->quota_sync) + return -ENOSYS; + break; + default: + return -EINVAL; + } + + /* Is quota turned on for commands which need it? */ + switch (cmd) { + case Q_GETFMT: + case Q_GETINFO: + case Q_SETINFO: + case Q_SETQUOTA: + case Q_GETQUOTA: + /* This is just informative test so we are satisfied without a lock */ + if (!sb_has_quota_active(sb, type)) + return -ESRCH; + } + + /* Check privileges */ + if (cmd == Q_GETQUOTA) { + if (((type == USRQUOTA && current_euid() != id) || + (type == GRPQUOTA && !in_egroup_p(id))) && + !capable(CAP_SYS_ADMIN)) + return -EPERM; + } + else if (cmd != Q_GETFMT && cmd != Q_SYNC && cmd != Q_GETINFO) + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + return 0; +} + +/* Check validity of XFS Quota Manager commands */ +static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id) +{ + if (type >= XQM_MAXQUOTAS) + return -EINVAL; + if (!sb) + return -ENODEV; + if (!sb->s_qcop) + return -ENOSYS; + + switch (cmd) { + case Q_XQUOTAON: + case Q_XQUOTAOFF: + case Q_XQUOTARM: + if (!sb->s_qcop->set_xstate) + return -ENOSYS; + break; + case Q_XGETQSTAT: + if (!sb->s_qcop->get_xstate) + return -ENOSYS; + break; + case Q_XSETQLIM: + if (!sb->s_qcop->set_xquota) + return -ENOSYS; + break; + case Q_XGETQUOTA: + if (!sb->s_qcop->get_xquota) + return -ENOSYS; + break; + case Q_XQUOTASYNC: + if (!sb->s_qcop->quota_sync) + return -ENOSYS; + break; + default: + return -EINVAL; + } + + /* Check privileges */ + if (cmd == Q_XGETQUOTA) { + if (((type == XQM_USRQUOTA && current_euid() != id) || + (type == XQM_GRPQUOTA && !in_egroup_p(id))) && + !capable(CAP_SYS_ADMIN)) + return -EPERM; + } else if (cmd != Q_XGETQSTAT && cmd != Q_XQUOTASYNC) { + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + } + + return 0; +} + +static int check_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id) +{ + int error; + + if (XQM_COMMAND(cmd)) + error = xqm_quotactl_valid(sb, type, cmd, id); + 
else + error = generic_quotactl_valid(sb, type, cmd, id); + if (!error) + error = security_quotactl(cmd, type, id, sb); + return error; +} + +static void quota_sync_sb(struct super_block *sb, int type) +{ + int cnt; + + sb->s_qcop->quota_sync(sb, type); + + if (sb_dqopt(sb)->flags & DQUOT_QUOTA_SYS_FILE) + return; + /* This is not very clever (and fast) but currently I don't know about + * any other simple way of getting quota data to disk and we must get + * them there for userspace to be visible... */ + if (sb->s_op->sync_fs) + sb->s_op->sync_fs(sb, 1); + sync_blockdev(sb->s_bdev); + + /* + * Now when everything is written we can discard the pagecache so + * that userspace sees the changes. + */ + mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (type != -1 && cnt != type) + continue; + if (!sb_has_quota_active(sb, cnt)) + continue; + mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex, I_MUTEX_QUOTA); + truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0); + mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex); + } + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); +} + +void sync_dquots(struct super_block *sb, int type) +{ + int cnt; + + if (sb) { + if (sb->s_qcop->quota_sync) + quota_sync_sb(sb, type); + return; + } + + spin_lock(&sb_lock); +restart: + list_for_each_entry(sb, &super_blocks, s_list) { + /* This test just improves performance so it needn't be reliable... */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (type != -1 && type != cnt) + continue; + if (!sb_has_quota_active(sb, cnt)) + continue; + if (!info_dirty(&sb_dqopt(sb)->info[cnt]) && + list_empty(&sb_dqopt(sb)->info[cnt].dqi_dirty_list)) + continue; + break; + } + if (cnt == MAXQUOTAS) + continue; + sb->s_count++; + spin_unlock(&sb_lock); + down_read(&sb->s_umount); + if (sb->s_root && sb->s_qcop->quota_sync) + quota_sync_sb(sb, type); + up_read(&sb->s_umount); + spin_lock(&sb_lock); + if (__put_super_and_need_restart(sb)) + goto restart; + } + spin_unlock(&sb_lock); +} + +/* Copy parameters and call proper function */ +static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void __user *addr) +{ + int ret; + + switch (cmd) { + case Q_QUOTAON: { + char *pathname; + + if (IS_ERR(pathname = getname(addr))) + return PTR_ERR(pathname); + ret = sb->s_qcop->quota_on(sb, type, id, pathname, 0); + putname(pathname); + return ret; + } + case Q_QUOTAOFF: + return sb->s_qcop->quota_off(sb, type, 0); + + case Q_GETFMT: { + __u32 fmt; + + down_read(&sb_dqopt(sb)->dqptr_sem); + if (!sb_has_quota_active(sb, type)) { + up_read(&sb_dqopt(sb)->dqptr_sem); + return -ESRCH; + } + fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id; + up_read(&sb_dqopt(sb)->dqptr_sem); + if (copy_to_user(addr, &fmt, sizeof(fmt))) + return -EFAULT; + return 0; + } + case Q_GETINFO: { + struct if_dqinfo info; + + if ((ret = sb->s_qcop->get_info(sb, type, &info))) + return ret; + if (copy_to_user(addr, &info, sizeof(info))) + return -EFAULT; + return 0; + } + case Q_SETINFO: { + struct if_dqinfo info; + + if (copy_from_user(&info, addr, sizeof(info))) + return -EFAULT; + return sb->s_qcop->set_info(sb, type, &info); + } + case Q_GETQUOTA: { + struct if_dqblk idq; + + if ((ret = sb->s_qcop->get_dqblk(sb, type, id, &idq))) + return ret; + if (copy_to_user(addr, &idq, sizeof(idq))) + return -EFAULT; + return 0; + } + case Q_SETQUOTA: { + struct if_dqblk idq; + + if (copy_from_user(&idq, addr, sizeof(idq))) + return -EFAULT; + return sb->s_qcop->set_dqblk(sb, type, id, &idq); + } + case Q_SYNC: + 
sync_dquots(sb, type); + return 0; + + case Q_XQUOTAON: + case Q_XQUOTAOFF: + case Q_XQUOTARM: { + __u32 flags; + + if (copy_from_user(&flags, addr, sizeof(flags))) + return -EFAULT; + return sb->s_qcop->set_xstate(sb, flags, cmd); + } + case Q_XGETQSTAT: { + struct fs_quota_stat fqs; + + if ((ret = sb->s_qcop->get_xstate(sb, &fqs))) + return ret; + if (copy_to_user(addr, &fqs, sizeof(fqs))) + return -EFAULT; + return 0; + } + case Q_XSETQLIM: { + struct fs_disk_quota fdq; + + if (copy_from_user(&fdq, addr, sizeof(fdq))) + return -EFAULT; + return sb->s_qcop->set_xquota(sb, type, id, &fdq); + } + case Q_XGETQUOTA: { + struct fs_disk_quota fdq; + + if ((ret = sb->s_qcop->get_xquota(sb, type, id, &fdq))) + return ret; + if (copy_to_user(addr, &fdq, sizeof(fdq))) + return -EFAULT; + return 0; + } + case Q_XQUOTASYNC: + return sb->s_qcop->quota_sync(sb, type); + /* We never reach here unless validity check is broken */ + default: + BUG(); + } + return 0; +} + +/* + * look up a superblock on which quota ops will be performed + * - use the name of a block device to find the superblock thereon + */ +static inline struct super_block *quotactl_block(const char __user *special) +{ +#ifdef CONFIG_BLOCK + struct block_device *bdev; + struct super_block *sb; + char *tmp = getname(special); + + if (IS_ERR(tmp)) + return ERR_CAST(tmp); + bdev = lookup_bdev(tmp); + putname(tmp); + if (IS_ERR(bdev)) + return ERR_CAST(bdev); + sb = get_super(bdev); + bdput(bdev); + if (!sb) + return ERR_PTR(-ENODEV); + + return sb; +#else + return ERR_PTR(-ENODEV); +#endif +} + +/* + * This is the system call interface. This communicates with + * the user-level programs. Currently this only supports diskquota + * calls. Maybe we need to add the process quotas etc. in the future, + * but we probably should use rlimits for that. + */ +SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special, + qid_t, id, void __user *, addr) +{ + uint cmds, type; + struct super_block *sb = NULL; + int ret; + + cmds = cmd >> SUBCMDSHIFT; + type = cmd & SUBCMDMASK; + + if (cmds != Q_SYNC || special) { + sb = quotactl_block(special); + if (IS_ERR(sb)) + return PTR_ERR(sb); + } + + ret = check_quotactl_valid(sb, type, cmds, id); + if (ret >= 0) + ret = do_quotactl(sb, type, cmds, id, addr); + if (sb) + drop_super(sb); + + return ret; +} + +#if defined(CONFIG_COMPAT_FOR_U64_ALIGNMENT) +/* + * This code works only for 32 bit quota tools over 64 bit OS (x86_64, ia64) + * and is necessary due to alignment problems. 
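The syscall above splits cmd into a command and a quota type with SUBCMDSHIFT/SUBCMDMASK; userspace packs them the same way with the QCMD() macro from <sys/quota.h>. A small self-contained example querying the calling user's quota on a block device; the device path is only an example, and the struct dqblk layout is the one glibc exposes for the generic interface (matching struct if_dqblk here).

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/quota.h>

int main(int argc, char **argv)
{
	const char *dev = argc > 1 ? argv[1] : "/dev/sda1";	/* example device */
	struct dqblk q;

	memset(&q, 0, sizeof(q));
	/* QCMD() packs (Q_GETQUOTA, USRQUOTA) exactly the way the kernel
	 * unpacks cmd in the quotactl system call above. */
	if (quotactl(QCMD(Q_GETQUOTA, USRQUOTA), dev, getuid(),
		     (caddr_t)&q) != 0) {
		perror("quotactl(Q_GETQUOTA)");
		return 1;
	}
	printf("curspace=%llu bytes, block limits soft=%llu hard=%llu (1K blocks), inodes=%llu\n",
	       (unsigned long long)q.dqb_curspace,
	       (unsigned long long)q.dqb_bsoftlimit,
	       (unsigned long long)q.dqb_bhardlimit,
	       (unsigned long long)q.dqb_curinodes);
	return 0;
}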
+ */ +struct compat_if_dqblk { + compat_u64 dqb_bhardlimit; + compat_u64 dqb_bsoftlimit; + compat_u64 dqb_curspace; + compat_u64 dqb_ihardlimit; + compat_u64 dqb_isoftlimit; + compat_u64 dqb_curinodes; + compat_u64 dqb_btime; + compat_u64 dqb_itime; + compat_uint_t dqb_valid; +}; + +/* XFS structures */ +struct compat_fs_qfilestat { + compat_u64 dqb_bhardlimit; + compat_u64 qfs_nblks; + compat_uint_t qfs_nextents; +}; + +struct compat_fs_quota_stat { + __s8 qs_version; + __u16 qs_flags; + __s8 qs_pad; + struct compat_fs_qfilestat qs_uquota; + struct compat_fs_qfilestat qs_gquota; + compat_uint_t qs_incoredqs; + compat_int_t qs_btimelimit; + compat_int_t qs_itimelimit; + compat_int_t qs_rtbtimelimit; + __u16 qs_bwarnlimit; + __u16 qs_iwarnlimit; +}; + +asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special, + qid_t id, void __user *addr) +{ + unsigned int cmds; + struct if_dqblk __user *dqblk; + struct compat_if_dqblk __user *compat_dqblk; + struct fs_quota_stat __user *fsqstat; + struct compat_fs_quota_stat __user *compat_fsqstat; + compat_uint_t data; + u16 xdata; + long ret; + + cmds = cmd >> SUBCMDSHIFT; + + switch (cmds) { + case Q_GETQUOTA: + dqblk = compat_alloc_user_space(sizeof(struct if_dqblk)); + compat_dqblk = addr; + ret = sys_quotactl(cmd, special, id, dqblk); + if (ret) + break; + if (copy_in_user(compat_dqblk, dqblk, sizeof(*compat_dqblk)) || + get_user(data, &dqblk->dqb_valid) || + put_user(data, &compat_dqblk->dqb_valid)) + ret = -EFAULT; + break; + case Q_SETQUOTA: + dqblk = compat_alloc_user_space(sizeof(struct if_dqblk)); + compat_dqblk = addr; + ret = -EFAULT; + if (copy_in_user(dqblk, compat_dqblk, sizeof(*compat_dqblk)) || + get_user(data, &compat_dqblk->dqb_valid) || + put_user(data, &dqblk->dqb_valid)) + break; + ret = sys_quotactl(cmd, special, id, dqblk); + break; + case Q_XGETQSTAT: + fsqstat = compat_alloc_user_space(sizeof(struct fs_quota_stat)); + compat_fsqstat = addr; + ret = sys_quotactl(cmd, special, id, fsqstat); + if (ret) + break; + ret = -EFAULT; + /* Copying qs_version, qs_flags, qs_pad */ + if (copy_in_user(compat_fsqstat, fsqstat, + offsetof(struct compat_fs_quota_stat, qs_uquota))) + break; + /* Copying qs_uquota */ + if (copy_in_user(&compat_fsqstat->qs_uquota, + &fsqstat->qs_uquota, + sizeof(compat_fsqstat->qs_uquota)) || + get_user(data, &fsqstat->qs_uquota.qfs_nextents) || + put_user(data, &compat_fsqstat->qs_uquota.qfs_nextents)) + break; + /* Copying qs_gquota */ + if (copy_in_user(&compat_fsqstat->qs_gquota, + &fsqstat->qs_gquota, + sizeof(compat_fsqstat->qs_gquota)) || + get_user(data, &fsqstat->qs_gquota.qfs_nextents) || + put_user(data, &compat_fsqstat->qs_gquota.qfs_nextents)) + break; + /* Copying the rest */ + if (copy_in_user(&compat_fsqstat->qs_incoredqs, + &fsqstat->qs_incoredqs, + sizeof(struct compat_fs_quota_stat) - + offsetof(struct compat_fs_quota_stat, qs_incoredqs)) || + get_user(xdata, &fsqstat->qs_iwarnlimit) || + put_user(xdata, &compat_fsqstat->qs_iwarnlimit)) + break; + ret = 0; + break; + default: + ret = sys_quotactl(cmd, special, id, addr); + } + return ret; +} +#endif diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c new file mode 100644 index 0000000..953404c --- /dev/null +++ b/fs/quota/quota_tree.c @@ -0,0 +1,645 @@ +/* + * vfsv0 quota IO operations on file + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "quota_tree.h" + +MODULE_AUTHOR("Jan Kara"); +MODULE_DESCRIPTION("Quota trie support"); 
+MODULE_LICENSE("GPL"); + +#define __QUOTA_QT_PARANOIA + +typedef char *dqbuf_t; + +static int get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth) +{ + unsigned int epb = info->dqi_usable_bs >> 2; + + depth = info->dqi_qtree_depth - depth - 1; + while (depth--) + id /= epb; + return id % epb; +} + +/* Number of entries in one blocks */ +static inline int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info) +{ + return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader)) + / info->dqi_entry_size; +} + +static dqbuf_t getdqbuf(size_t size) +{ + dqbuf_t buf = kmalloc(size, GFP_NOFS); + if (!buf) + printk(KERN_WARNING "VFS: Not enough memory for quota buffers.\n"); + return buf; +} + +static inline void freedqbuf(dqbuf_t buf) +{ + kfree(buf); +} + +static inline ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, dqbuf_t buf) +{ + struct super_block *sb = info->dqi_sb; + + memset(buf, 0, info->dqi_usable_bs); + return sb->s_op->quota_read(sb, info->dqi_type, (char *)buf, + info->dqi_usable_bs, blk << info->dqi_blocksize_bits); +} + +static inline ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, dqbuf_t buf) +{ + struct super_block *sb = info->dqi_sb; + + return sb->s_op->quota_write(sb, info->dqi_type, (char *)buf, + info->dqi_usable_bs, blk << info->dqi_blocksize_bits); +} + +/* Remove empty block from list and return it */ +static int get_free_dqblk(struct qtree_mem_dqinfo *info) +{ + dqbuf_t buf = getdqbuf(info->dqi_usable_bs); + struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; + int ret, blk; + + if (!buf) + return -ENOMEM; + if (info->dqi_free_blk) { + blk = info->dqi_free_blk; + ret = read_blk(info, blk, buf); + if (ret < 0) + goto out_buf; + info->dqi_free_blk = le32_to_cpu(dh->dqdh_next_free); + } + else { + memset(buf, 0, info->dqi_usable_bs); + /* Assure block allocation... 
*/ + ret = write_blk(info, info->dqi_blocks, buf); + if (ret < 0) + goto out_buf; + blk = info->dqi_blocks++; + } + mark_info_dirty(info->dqi_sb, info->dqi_type); + ret = blk; +out_buf: + freedqbuf(buf); + return ret; +} + +/* Insert empty block to the list */ +static int put_free_dqblk(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk) +{ + struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; + int err; + + dh->dqdh_next_free = cpu_to_le32(info->dqi_free_blk); + dh->dqdh_prev_free = cpu_to_le32(0); + dh->dqdh_entries = cpu_to_le16(0); + err = write_blk(info, blk, buf); + if (err < 0) + return err; + info->dqi_free_blk = blk; + mark_info_dirty(info->dqi_sb, info->dqi_type); + return 0; +} + +/* Remove given block from the list of blocks with free entries */ +static int remove_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk) +{ + dqbuf_t tmpbuf = getdqbuf(info->dqi_usable_bs); + struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; + uint nextblk = le32_to_cpu(dh->dqdh_next_free); + uint prevblk = le32_to_cpu(dh->dqdh_prev_free); + int err; + + if (!tmpbuf) + return -ENOMEM; + if (nextblk) { + err = read_blk(info, nextblk, tmpbuf); + if (err < 0) + goto out_buf; + ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free = + dh->dqdh_prev_free; + err = write_blk(info, nextblk, tmpbuf); + if (err < 0) + goto out_buf; + } + if (prevblk) { + err = read_blk(info, prevblk, tmpbuf); + if (err < 0) + goto out_buf; + ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_next_free = + dh->dqdh_next_free; + err = write_blk(info, prevblk, tmpbuf); + if (err < 0) + goto out_buf; + } else { + info->dqi_free_entry = nextblk; + mark_info_dirty(info->dqi_sb, info->dqi_type); + } + freedqbuf(tmpbuf); + dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0); + /* No matter whether write succeeds block is out of list */ + if (write_blk(info, blk, buf) < 0) + printk(KERN_ERR "VFS: Can't write block (%u) with free entries.\n", blk); + return 0; +out_buf: + freedqbuf(tmpbuf); + return err; +} + +/* Insert given block to the beginning of list with free entries */ +static int insert_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk) +{ + dqbuf_t tmpbuf = getdqbuf(info->dqi_usable_bs); + struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; + int err; + + if (!tmpbuf) + return -ENOMEM; + dh->dqdh_next_free = cpu_to_le32(info->dqi_free_entry); + dh->dqdh_prev_free = cpu_to_le32(0); + err = write_blk(info, blk, buf); + if (err < 0) + goto out_buf; + if (info->dqi_free_entry) { + err = read_blk(info, info->dqi_free_entry, tmpbuf); + if (err < 0) + goto out_buf; + ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free = + cpu_to_le32(blk); + err = write_blk(info, info->dqi_free_entry, tmpbuf); + if (err < 0) + goto out_buf; + } + freedqbuf(tmpbuf); + info->dqi_free_entry = blk; + mark_info_dirty(info->dqi_sb, info->dqi_type); + return 0; +out_buf: + freedqbuf(tmpbuf); + return err; +} + +/* Is the entry in the block free? 
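get_free_dqblk() and put_free_dqblk() above keep completely free blocks on a stack threaded through the block headers themselves: dqdh_next_free in each free block points at the next one and dqi_free_blk at the head, with a fresh block appended to the file when the stack is empty (block 0 holds the format header in vfsv0, so 0 can double as the end marker). A small userspace model of that bookkeeping; plain arrays stand in for the on-disk headers.

#include <stdio.h>
#include <stdint.h>

#define NBLOCKS 8

static uint32_t next_free[NBLOCKS];	/* stands in for dqdh_next_free */
static uint32_t free_blk;		/* stands in for info->dqi_free_blk */
static uint32_t blocks_in_file = 1;	/* stands in for info->dqi_blocks */

/* put_free_dqblk(): push a now-empty block onto the free stack. */
static void toy_put_free(uint32_t blk)
{
	next_free[blk] = free_blk;
	free_blk = blk;
}

/* get_free_dqblk(): pop a block, or grow the file when the stack is empty. */
static uint32_t toy_get_free(void)
{
	uint32_t blk;

	if (free_blk) {
		blk = free_blk;
		free_blk = next_free[blk];
	} else {
		blk = blocks_in_file++;	/* append a fresh block */
	}
	return blk;
}

int main(void)
{
	uint32_t a = toy_get_free(), b = toy_get_free();

	printf("allocated blocks %u and %u\n", (unsigned)a, (unsigned)b);
	toy_put_free(a);
	printf("freed %u; next allocation returns %u\n",
	       (unsigned)a, (unsigned)toy_get_free());
	return 0;
}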
*/ +int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk) +{ + int i; + + for (i = 0; i < info->dqi_entry_size; i++) + if (disk[i]) + return 0; + return 1; +} +EXPORT_SYMBOL(qtree_entry_unused); + +/* Find space for dquot */ +static uint find_free_dqentry(struct qtree_mem_dqinfo *info, + struct dquot *dquot, int *err) +{ + uint blk, i; + struct qt_disk_dqdbheader *dh; + dqbuf_t buf = getdqbuf(info->dqi_usable_bs); + char *ddquot; + + *err = 0; + if (!buf) { + *err = -ENOMEM; + return 0; + } + dh = (struct qt_disk_dqdbheader *)buf; + if (info->dqi_free_entry) { + blk = info->dqi_free_entry; + *err = read_blk(info, blk, buf); + if (*err < 0) + goto out_buf; + } else { + blk = get_free_dqblk(info); + if ((int)blk < 0) { + *err = blk; + freedqbuf(buf); + return 0; + } + memset(buf, 0, info->dqi_usable_bs); + /* This is enough as block is already zeroed and entry list is empty... */ + info->dqi_free_entry = blk; + mark_info_dirty(dquot->dq_sb, dquot->dq_type); + } + /* Block will be full? */ + if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) { + *err = remove_free_dqentry(info, buf, blk); + if (*err < 0) { + printk(KERN_ERR "VFS: find_free_dqentry(): Can't " + "remove block (%u) from entry free list.\n", + blk); + goto out_buf; + } + } + le16_add_cpu(&dh->dqdh_entries, 1); + /* Find free structure in block */ + for (i = 0, ddquot = ((char *)buf) + sizeof(struct qt_disk_dqdbheader); + i < qtree_dqstr_in_blk(info) && !qtree_entry_unused(info, ddquot); + i++, ddquot += info->dqi_entry_size); +#ifdef __QUOTA_QT_PARANOIA + if (i == qtree_dqstr_in_blk(info)) { + printk(KERN_ERR "VFS: find_free_dqentry(): Data block full " + "but it shouldn't.\n"); + *err = -EIO; + goto out_buf; + } +#endif + *err = write_blk(info, blk, buf); + if (*err < 0) { + printk(KERN_ERR "VFS: find_free_dqentry(): Can't write quota " + "data block %u.\n", blk); + goto out_buf; + } + dquot->dq_off = (blk << info->dqi_blocksize_bits) + + sizeof(struct qt_disk_dqdbheader) + + i * info->dqi_entry_size; + freedqbuf(buf); + return blk; +out_buf: + freedqbuf(buf); + return 0; +} + +/* Insert reference to structure into the trie */ +static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, + uint *treeblk, int depth) +{ + dqbuf_t buf = getdqbuf(info->dqi_usable_bs); + int ret = 0, newson = 0, newact = 0; + __le32 *ref; + uint newblk; + + if (!buf) + return -ENOMEM; + if (!*treeblk) { + ret = get_free_dqblk(info); + if (ret < 0) + goto out_buf; + *treeblk = ret; + memset(buf, 0, info->dqi_usable_bs); + newact = 1; + } else { + ret = read_blk(info, *treeblk, buf); + if (ret < 0) { + printk(KERN_ERR "VFS: Can't read tree quota block " + "%u.\n", *treeblk); + goto out_buf; + } + } + ref = (__le32 *)buf; + newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); + if (!newblk) + newson = 1; + if (depth == info->dqi_qtree_depth - 1) { +#ifdef __QUOTA_QT_PARANOIA + if (newblk) { + printk(KERN_ERR "VFS: Inserting already present quota " + "entry (block %u).\n", + le32_to_cpu(ref[get_index(info, + dquot->dq_id, depth)])); + ret = -EIO; + goto out_buf; + } +#endif + newblk = find_free_dqentry(info, dquot, &ret); + } else { + ret = do_insert_tree(info, dquot, &newblk, depth+1); + } + if (newson && ret >= 0) { + ref[get_index(info, dquot->dq_id, depth)] = + cpu_to_le32(newblk); + ret = write_blk(info, *treeblk, buf); + } else if (newact && ret < 0) { + put_free_dqblk(info, buf, *treeblk); + } +out_buf: + freedqbuf(buf); + return ret; +} + +/* Wrapper for inserting quota structure into tree 
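find_free_dqentry() above turns a (data block, slot) pair into the byte offset cached in dq_off, and qtree_dqstr_in_blk() gives the number of entries per data block. A short worked example with the usual vfsv0 numbers: 1024-byte tree blocks, the 16-byte data-block header from quota_tree.h, and a 48-byte on-disk entry (the entry size is assumed here, not taken from this patch).

#include <stdio.h>

int main(void)
{
	unsigned int blocksize_bits = 10;	/* 1024-byte tree blocks */
	unsigned int header_size = 16;		/* struct qt_disk_dqdbheader, padded */
	unsigned int entry_size = 48;		/* assumed vfsv0 on-disk entry size */
	unsigned int blk = 5, slot = 3;		/* example location */

	/* qtree_dqstr_in_blk(): (1024 - 16) / 48 = 21 entries per block. */
	unsigned int per_blk = ((1u << blocksize_bits) - header_size) / entry_size;

	/* Mirrors the dq_off computation at the end of find_free_dqentry(). */
	unsigned long long dq_off = ((unsigned long long)blk << blocksize_bits)
				    + header_size + slot * entry_size;

	printf("%u entries per block; entry (%u,%u) lives at offset %llu\n",
	       per_blk, blk, slot, dq_off);
	return 0;
}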
*/ +static inline int dq_insert_tree(struct qtree_mem_dqinfo *info, + struct dquot *dquot) +{ + int tmp = QT_TREEOFF; + return do_insert_tree(info, dquot, &tmp, 0); +} + +/* + * We don't have to be afraid of deadlocks as we never have quotas on quota files... + */ +int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) +{ + int type = dquot->dq_type; + struct super_block *sb = dquot->dq_sb; + ssize_t ret; + dqbuf_t ddquot = getdqbuf(info->dqi_entry_size); + + if (!ddquot) + return -ENOMEM; + + /* dq_off is guarded by dqio_mutex */ + if (!dquot->dq_off) { + ret = dq_insert_tree(info, dquot); + if (ret < 0) { + printk(KERN_ERR "VFS: Error %zd occurred while " + "creating quota.\n", ret); + freedqbuf(ddquot); + return ret; + } + } + spin_lock(&dq_data_lock); + info->dqi_ops->mem2disk_dqblk(ddquot, dquot); + spin_unlock(&dq_data_lock); + ret = sb->s_op->quota_write(sb, type, (char *)ddquot, + info->dqi_entry_size, dquot->dq_off); + if (ret != info->dqi_entry_size) { + printk(KERN_WARNING "VFS: dquota write failed on dev %s\n", + sb->s_id); + if (ret >= 0) + ret = -ENOSPC; + } else { + ret = 0; + } + dqstats.writes++; + freedqbuf(ddquot); + + return ret; +} +EXPORT_SYMBOL(qtree_write_dquot); + +/* Free dquot entry in data block */ +static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot, + uint blk) +{ + struct qt_disk_dqdbheader *dh; + dqbuf_t buf = getdqbuf(info->dqi_usable_bs); + int ret = 0; + + if (!buf) + return -ENOMEM; + if (dquot->dq_off >> info->dqi_blocksize_bits != blk) { + printk(KERN_ERR "VFS: Quota structure has offset to other " + "block (%u) than it should (%u).\n", blk, + (uint)(dquot->dq_off >> info->dqi_blocksize_bits)); + goto out_buf; + } + ret = read_blk(info, blk, buf); + if (ret < 0) { + printk(KERN_ERR "VFS: Can't read quota data block %u\n", blk); + goto out_buf; + } + dh = (struct qt_disk_dqdbheader *)buf; + le16_add_cpu(&dh->dqdh_entries, -1); + if (!le16_to_cpu(dh->dqdh_entries)) { /* Block got free? 
*/ + ret = remove_free_dqentry(info, buf, blk); + if (ret >= 0) + ret = put_free_dqblk(info, buf, blk); + if (ret < 0) { + printk(KERN_ERR "VFS: Can't move quota data block (%u) " + "to free list.\n", blk); + goto out_buf; + } + } else { + memset(buf + + (dquot->dq_off & ((1 << info->dqi_blocksize_bits) - 1)), + 0, info->dqi_entry_size); + if (le16_to_cpu(dh->dqdh_entries) == + qtree_dqstr_in_blk(info) - 1) { + /* Insert will write block itself */ + ret = insert_free_dqentry(info, buf, blk); + if (ret < 0) { + printk(KERN_ERR "VFS: Can't insert quota data " + "block (%u) to free entry list.\n", blk); + goto out_buf; + } + } else { + ret = write_blk(info, blk, buf); + if (ret < 0) { + printk(KERN_ERR "VFS: Can't write quota data " + "block %u\n", blk); + goto out_buf; + } + } + } + dquot->dq_off = 0; /* Quota is now unattached */ +out_buf: + freedqbuf(buf); + return ret; +} + +/* Remove reference to dquot from tree */ +static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, + uint *blk, int depth) +{ + dqbuf_t buf = getdqbuf(info->dqi_usable_bs); + int ret = 0; + uint newblk; + __le32 *ref = (__le32 *)buf; + + if (!buf) + return -ENOMEM; + ret = read_blk(info, *blk, buf); + if (ret < 0) { + printk(KERN_ERR "VFS: Can't read quota data block %u\n", *blk); + goto out_buf; + } + newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); + if (depth == info->dqi_qtree_depth - 1) { + ret = free_dqentry(info, dquot, newblk); + newblk = 0; + } else { + ret = remove_tree(info, dquot, &newblk, depth+1); + } + if (ret >= 0 && !newblk) { + int i; + ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0); + /* Block got empty? */ + for (i = 0; + i < (info->dqi_usable_bs >> 2) && !ref[i]; + i++); + /* Don't put the root block into the free block list */ + if (i == (info->dqi_usable_bs >> 2) + && *blk != QT_TREEOFF) { + put_free_dqblk(info, buf, *blk); + *blk = 0; + } else { + ret = write_blk(info, *blk, buf); + if (ret < 0) + printk(KERN_ERR "VFS: Can't write quota tree " + "block %u.\n", *blk); + } + } +out_buf: + freedqbuf(buf); + return ret; +} + +/* Delete dquot from tree */ +int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) +{ + uint tmp = QT_TREEOFF; + + if (!dquot->dq_off) /* Even not allocated? 
*/ + return 0; + return remove_tree(info, dquot, &tmp, 0); +} +EXPORT_SYMBOL(qtree_delete_dquot); + +/* Find entry in block */ +static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info, + struct dquot *dquot, uint blk) +{ + dqbuf_t buf = getdqbuf(info->dqi_usable_bs); + loff_t ret = 0; + int i; + char *ddquot; + + if (!buf) + return -ENOMEM; + ret = read_blk(info, blk, buf); + if (ret < 0) { + printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk); + goto out_buf; + } + for (i = 0, ddquot = ((char *)buf) + sizeof(struct qt_disk_dqdbheader); + i < qtree_dqstr_in_blk(info) && !info->dqi_ops->is_id(ddquot, dquot); + i++, ddquot += info->dqi_entry_size); + if (i == qtree_dqstr_in_blk(info)) { + printk(KERN_ERR "VFS: Quota for id %u referenced " + "but not present.\n", dquot->dq_id); + ret = -EIO; + goto out_buf; + } else { + ret = (blk << info->dqi_blocksize_bits) + sizeof(struct + qt_disk_dqdbheader) + i * info->dqi_entry_size; + } +out_buf: + freedqbuf(buf); + return ret; +} + +/* Find entry for given id in the tree */ +static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info, + struct dquot *dquot, uint blk, int depth) +{ + dqbuf_t buf = getdqbuf(info->dqi_usable_bs); + loff_t ret = 0; + __le32 *ref = (__le32 *)buf; + + if (!buf) + return -ENOMEM; + ret = read_blk(info, blk, buf); + if (ret < 0) { + printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk); + goto out_buf; + } + ret = 0; + blk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); + if (!blk) /* No reference? */ + goto out_buf; + if (depth < info->dqi_qtree_depth - 1) + ret = find_tree_dqentry(info, dquot, blk, depth+1); + else + ret = find_block_dqentry(info, dquot, blk); +out_buf: + freedqbuf(buf); + return ret; +} + +/* Find entry for given id in the tree - wrapper function */ +static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info, + struct dquot *dquot) +{ + return find_tree_dqentry(info, dquot, QT_TREEOFF, 0); +} + +int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) +{ + int type = dquot->dq_type; + struct super_block *sb = dquot->dq_sb; + loff_t offset; + dqbuf_t ddquot; + int ret = 0; + +#ifdef __QUOTA_QT_PARANOIA + /* Invalidated quota? */ + if (!sb_dqopt(dquot->dq_sb)->files[type]) { + printk(KERN_ERR "VFS: Quota invalidated while reading!\n"); + return -EIO; + } +#endif + /* Do we know offset of the dquot entry in the quota file? */ + if (!dquot->dq_off) { + offset = find_dqentry(info, dquot); + if (offset <= 0) { /* Entry not present? 
*/ + if (offset < 0) + printk(KERN_ERR "VFS: Can't read quota " + "structure for id %u.\n", dquot->dq_id); + dquot->dq_off = 0; + set_bit(DQ_FAKE_B, &dquot->dq_flags); + memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk)); + ret = offset; + goto out; + } + dquot->dq_off = offset; + } + ddquot = getdqbuf(info->dqi_entry_size); + if (!ddquot) + return -ENOMEM; + ret = sb->s_op->quota_read(sb, type, (char *)ddquot, + info->dqi_entry_size, dquot->dq_off); + if (ret != info->dqi_entry_size) { + if (ret >= 0) + ret = -EIO; + printk(KERN_ERR "VFS: Error while reading quota " + "structure for id %u.\n", dquot->dq_id); + set_bit(DQ_FAKE_B, &dquot->dq_flags); + memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk)); + freedqbuf(ddquot); + goto out; + } + spin_lock(&dq_data_lock); + info->dqi_ops->disk2mem_dqblk(dquot, ddquot); + if (!dquot->dq_dqb.dqb_bhardlimit && + !dquot->dq_dqb.dqb_bsoftlimit && + !dquot->dq_dqb.dqb_ihardlimit && + !dquot->dq_dqb.dqb_isoftlimit) + set_bit(DQ_FAKE_B, &dquot->dq_flags); + spin_unlock(&dq_data_lock); + freedqbuf(ddquot); +out: + dqstats.reads++; + return ret; +} +EXPORT_SYMBOL(qtree_read_dquot); + +/* Check whether dquot should not be deleted. We know we are + * the only one operating on dquot (thanks to dq_lock) */ +int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) +{ + if (test_bit(DQ_FAKE_B, &dquot->dq_flags) && !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace)) + return qtree_delete_dquot(info, dquot); + return 0; +} +EXPORT_SYMBOL(qtree_release_dquot); diff --git a/fs/quota/quota_tree.h b/fs/quota/quota_tree.h new file mode 100644 index 0000000..a1ab8db --- /dev/null +++ b/fs/quota/quota_tree.h @@ -0,0 +1,25 @@ +/* + * Definitions of structures for vfsv0 quota format + */ + +#ifndef _LINUX_QUOTA_TREE_H +#define _LINUX_QUOTA_TREE_H + +#include +#include + +/* + * Structure of header of block with quota structures. 
It is padded to 16 bytes so + * there will be space for exactly 21 quota-entries in a block + */ +struct qt_disk_dqdbheader { + __le32 dqdh_next_free; /* Number of next block with free entry */ + __le32 dqdh_prev_free; /* Number of previous block with free entry */ + __le16 dqdh_entries; /* Number of valid entries in block */ + __le16 dqdh_pad1; + __le32 dqdh_pad2; +}; + +#define QT_TREEOFF 1 /* Offset of tree in file in blocks */ + +#endif /* _LINUX_QUOTAIO_TREE_H */ diff --git a/fs/quota/quota_v1.c b/fs/quota/quota_v1.c new file mode 100644 index 0000000..b4af1c6 --- /dev/null +++ b/fs/quota/quota_v1.c @@ -0,0 +1,218 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "quotaio_v1.h" + +MODULE_AUTHOR("Jan Kara"); +MODULE_DESCRIPTION("Old quota format support"); +MODULE_LICENSE("GPL"); + +#define QUOTABLOCK_BITS 10 +#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS) + +static inline qsize_t v1_stoqb(qsize_t space) +{ + return (space + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS; +} + +static inline qsize_t v1_qbtos(qsize_t blocks) +{ + return blocks << QUOTABLOCK_BITS; +} + +static void v1_disk2mem_dqblk(struct mem_dqblk *m, struct v1_disk_dqblk *d) +{ + m->dqb_ihardlimit = d->dqb_ihardlimit; + m->dqb_isoftlimit = d->dqb_isoftlimit; + m->dqb_curinodes = d->dqb_curinodes; + m->dqb_bhardlimit = v1_qbtos(d->dqb_bhardlimit); + m->dqb_bsoftlimit = v1_qbtos(d->dqb_bsoftlimit); + m->dqb_curspace = v1_qbtos(d->dqb_curblocks); + m->dqb_itime = d->dqb_itime; + m->dqb_btime = d->dqb_btime; +} + +static void v1_mem2disk_dqblk(struct v1_disk_dqblk *d, struct mem_dqblk *m) +{ + d->dqb_ihardlimit = m->dqb_ihardlimit; + d->dqb_isoftlimit = m->dqb_isoftlimit; + d->dqb_curinodes = m->dqb_curinodes; + d->dqb_bhardlimit = v1_stoqb(m->dqb_bhardlimit); + d->dqb_bsoftlimit = v1_stoqb(m->dqb_bsoftlimit); + d->dqb_curblocks = v1_stoqb(m->dqb_curspace); + d->dqb_itime = m->dqb_itime; + d->dqb_btime = m->dqb_btime; +} + +static int v1_read_dqblk(struct dquot *dquot) +{ + int type = dquot->dq_type; + struct v1_disk_dqblk dqblk; + + if (!sb_dqopt(dquot->dq_sb)->files[type]) + return -EINVAL; + + /* Set structure to 0s in case read fails/is after end of file */ + memset(&dqblk, 0, sizeof(struct v1_disk_dqblk)); + dquot->dq_sb->s_op->quota_read(dquot->dq_sb, type, (char *)&dqblk, sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id)); + + v1_disk2mem_dqblk(&dquot->dq_dqb, &dqblk); + if (dquot->dq_dqb.dqb_bhardlimit == 0 && dquot->dq_dqb.dqb_bsoftlimit == 0 && + dquot->dq_dqb.dqb_ihardlimit == 0 && dquot->dq_dqb.dqb_isoftlimit == 0) + set_bit(DQ_FAKE_B, &dquot->dq_flags); + dqstats.reads++; + + return 0; +} + +static int v1_commit_dqblk(struct dquot *dquot) +{ + short type = dquot->dq_type; + ssize_t ret; + struct v1_disk_dqblk dqblk; + + v1_mem2disk_dqblk(&dqblk, &dquot->dq_dqb); + if (dquot->dq_id == 0) { + dqblk.dqb_btime = sb_dqopt(dquot->dq_sb)->info[type].dqi_bgrace; + dqblk.dqb_itime = sb_dqopt(dquot->dq_sb)->info[type].dqi_igrace; + } + ret = 0; + if (sb_dqopt(dquot->dq_sb)->files[type]) + ret = dquot->dq_sb->s_op->quota_write(dquot->dq_sb, type, (char *)&dqblk, + sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id)); + if (ret != sizeof(struct v1_disk_dqblk)) { + printk(KERN_WARNING "VFS: dquota write failed on dev %s\n", + dquot->dq_sb->s_id); + if (ret >= 0) + ret = -EIO; + goto out; + } + ret = 0; + +out: + dqstats.writes++; + + return ret; +} + +/* Magics of new quota format */ +#define V2_INITQMAGICS {\ + 0xd9c01f11, /* USRQUOTA */\ + 0xd9c01927 /* GRPQUOTA */\ 
+} + +/* Header of new quota format */ +struct v2_disk_dqheader { + __le32 dqh_magic; /* Magic number identifying file */ + __le32 dqh_version; /* File version */ +}; + +static int v1_check_quota_file(struct super_block *sb, int type) +{ + struct inode *inode = sb_dqopt(sb)->files[type]; + ulong blocks; + size_t off; + struct v2_disk_dqheader dqhead; + ssize_t size; + loff_t isize; + static const uint quota_magics[] = V2_INITQMAGICS; + + isize = i_size_read(inode); + if (!isize) + return 0; + blocks = isize >> BLOCK_SIZE_BITS; + off = isize & (BLOCK_SIZE - 1); + if ((blocks % sizeof(struct v1_disk_dqblk) * BLOCK_SIZE + off) % sizeof(struct v1_disk_dqblk)) + return 0; + /* Doublecheck whether we didn't get file with new format - with old quotactl() this could happen */ + size = sb->s_op->quota_read(sb, type, (char *)&dqhead, sizeof(struct v2_disk_dqheader), 0); + if (size != sizeof(struct v2_disk_dqheader)) + return 1; /* Probably not new format */ + if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type]) + return 1; /* Definitely not new format */ + printk(KERN_INFO "VFS: %s: Refusing to turn on old quota format on given file. It probably contains newer quota format.\n", sb->s_id); + return 0; /* Seems like a new format file -> refuse it */ +} + +static int v1_read_file_info(struct super_block *sb, int type) +{ + struct quota_info *dqopt = sb_dqopt(sb); + struct v1_disk_dqblk dqblk; + int ret; + + if ((ret = sb->s_op->quota_read(sb, type, (char *)&dqblk, sizeof(struct v1_disk_dqblk), v1_dqoff(0))) != sizeof(struct v1_disk_dqblk)) { + if (ret >= 0) + ret = -EIO; + goto out; + } + ret = 0; + /* limits are stored as unsigned 32-bit data */ + dqopt->info[type].dqi_maxblimit = 0xffffffff; + dqopt->info[type].dqi_maxilimit = 0xffffffff; + dqopt->info[type].dqi_igrace = dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME; + dqopt->info[type].dqi_bgrace = dqblk.dqb_btime ? 
dqblk.dqb_btime : MAX_DQ_TIME; +out: + return ret; +} + +static int v1_write_file_info(struct super_block *sb, int type) +{ + struct quota_info *dqopt = sb_dqopt(sb); + struct v1_disk_dqblk dqblk; + int ret; + + dqopt->info[type].dqi_flags &= ~DQF_INFO_DIRTY; + if ((ret = sb->s_op->quota_read(sb, type, (char *)&dqblk, + sizeof(struct v1_disk_dqblk), v1_dqoff(0))) != sizeof(struct v1_disk_dqblk)) { + if (ret >= 0) + ret = -EIO; + goto out; + } + dqblk.dqb_itime = dqopt->info[type].dqi_igrace; + dqblk.dqb_btime = dqopt->info[type].dqi_bgrace; + ret = sb->s_op->quota_write(sb, type, (char *)&dqblk, + sizeof(struct v1_disk_dqblk), v1_dqoff(0)); + if (ret == sizeof(struct v1_disk_dqblk)) + ret = 0; + else if (ret > 0) + ret = -EIO; +out: + return ret; +} + +static struct quota_format_ops v1_format_ops = { + .check_quota_file = v1_check_quota_file, + .read_file_info = v1_read_file_info, + .write_file_info = v1_write_file_info, + .free_file_info = NULL, + .read_dqblk = v1_read_dqblk, + .commit_dqblk = v1_commit_dqblk, +}; + +static struct quota_format_type v1_quota_format = { + .qf_fmt_id = QFMT_VFS_OLD, + .qf_ops = &v1_format_ops, + .qf_owner = THIS_MODULE +}; + +static int __init init_v1_quota_format(void) +{ + return register_quota_format(&v1_quota_format); +} + +static void __exit exit_v1_quota_format(void) +{ + unregister_quota_format(&v1_quota_format); +} + +module_init(init_v1_quota_format); +module_exit(exit_v1_quota_format); + diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c new file mode 100644 index 0000000..b618b56 --- /dev/null +++ b/fs/quota/quota_v2.c @@ -0,0 +1,236 @@ +/* + * vfsv0 quota IO operations on file + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "quota_tree.h" +#include "quotaio_v2.h" + +MODULE_AUTHOR("Jan Kara"); +MODULE_DESCRIPTION("Quota format v2 support"); +MODULE_LICENSE("GPL"); + +#define __QUOTA_V2_PARANOIA + +static void v2_mem2diskdqb(void *dp, struct dquot *dquot); +static void v2_disk2memdqb(struct dquot *dquot, void *dp); +static int v2_is_id(void *dp, struct dquot *dquot); + +static struct qtree_fmt_operations v2_qtree_ops = { + .mem2disk_dqblk = v2_mem2diskdqb, + .disk2mem_dqblk = v2_disk2memdqb, + .is_id = v2_is_id, +}; + +#define QUOTABLOCK_BITS 10 +#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS) + +static inline qsize_t v2_stoqb(qsize_t space) +{ + return (space + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS; +} + +static inline qsize_t v2_qbtos(qsize_t blocks) +{ + return blocks << QUOTABLOCK_BITS; +} + +/* Check whether given file is really vfsv0 quotafile */ +static int v2_check_quota_file(struct super_block *sb, int type) +{ + struct v2_disk_dqheader dqhead; + ssize_t size; + static const uint quota_magics[] = V2_INITQMAGICS; + static const uint quota_versions[] = V2_INITQVERSIONS; + + size = sb->s_op->quota_read(sb, type, (char *)&dqhead, sizeof(struct v2_disk_dqheader), 0); + if (size != sizeof(struct v2_disk_dqheader)) { + printk("quota_v2: failed read expected=%zd got=%zd\n", + sizeof(struct v2_disk_dqheader), size); + return 0; + } + if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type] || + le32_to_cpu(dqhead.dqh_version) != quota_versions[type]) + return 0; + return 1; +} + +/* Read information header from quota file */ +static int v2_read_file_info(struct super_block *sb, int type) +{ + struct v2_disk_dqinfo dinfo; + struct mem_dqinfo *info = sb_dqinfo(sb, type); + struct qtree_mem_dqinfo *qinfo; + ssize_t size; + + size = sb->s_op->quota_read(sb, type, (char 
*)&dinfo, + sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF); + if (size != sizeof(struct v2_disk_dqinfo)) { + printk(KERN_WARNING "Can't read info structure on device %s.\n", + sb->s_id); + return -1; + } + info->dqi_priv = kmalloc(sizeof(struct qtree_mem_dqinfo), GFP_NOFS); + if (!info->dqi_priv) { + printk(KERN_WARNING + "Not enough memory for quota information structure.\n"); + return -1; + } + qinfo = info->dqi_priv; + /* limits are stored as unsigned 32-bit data */ + info->dqi_maxblimit = 0xffffffff; + info->dqi_maxilimit = 0xffffffff; + info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace); + info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace); + info->dqi_flags = le32_to_cpu(dinfo.dqi_flags); + qinfo->dqi_sb = sb; + qinfo->dqi_type = type; + qinfo->dqi_blocks = le32_to_cpu(dinfo.dqi_blocks); + qinfo->dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk); + qinfo->dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry); + qinfo->dqi_blocksize_bits = V2_DQBLKSIZE_BITS; + qinfo->dqi_usable_bs = 1 << V2_DQBLKSIZE_BITS; + qinfo->dqi_qtree_depth = qtree_depth(qinfo); + qinfo->dqi_entry_size = sizeof(struct v2_disk_dqblk); + qinfo->dqi_ops = &v2_qtree_ops; + return 0; +} + +/* Write information header to quota file */ +static int v2_write_file_info(struct super_block *sb, int type) +{ + struct v2_disk_dqinfo dinfo; + struct mem_dqinfo *info = sb_dqinfo(sb, type); + struct qtree_mem_dqinfo *qinfo = info->dqi_priv; + ssize_t size; + + spin_lock(&dq_data_lock); + info->dqi_flags &= ~DQF_INFO_DIRTY; + dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace); + dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace); + dinfo.dqi_flags = cpu_to_le32(info->dqi_flags & DQF_MASK); + spin_unlock(&dq_data_lock); + dinfo.dqi_blocks = cpu_to_le32(qinfo->dqi_blocks); + dinfo.dqi_free_blk = cpu_to_le32(qinfo->dqi_free_blk); + dinfo.dqi_free_entry = cpu_to_le32(qinfo->dqi_free_entry); + size = sb->s_op->quota_write(sb, type, (char *)&dinfo, + sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF); + if (size != sizeof(struct v2_disk_dqinfo)) { + printk(KERN_WARNING "Can't write info structure on device %s.\n", + sb->s_id); + return -1; + } + return 0; +} + +static void v2_disk2memdqb(struct dquot *dquot, void *dp) +{ + struct v2_disk_dqblk *d = dp, empty; + struct mem_dqblk *m = &dquot->dq_dqb; + + m->dqb_ihardlimit = le32_to_cpu(d->dqb_ihardlimit); + m->dqb_isoftlimit = le32_to_cpu(d->dqb_isoftlimit); + m->dqb_curinodes = le32_to_cpu(d->dqb_curinodes); + m->dqb_itime = le64_to_cpu(d->dqb_itime); + m->dqb_bhardlimit = v2_qbtos(le32_to_cpu(d->dqb_bhardlimit)); + m->dqb_bsoftlimit = v2_qbtos(le32_to_cpu(d->dqb_bsoftlimit)); + m->dqb_curspace = le64_to_cpu(d->dqb_curspace); + m->dqb_btime = le64_to_cpu(d->dqb_btime); + /* We need to escape back all-zero structure */ + memset(&empty, 0, sizeof(struct v2_disk_dqblk)); + empty.dqb_itime = cpu_to_le64(1); + if (!memcmp(&empty, dp, sizeof(struct v2_disk_dqblk))) + m->dqb_itime = 0; +} + +static void v2_mem2diskdqb(void *dp, struct dquot *dquot) +{ + struct v2_disk_dqblk *d = dp; + struct mem_dqblk *m = &dquot->dq_dqb; + struct qtree_mem_dqinfo *info = + sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; + + d->dqb_ihardlimit = cpu_to_le32(m->dqb_ihardlimit); + d->dqb_isoftlimit = cpu_to_le32(m->dqb_isoftlimit); + d->dqb_curinodes = cpu_to_le32(m->dqb_curinodes); + d->dqb_itime = cpu_to_le64(m->dqb_itime); + d->dqb_bhardlimit = cpu_to_le32(v2_stoqb(m->dqb_bhardlimit)); + d->dqb_bsoftlimit = cpu_to_le32(v2_stoqb(m->dqb_bsoftlimit)); + d->dqb_curspace = cpu_to_le64(m->dqb_curspace); + d->dqb_btime = 
cpu_to_le64(m->dqb_btime); + d->dqb_id = cpu_to_le32(dquot->dq_id); + if (qtree_entry_unused(info, dp)) + d->dqb_itime = cpu_to_le64(1); +} + +static int v2_is_id(void *dp, struct dquot *dquot) +{ + struct v2_disk_dqblk *d = dp; + struct qtree_mem_dqinfo *info = + sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; + + if (qtree_entry_unused(info, dp)) + return 0; + return le32_to_cpu(d->dqb_id) == dquot->dq_id; +} + +static int v2_read_dquot(struct dquot *dquot) +{ + return qtree_read_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot); +} + +static int v2_write_dquot(struct dquot *dquot) +{ + return qtree_write_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot); +} + +static int v2_release_dquot(struct dquot *dquot) +{ + return qtree_release_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot); +} + +static int v2_free_file_info(struct super_block *sb, int type) +{ + kfree(sb_dqinfo(sb, type)->dqi_priv); + return 0; +} + +static struct quota_format_ops v2_format_ops = { + .check_quota_file = v2_check_quota_file, + .read_file_info = v2_read_file_info, + .write_file_info = v2_write_file_info, + .free_file_info = v2_free_file_info, + .read_dqblk = v2_read_dquot, + .commit_dqblk = v2_write_dquot, + .release_dqblk = v2_release_dquot, +}; + +static struct quota_format_type v2_quota_format = { + .qf_fmt_id = QFMT_VFS_V0, + .qf_ops = &v2_format_ops, + .qf_owner = THIS_MODULE +}; + +static int __init init_v2_quota_format(void) +{ + return register_quota_format(&v2_quota_format); +} + +static void __exit exit_v2_quota_format(void) +{ + unregister_quota_format(&v2_quota_format); +} + +module_init(init_v2_quota_format); +module_exit(exit_v2_quota_format); diff --git a/fs/quota/quotaio_v1.h b/fs/quota/quotaio_v1.h new file mode 100644 index 0000000..746654b --- /dev/null +++ b/fs/quota/quotaio_v1.h @@ -0,0 +1,33 @@ +#ifndef _LINUX_QUOTAIO_V1_H +#define _LINUX_QUOTAIO_V1_H + +#include + +/* + * The following constants define the amount of time given a user + * before the soft limits are treated as hard limits (usually resulting + * in an allocation failure). The timer is started when the user crosses + * their soft limit, it is reset when they go below their soft limit. + */ +#define MAX_IQ_TIME 604800 /* (7*24*60*60) 1 week */ +#define MAX_DQ_TIME 604800 /* (7*24*60*60) 1 week */ + +/* + * The following structure defines the format of the disk quota file + * (as it appears on disk) - the file is an array of these structures + * indexed by user or group number. 
+ */ +struct v1_disk_dqblk { + __u32 dqb_bhardlimit; /* absolute limit on disk blks alloc */ + __u32 dqb_bsoftlimit; /* preferred limit on disk blks */ + __u32 dqb_curblocks; /* current block count */ + __u32 dqb_ihardlimit; /* absolute limit on allocated inodes */ + __u32 dqb_isoftlimit; /* preferred inode limit */ + __u32 dqb_curinodes; /* current # allocated inodes */ + time_t dqb_btime; /* time limit for excessive disk use */ + time_t dqb_itime; /* time limit for excessive inode use */ +}; + +#define v1_dqoff(UID) ((loff_t)((UID) * sizeof (struct v1_disk_dqblk))) + +#endif /* _LINUX_QUOTAIO_V1_H */ diff --git a/fs/quota/quotaio_v2.h b/fs/quota/quotaio_v2.h new file mode 100644 index 0000000..530fe58 --- /dev/null +++ b/fs/quota/quotaio_v2.h @@ -0,0 +1,60 @@ +/* + * Definitions of structures for vfsv0 quota format + */ + +#ifndef _LINUX_QUOTAIO_V2_H +#define _LINUX_QUOTAIO_V2_H + +#include +#include + +/* + * Definitions of magics and versions of current quota files + */ +#define V2_INITQMAGICS {\ + 0xd9c01f11, /* USRQUOTA */\ + 0xd9c01927 /* GRPQUOTA */\ +} + +#define V2_INITQVERSIONS {\ + 0, /* USRQUOTA */\ + 0 /* GRPQUOTA */\ +} + +/* First generic header */ +struct v2_disk_dqheader { + __le32 dqh_magic; /* Magic number identifying file */ + __le32 dqh_version; /* File version */ +}; + +/* + * The following structure defines the format of the disk quota file + * (as it appears on disk) - the file is a radix tree whose leaves point + * to blocks of these structures. + */ +struct v2_disk_dqblk { + __le32 dqb_id; /* id this quota applies to */ + __le32 dqb_ihardlimit; /* absolute limit on allocated inodes */ + __le32 dqb_isoftlimit; /* preferred inode limit */ + __le32 dqb_curinodes; /* current # allocated inodes */ + __le32 dqb_bhardlimit; /* absolute limit on disk space (in QUOTABLOCK_SIZE) */ + __le32 dqb_bsoftlimit; /* preferred limit on disk space (in QUOTABLOCK_SIZE) */ + __le64 dqb_curspace; /* current space occupied (in bytes) */ + __le64 dqb_btime; /* time limit for excessive disk use */ + __le64 dqb_itime; /* time limit for excessive inode use */ +}; + +/* Header with type and version specific information */ +struct v2_disk_dqinfo { + __le32 dqi_bgrace; /* Time before block soft limit becomes hard limit */ + __le32 dqi_igrace; /* Time before inode soft limit becomes hard limit */ + __le32 dqi_flags; /* Flags for quotafile (DQF_*) */ + __le32 dqi_blocks; /* Number of blocks in file */ + __le32 dqi_free_blk; /* Number of first free block in the list */ + __le32 dqi_free_entry; /* Number of block with at least one free entry */ +}; + +#define V2_DQINFOOFF sizeof(struct v2_disk_dqheader) /* Offset of info header in file */ +#define V2_DQBLKSIZE_BITS 10 /* Size of leaf block in tree */ + +#endif /* _LINUX_QUOTAIO_V2_H */ diff --git a/fs/quota_tree.c b/fs/quota_tree.c deleted file mode 100644 index 953404c..0000000 --- a/fs/quota_tree.c +++ /dev/null @@ -1,645 +0,0 @@ -/* - * vfsv0 quota IO operations on file - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "quota_tree.h" - -MODULE_AUTHOR("Jan Kara"); -MODULE_DESCRIPTION("Quota trie support"); -MODULE_LICENSE("GPL"); - -#define __QUOTA_QT_PARANOIA - -typedef char *dqbuf_t; - -static int get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth) -{ - unsigned int epb = info->dqi_usable_bs >> 2; - - depth = info->dqi_qtree_depth - depth - 1; - while (depth--) - id /= epb; - return id % epb; -} - -/* Number of entries in one blocks */ -static inline int 
qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info) -{ - return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader)) - / info->dqi_entry_size; -} - -static dqbuf_t getdqbuf(size_t size) -{ - dqbuf_t buf = kmalloc(size, GFP_NOFS); - if (!buf) - printk(KERN_WARNING "VFS: Not enough memory for quota buffers.\n"); - return buf; -} - -static inline void freedqbuf(dqbuf_t buf) -{ - kfree(buf); -} - -static inline ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, dqbuf_t buf) -{ - struct super_block *sb = info->dqi_sb; - - memset(buf, 0, info->dqi_usable_bs); - return sb->s_op->quota_read(sb, info->dqi_type, (char *)buf, - info->dqi_usable_bs, blk << info->dqi_blocksize_bits); -} - -static inline ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, dqbuf_t buf) -{ - struct super_block *sb = info->dqi_sb; - - return sb->s_op->quota_write(sb, info->dqi_type, (char *)buf, - info->dqi_usable_bs, blk << info->dqi_blocksize_bits); -} - -/* Remove empty block from list and return it */ -static int get_free_dqblk(struct qtree_mem_dqinfo *info) -{ - dqbuf_t buf = getdqbuf(info->dqi_usable_bs); - struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; - int ret, blk; - - if (!buf) - return -ENOMEM; - if (info->dqi_free_blk) { - blk = info->dqi_free_blk; - ret = read_blk(info, blk, buf); - if (ret < 0) - goto out_buf; - info->dqi_free_blk = le32_to_cpu(dh->dqdh_next_free); - } - else { - memset(buf, 0, info->dqi_usable_bs); - /* Assure block allocation... */ - ret = write_blk(info, info->dqi_blocks, buf); - if (ret < 0) - goto out_buf; - blk = info->dqi_blocks++; - } - mark_info_dirty(info->dqi_sb, info->dqi_type); - ret = blk; -out_buf: - freedqbuf(buf); - return ret; -} - -/* Insert empty block to the list */ -static int put_free_dqblk(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk) -{ - struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; - int err; - - dh->dqdh_next_free = cpu_to_le32(info->dqi_free_blk); - dh->dqdh_prev_free = cpu_to_le32(0); - dh->dqdh_entries = cpu_to_le16(0); - err = write_blk(info, blk, buf); - if (err < 0) - return err; - info->dqi_free_blk = blk; - mark_info_dirty(info->dqi_sb, info->dqi_type); - return 0; -} - -/* Remove given block from the list of blocks with free entries */ -static int remove_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk) -{ - dqbuf_t tmpbuf = getdqbuf(info->dqi_usable_bs); - struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; - uint nextblk = le32_to_cpu(dh->dqdh_next_free); - uint prevblk = le32_to_cpu(dh->dqdh_prev_free); - int err; - - if (!tmpbuf) - return -ENOMEM; - if (nextblk) { - err = read_blk(info, nextblk, tmpbuf); - if (err < 0) - goto out_buf; - ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free = - dh->dqdh_prev_free; - err = write_blk(info, nextblk, tmpbuf); - if (err < 0) - goto out_buf; - } - if (prevblk) { - err = read_blk(info, prevblk, tmpbuf); - if (err < 0) - goto out_buf; - ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_next_free = - dh->dqdh_next_free; - err = write_blk(info, prevblk, tmpbuf); - if (err < 0) - goto out_buf; - } else { - info->dqi_free_entry = nextblk; - mark_info_dirty(info->dqi_sb, info->dqi_type); - } - freedqbuf(tmpbuf); - dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0); - /* No matter whether write succeeds block is out of list */ - if (write_blk(info, blk, buf) < 0) - printk(KERN_ERR "VFS: Can't write block (%u) with free entries.\n", blk); - return 0; -out_buf: - freedqbuf(tmpbuf); - return err; -} - -/* Insert given block 
to the beginning of list with free entries */ -static int insert_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk) -{ - dqbuf_t tmpbuf = getdqbuf(info->dqi_usable_bs); - struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; - int err; - - if (!tmpbuf) - return -ENOMEM; - dh->dqdh_next_free = cpu_to_le32(info->dqi_free_entry); - dh->dqdh_prev_free = cpu_to_le32(0); - err = write_blk(info, blk, buf); - if (err < 0) - goto out_buf; - if (info->dqi_free_entry) { - err = read_blk(info, info->dqi_free_entry, tmpbuf); - if (err < 0) - goto out_buf; - ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free = - cpu_to_le32(blk); - err = write_blk(info, info->dqi_free_entry, tmpbuf); - if (err < 0) - goto out_buf; - } - freedqbuf(tmpbuf); - info->dqi_free_entry = blk; - mark_info_dirty(info->dqi_sb, info->dqi_type); - return 0; -out_buf: - freedqbuf(tmpbuf); - return err; -} - -/* Is the entry in the block free? */ -int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk) -{ - int i; - - for (i = 0; i < info->dqi_entry_size; i++) - if (disk[i]) - return 0; - return 1; -} -EXPORT_SYMBOL(qtree_entry_unused); - -/* Find space for dquot */ -static uint find_free_dqentry(struct qtree_mem_dqinfo *info, - struct dquot *dquot, int *err) -{ - uint blk, i; - struct qt_disk_dqdbheader *dh; - dqbuf_t buf = getdqbuf(info->dqi_usable_bs); - char *ddquot; - - *err = 0; - if (!buf) { - *err = -ENOMEM; - return 0; - } - dh = (struct qt_disk_dqdbheader *)buf; - if (info->dqi_free_entry) { - blk = info->dqi_free_entry; - *err = read_blk(info, blk, buf); - if (*err < 0) - goto out_buf; - } else { - blk = get_free_dqblk(info); - if ((int)blk < 0) { - *err = blk; - freedqbuf(buf); - return 0; - } - memset(buf, 0, info->dqi_usable_bs); - /* This is enough as block is already zeroed and entry list is empty... */ - info->dqi_free_entry = blk; - mark_info_dirty(dquot->dq_sb, dquot->dq_type); - } - /* Block will be full? 
*/ - if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) { - *err = remove_free_dqentry(info, buf, blk); - if (*err < 0) { - printk(KERN_ERR "VFS: find_free_dqentry(): Can't " - "remove block (%u) from entry free list.\n", - blk); - goto out_buf; - } - } - le16_add_cpu(&dh->dqdh_entries, 1); - /* Find free structure in block */ - for (i = 0, ddquot = ((char *)buf) + sizeof(struct qt_disk_dqdbheader); - i < qtree_dqstr_in_blk(info) && !qtree_entry_unused(info, ddquot); - i++, ddquot += info->dqi_entry_size); -#ifdef __QUOTA_QT_PARANOIA - if (i == qtree_dqstr_in_blk(info)) { - printk(KERN_ERR "VFS: find_free_dqentry(): Data block full " - "but it shouldn't.\n"); - *err = -EIO; - goto out_buf; - } -#endif - *err = write_blk(info, blk, buf); - if (*err < 0) { - printk(KERN_ERR "VFS: find_free_dqentry(): Can't write quota " - "data block %u.\n", blk); - goto out_buf; - } - dquot->dq_off = (blk << info->dqi_blocksize_bits) + - sizeof(struct qt_disk_dqdbheader) + - i * info->dqi_entry_size; - freedqbuf(buf); - return blk; -out_buf: - freedqbuf(buf); - return 0; -} - -/* Insert reference to structure into the trie */ -static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, - uint *treeblk, int depth) -{ - dqbuf_t buf = getdqbuf(info->dqi_usable_bs); - int ret = 0, newson = 0, newact = 0; - __le32 *ref; - uint newblk; - - if (!buf) - return -ENOMEM; - if (!*treeblk) { - ret = get_free_dqblk(info); - if (ret < 0) - goto out_buf; - *treeblk = ret; - memset(buf, 0, info->dqi_usable_bs); - newact = 1; - } else { - ret = read_blk(info, *treeblk, buf); - if (ret < 0) { - printk(KERN_ERR "VFS: Can't read tree quota block " - "%u.\n", *treeblk); - goto out_buf; - } - } - ref = (__le32 *)buf; - newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); - if (!newblk) - newson = 1; - if (depth == info->dqi_qtree_depth - 1) { -#ifdef __QUOTA_QT_PARANOIA - if (newblk) { - printk(KERN_ERR "VFS: Inserting already present quota " - "entry (block %u).\n", - le32_to_cpu(ref[get_index(info, - dquot->dq_id, depth)])); - ret = -EIO; - goto out_buf; - } -#endif - newblk = find_free_dqentry(info, dquot, &ret); - } else { - ret = do_insert_tree(info, dquot, &newblk, depth+1); - } - if (newson && ret >= 0) { - ref[get_index(info, dquot->dq_id, depth)] = - cpu_to_le32(newblk); - ret = write_blk(info, *treeblk, buf); - } else if (newact && ret < 0) { - put_free_dqblk(info, buf, *treeblk); - } -out_buf: - freedqbuf(buf); - return ret; -} - -/* Wrapper for inserting quota structure into tree */ -static inline int dq_insert_tree(struct qtree_mem_dqinfo *info, - struct dquot *dquot) -{ - int tmp = QT_TREEOFF; - return do_insert_tree(info, dquot, &tmp, 0); -} - -/* - * We don't have to be afraid of deadlocks as we never have quotas on quota files... 
- */ -int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) -{ - int type = dquot->dq_type; - struct super_block *sb = dquot->dq_sb; - ssize_t ret; - dqbuf_t ddquot = getdqbuf(info->dqi_entry_size); - - if (!ddquot) - return -ENOMEM; - - /* dq_off is guarded by dqio_mutex */ - if (!dquot->dq_off) { - ret = dq_insert_tree(info, dquot); - if (ret < 0) { - printk(KERN_ERR "VFS: Error %zd occurred while " - "creating quota.\n", ret); - freedqbuf(ddquot); - return ret; - } - } - spin_lock(&dq_data_lock); - info->dqi_ops->mem2disk_dqblk(ddquot, dquot); - spin_unlock(&dq_data_lock); - ret = sb->s_op->quota_write(sb, type, (char *)ddquot, - info->dqi_entry_size, dquot->dq_off); - if (ret != info->dqi_entry_size) { - printk(KERN_WARNING "VFS: dquota write failed on dev %s\n", - sb->s_id); - if (ret >= 0) - ret = -ENOSPC; - } else { - ret = 0; - } - dqstats.writes++; - freedqbuf(ddquot); - - return ret; -} -EXPORT_SYMBOL(qtree_write_dquot); - -/* Free dquot entry in data block */ -static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot, - uint blk) -{ - struct qt_disk_dqdbheader *dh; - dqbuf_t buf = getdqbuf(info->dqi_usable_bs); - int ret = 0; - - if (!buf) - return -ENOMEM; - if (dquot->dq_off >> info->dqi_blocksize_bits != blk) { - printk(KERN_ERR "VFS: Quota structure has offset to other " - "block (%u) than it should (%u).\n", blk, - (uint)(dquot->dq_off >> info->dqi_blocksize_bits)); - goto out_buf; - } - ret = read_blk(info, blk, buf); - if (ret < 0) { - printk(KERN_ERR "VFS: Can't read quota data block %u\n", blk); - goto out_buf; - } - dh = (struct qt_disk_dqdbheader *)buf; - le16_add_cpu(&dh->dqdh_entries, -1); - if (!le16_to_cpu(dh->dqdh_entries)) { /* Block got free? */ - ret = remove_free_dqentry(info, buf, blk); - if (ret >= 0) - ret = put_free_dqblk(info, buf, blk); - if (ret < 0) { - printk(KERN_ERR "VFS: Can't move quota data block (%u) " - "to free list.\n", blk); - goto out_buf; - } - } else { - memset(buf + - (dquot->dq_off & ((1 << info->dqi_blocksize_bits) - 1)), - 0, info->dqi_entry_size); - if (le16_to_cpu(dh->dqdh_entries) == - qtree_dqstr_in_blk(info) - 1) { - /* Insert will write block itself */ - ret = insert_free_dqentry(info, buf, blk); - if (ret < 0) { - printk(KERN_ERR "VFS: Can't insert quota data " - "block (%u) to free entry list.\n", blk); - goto out_buf; - } - } else { - ret = write_blk(info, blk, buf); - if (ret < 0) { - printk(KERN_ERR "VFS: Can't write quota data " - "block %u\n", blk); - goto out_buf; - } - } - } - dquot->dq_off = 0; /* Quota is now unattached */ -out_buf: - freedqbuf(buf); - return ret; -} - -/* Remove reference to dquot from tree */ -static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, - uint *blk, int depth) -{ - dqbuf_t buf = getdqbuf(info->dqi_usable_bs); - int ret = 0; - uint newblk; - __le32 *ref = (__le32 *)buf; - - if (!buf) - return -ENOMEM; - ret = read_blk(info, *blk, buf); - if (ret < 0) { - printk(KERN_ERR "VFS: Can't read quota data block %u\n", *blk); - goto out_buf; - } - newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); - if (depth == info->dqi_qtree_depth - 1) { - ret = free_dqentry(info, dquot, newblk); - newblk = 0; - } else { - ret = remove_tree(info, dquot, &newblk, depth+1); - } - if (ret >= 0 && !newblk) { - int i; - ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0); - /* Block got empty? 
*/ - for (i = 0; - i < (info->dqi_usable_bs >> 2) && !ref[i]; - i++); - /* Don't put the root block into the free block list */ - if (i == (info->dqi_usable_bs >> 2) - && *blk != QT_TREEOFF) { - put_free_dqblk(info, buf, *blk); - *blk = 0; - } else { - ret = write_blk(info, *blk, buf); - if (ret < 0) - printk(KERN_ERR "VFS: Can't write quota tree " - "block %u.\n", *blk); - } - } -out_buf: - freedqbuf(buf); - return ret; -} - -/* Delete dquot from tree */ -int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) -{ - uint tmp = QT_TREEOFF; - - if (!dquot->dq_off) /* Even not allocated? */ - return 0; - return remove_tree(info, dquot, &tmp, 0); -} -EXPORT_SYMBOL(qtree_delete_dquot); - -/* Find entry in block */ -static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info, - struct dquot *dquot, uint blk) -{ - dqbuf_t buf = getdqbuf(info->dqi_usable_bs); - loff_t ret = 0; - int i; - char *ddquot; - - if (!buf) - return -ENOMEM; - ret = read_blk(info, blk, buf); - if (ret < 0) { - printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk); - goto out_buf; - } - for (i = 0, ddquot = ((char *)buf) + sizeof(struct qt_disk_dqdbheader); - i < qtree_dqstr_in_blk(info) && !info->dqi_ops->is_id(ddquot, dquot); - i++, ddquot += info->dqi_entry_size); - if (i == qtree_dqstr_in_blk(info)) { - printk(KERN_ERR "VFS: Quota for id %u referenced " - "but not present.\n", dquot->dq_id); - ret = -EIO; - goto out_buf; - } else { - ret = (blk << info->dqi_blocksize_bits) + sizeof(struct - qt_disk_dqdbheader) + i * info->dqi_entry_size; - } -out_buf: - freedqbuf(buf); - return ret; -} - -/* Find entry for given id in the tree */ -static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info, - struct dquot *dquot, uint blk, int depth) -{ - dqbuf_t buf = getdqbuf(info->dqi_usable_bs); - loff_t ret = 0; - __le32 *ref = (__le32 *)buf; - - if (!buf) - return -ENOMEM; - ret = read_blk(info, blk, buf); - if (ret < 0) { - printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk); - goto out_buf; - } - ret = 0; - blk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); - if (!blk) /* No reference? */ - goto out_buf; - if (depth < info->dqi_qtree_depth - 1) - ret = find_tree_dqentry(info, dquot, blk, depth+1); - else - ret = find_block_dqentry(info, dquot, blk); -out_buf: - freedqbuf(buf); - return ret; -} - -/* Find entry for given id in the tree - wrapper function */ -static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info, - struct dquot *dquot) -{ - return find_tree_dqentry(info, dquot, QT_TREEOFF, 0); -} - -int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) -{ - int type = dquot->dq_type; - struct super_block *sb = dquot->dq_sb; - loff_t offset; - dqbuf_t ddquot; - int ret = 0; - -#ifdef __QUOTA_QT_PARANOIA - /* Invalidated quota? */ - if (!sb_dqopt(dquot->dq_sb)->files[type]) { - printk(KERN_ERR "VFS: Quota invalidated while reading!\n"); - return -EIO; - } -#endif - /* Do we know offset of the dquot entry in the quota file? */ - if (!dquot->dq_off) { - offset = find_dqentry(info, dquot); - if (offset <= 0) { /* Entry not present? 
*/ - if (offset < 0) - printk(KERN_ERR "VFS: Can't read quota " - "structure for id %u.\n", dquot->dq_id); - dquot->dq_off = 0; - set_bit(DQ_FAKE_B, &dquot->dq_flags); - memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk)); - ret = offset; - goto out; - } - dquot->dq_off = offset; - } - ddquot = getdqbuf(info->dqi_entry_size); - if (!ddquot) - return -ENOMEM; - ret = sb->s_op->quota_read(sb, type, (char *)ddquot, - info->dqi_entry_size, dquot->dq_off); - if (ret != info->dqi_entry_size) { - if (ret >= 0) - ret = -EIO; - printk(KERN_ERR "VFS: Error while reading quota " - "structure for id %u.\n", dquot->dq_id); - set_bit(DQ_FAKE_B, &dquot->dq_flags); - memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk)); - freedqbuf(ddquot); - goto out; - } - spin_lock(&dq_data_lock); - info->dqi_ops->disk2mem_dqblk(dquot, ddquot); - if (!dquot->dq_dqb.dqb_bhardlimit && - !dquot->dq_dqb.dqb_bsoftlimit && - !dquot->dq_dqb.dqb_ihardlimit && - !dquot->dq_dqb.dqb_isoftlimit) - set_bit(DQ_FAKE_B, &dquot->dq_flags); - spin_unlock(&dq_data_lock); - freedqbuf(ddquot); -out: - dqstats.reads++; - return ret; -} -EXPORT_SYMBOL(qtree_read_dquot); - -/* Check whether dquot should not be deleted. We know we are - * the only one operating on dquot (thanks to dq_lock) */ -int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) -{ - if (test_bit(DQ_FAKE_B, &dquot->dq_flags) && !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace)) - return qtree_delete_dquot(info, dquot); - return 0; -} -EXPORT_SYMBOL(qtree_release_dquot); diff --git a/fs/quota_tree.h b/fs/quota_tree.h deleted file mode 100644 index a1ab8db..0000000 --- a/fs/quota_tree.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Definitions of structures for vfsv0 quota format - */ - -#ifndef _LINUX_QUOTA_TREE_H -#define _LINUX_QUOTA_TREE_H - -#include -#include - -/* - * Structure of header of block with quota structures. 
It is padded to 16 bytes so - * there will be space for exactly 21 quota-entries in a block - */ -struct qt_disk_dqdbheader { - __le32 dqdh_next_free; /* Number of next block with free entry */ - __le32 dqdh_prev_free; /* Number of previous block with free entry */ - __le16 dqdh_entries; /* Number of valid entries in block */ - __le16 dqdh_pad1; - __le32 dqdh_pad2; -}; - -#define QT_TREEOFF 1 /* Offset of tree in file in blocks */ - -#endif /* _LINUX_QUOTAIO_TREE_H */ diff --git a/fs/quota_v1.c b/fs/quota_v1.c deleted file mode 100644 index b4af1c6..0000000 --- a/fs/quota_v1.c +++ /dev/null @@ -1,218 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "quotaio_v1.h" - -MODULE_AUTHOR("Jan Kara"); -MODULE_DESCRIPTION("Old quota format support"); -MODULE_LICENSE("GPL"); - -#define QUOTABLOCK_BITS 10 -#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS) - -static inline qsize_t v1_stoqb(qsize_t space) -{ - return (space + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS; -} - -static inline qsize_t v1_qbtos(qsize_t blocks) -{ - return blocks << QUOTABLOCK_BITS; -} - -static void v1_disk2mem_dqblk(struct mem_dqblk *m, struct v1_disk_dqblk *d) -{ - m->dqb_ihardlimit = d->dqb_ihardlimit; - m->dqb_isoftlimit = d->dqb_isoftlimit; - m->dqb_curinodes = d->dqb_curinodes; - m->dqb_bhardlimit = v1_qbtos(d->dqb_bhardlimit); - m->dqb_bsoftlimit = v1_qbtos(d->dqb_bsoftlimit); - m->dqb_curspace = v1_qbtos(d->dqb_curblocks); - m->dqb_itime = d->dqb_itime; - m->dqb_btime = d->dqb_btime; -} - -static void v1_mem2disk_dqblk(struct v1_disk_dqblk *d, struct mem_dqblk *m) -{ - d->dqb_ihardlimit = m->dqb_ihardlimit; - d->dqb_isoftlimit = m->dqb_isoftlimit; - d->dqb_curinodes = m->dqb_curinodes; - d->dqb_bhardlimit = v1_stoqb(m->dqb_bhardlimit); - d->dqb_bsoftlimit = v1_stoqb(m->dqb_bsoftlimit); - d->dqb_curblocks = v1_stoqb(m->dqb_curspace); - d->dqb_itime = m->dqb_itime; - d->dqb_btime = m->dqb_btime; -} - -static int v1_read_dqblk(struct dquot *dquot) -{ - int type = dquot->dq_type; - struct v1_disk_dqblk dqblk; - - if (!sb_dqopt(dquot->dq_sb)->files[type]) - return -EINVAL; - - /* Set structure to 0s in case read fails/is after end of file */ - memset(&dqblk, 0, sizeof(struct v1_disk_dqblk)); - dquot->dq_sb->s_op->quota_read(dquot->dq_sb, type, (char *)&dqblk, sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id)); - - v1_disk2mem_dqblk(&dquot->dq_dqb, &dqblk); - if (dquot->dq_dqb.dqb_bhardlimit == 0 && dquot->dq_dqb.dqb_bsoftlimit == 0 && - dquot->dq_dqb.dqb_ihardlimit == 0 && dquot->dq_dqb.dqb_isoftlimit == 0) - set_bit(DQ_FAKE_B, &dquot->dq_flags); - dqstats.reads++; - - return 0; -} - -static int v1_commit_dqblk(struct dquot *dquot) -{ - short type = dquot->dq_type; - ssize_t ret; - struct v1_disk_dqblk dqblk; - - v1_mem2disk_dqblk(&dqblk, &dquot->dq_dqb); - if (dquot->dq_id == 0) { - dqblk.dqb_btime = sb_dqopt(dquot->dq_sb)->info[type].dqi_bgrace; - dqblk.dqb_itime = sb_dqopt(dquot->dq_sb)->info[type].dqi_igrace; - } - ret = 0; - if (sb_dqopt(dquot->dq_sb)->files[type]) - ret = dquot->dq_sb->s_op->quota_write(dquot->dq_sb, type, (char *)&dqblk, - sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id)); - if (ret != sizeof(struct v1_disk_dqblk)) { - printk(KERN_WARNING "VFS: dquota write failed on dev %s\n", - dquot->dq_sb->s_id); - if (ret >= 0) - ret = -EIO; - goto out; - } - ret = 0; - -out: - dqstats.writes++; - - return ret; -} - -/* Magics of new quota format */ -#define V2_INITQMAGICS {\ - 0xd9c01f11, /* USRQUOTA */\ - 0xd9c01927 /* GRPQUOTA */\ -} - -/* 
Header of new quota format */ -struct v2_disk_dqheader { - __le32 dqh_magic; /* Magic number identifying file */ - __le32 dqh_version; /* File version */ -}; - -static int v1_check_quota_file(struct super_block *sb, int type) -{ - struct inode *inode = sb_dqopt(sb)->files[type]; - ulong blocks; - size_t off; - struct v2_disk_dqheader dqhead; - ssize_t size; - loff_t isize; - static const uint quota_magics[] = V2_INITQMAGICS; - - isize = i_size_read(inode); - if (!isize) - return 0; - blocks = isize >> BLOCK_SIZE_BITS; - off = isize & (BLOCK_SIZE - 1); - if ((blocks % sizeof(struct v1_disk_dqblk) * BLOCK_SIZE + off) % sizeof(struct v1_disk_dqblk)) - return 0; - /* Doublecheck whether we didn't get file with new format - with old quotactl() this could happen */ - size = sb->s_op->quota_read(sb, type, (char *)&dqhead, sizeof(struct v2_disk_dqheader), 0); - if (size != sizeof(struct v2_disk_dqheader)) - return 1; /* Probably not new format */ - if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type]) - return 1; /* Definitely not new format */ - printk(KERN_INFO "VFS: %s: Refusing to turn on old quota format on given file. It probably contains newer quota format.\n", sb->s_id); - return 0; /* Seems like a new format file -> refuse it */ -} - -static int v1_read_file_info(struct super_block *sb, int type) -{ - struct quota_info *dqopt = sb_dqopt(sb); - struct v1_disk_dqblk dqblk; - int ret; - - if ((ret = sb->s_op->quota_read(sb, type, (char *)&dqblk, sizeof(struct v1_disk_dqblk), v1_dqoff(0))) != sizeof(struct v1_disk_dqblk)) { - if (ret >= 0) - ret = -EIO; - goto out; - } - ret = 0; - /* limits are stored as unsigned 32-bit data */ - dqopt->info[type].dqi_maxblimit = 0xffffffff; - dqopt->info[type].dqi_maxilimit = 0xffffffff; - dqopt->info[type].dqi_igrace = dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME; - dqopt->info[type].dqi_bgrace = dqblk.dqb_btime ? 
dqblk.dqb_btime : MAX_DQ_TIME; -out: - return ret; -} - -static int v1_write_file_info(struct super_block *sb, int type) -{ - struct quota_info *dqopt = sb_dqopt(sb); - struct v1_disk_dqblk dqblk; - int ret; - - dqopt->info[type].dqi_flags &= ~DQF_INFO_DIRTY; - if ((ret = sb->s_op->quota_read(sb, type, (char *)&dqblk, - sizeof(struct v1_disk_dqblk), v1_dqoff(0))) != sizeof(struct v1_disk_dqblk)) { - if (ret >= 0) - ret = -EIO; - goto out; - } - dqblk.dqb_itime = dqopt->info[type].dqi_igrace; - dqblk.dqb_btime = dqopt->info[type].dqi_bgrace; - ret = sb->s_op->quota_write(sb, type, (char *)&dqblk, - sizeof(struct v1_disk_dqblk), v1_dqoff(0)); - if (ret == sizeof(struct v1_disk_dqblk)) - ret = 0; - else if (ret > 0) - ret = -EIO; -out: - return ret; -} - -static struct quota_format_ops v1_format_ops = { - .check_quota_file = v1_check_quota_file, - .read_file_info = v1_read_file_info, - .write_file_info = v1_write_file_info, - .free_file_info = NULL, - .read_dqblk = v1_read_dqblk, - .commit_dqblk = v1_commit_dqblk, -}; - -static struct quota_format_type v1_quota_format = { - .qf_fmt_id = QFMT_VFS_OLD, - .qf_ops = &v1_format_ops, - .qf_owner = THIS_MODULE -}; - -static int __init init_v1_quota_format(void) -{ - return register_quota_format(&v1_quota_format); -} - -static void __exit exit_v1_quota_format(void) -{ - unregister_quota_format(&v1_quota_format); -} - -module_init(init_v1_quota_format); -module_exit(exit_v1_quota_format); - diff --git a/fs/quota_v2.c b/fs/quota_v2.c deleted file mode 100644 index b618b56..0000000 --- a/fs/quota_v2.c +++ /dev/null @@ -1,236 +0,0 @@ -/* - * vfsv0 quota IO operations on file - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "quota_tree.h" -#include "quotaio_v2.h" - -MODULE_AUTHOR("Jan Kara"); -MODULE_DESCRIPTION("Quota format v2 support"); -MODULE_LICENSE("GPL"); - -#define __QUOTA_V2_PARANOIA - -static void v2_mem2diskdqb(void *dp, struct dquot *dquot); -static void v2_disk2memdqb(struct dquot *dquot, void *dp); -static int v2_is_id(void *dp, struct dquot *dquot); - -static struct qtree_fmt_operations v2_qtree_ops = { - .mem2disk_dqblk = v2_mem2diskdqb, - .disk2mem_dqblk = v2_disk2memdqb, - .is_id = v2_is_id, -}; - -#define QUOTABLOCK_BITS 10 -#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS) - -static inline qsize_t v2_stoqb(qsize_t space) -{ - return (space + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS; -} - -static inline qsize_t v2_qbtos(qsize_t blocks) -{ - return blocks << QUOTABLOCK_BITS; -} - -/* Check whether given file is really vfsv0 quotafile */ -static int v2_check_quota_file(struct super_block *sb, int type) -{ - struct v2_disk_dqheader dqhead; - ssize_t size; - static const uint quota_magics[] = V2_INITQMAGICS; - static const uint quota_versions[] = V2_INITQVERSIONS; - - size = sb->s_op->quota_read(sb, type, (char *)&dqhead, sizeof(struct v2_disk_dqheader), 0); - if (size != sizeof(struct v2_disk_dqheader)) { - printk("quota_v2: failed read expected=%zd got=%zd\n", - sizeof(struct v2_disk_dqheader), size); - return 0; - } - if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type] || - le32_to_cpu(dqhead.dqh_version) != quota_versions[type]) - return 0; - return 1; -} - -/* Read information header from quota file */ -static int v2_read_file_info(struct super_block *sb, int type) -{ - struct v2_disk_dqinfo dinfo; - struct mem_dqinfo *info = sb_dqinfo(sb, type); - struct qtree_mem_dqinfo *qinfo; - ssize_t size; - - size = sb->s_op->quota_read(sb, type, (char *)&dinfo, - 
sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF); - if (size != sizeof(struct v2_disk_dqinfo)) { - printk(KERN_WARNING "Can't read info structure on device %s.\n", - sb->s_id); - return -1; - } - info->dqi_priv = kmalloc(sizeof(struct qtree_mem_dqinfo), GFP_NOFS); - if (!info->dqi_priv) { - printk(KERN_WARNING - "Not enough memory for quota information structure.\n"); - return -1; - } - qinfo = info->dqi_priv; - /* limits are stored as unsigned 32-bit data */ - info->dqi_maxblimit = 0xffffffff; - info->dqi_maxilimit = 0xffffffff; - info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace); - info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace); - info->dqi_flags = le32_to_cpu(dinfo.dqi_flags); - qinfo->dqi_sb = sb; - qinfo->dqi_type = type; - qinfo->dqi_blocks = le32_to_cpu(dinfo.dqi_blocks); - qinfo->dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk); - qinfo->dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry); - qinfo->dqi_blocksize_bits = V2_DQBLKSIZE_BITS; - qinfo->dqi_usable_bs = 1 << V2_DQBLKSIZE_BITS; - qinfo->dqi_qtree_depth = qtree_depth(qinfo); - qinfo->dqi_entry_size = sizeof(struct v2_disk_dqblk); - qinfo->dqi_ops = &v2_qtree_ops; - return 0; -} - -/* Write information header to quota file */ -static int v2_write_file_info(struct super_block *sb, int type) -{ - struct v2_disk_dqinfo dinfo; - struct mem_dqinfo *info = sb_dqinfo(sb, type); - struct qtree_mem_dqinfo *qinfo = info->dqi_priv; - ssize_t size; - - spin_lock(&dq_data_lock); - info->dqi_flags &= ~DQF_INFO_DIRTY; - dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace); - dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace); - dinfo.dqi_flags = cpu_to_le32(info->dqi_flags & DQF_MASK); - spin_unlock(&dq_data_lock); - dinfo.dqi_blocks = cpu_to_le32(qinfo->dqi_blocks); - dinfo.dqi_free_blk = cpu_to_le32(qinfo->dqi_free_blk); - dinfo.dqi_free_entry = cpu_to_le32(qinfo->dqi_free_entry); - size = sb->s_op->quota_write(sb, type, (char *)&dinfo, - sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF); - if (size != sizeof(struct v2_disk_dqinfo)) { - printk(KERN_WARNING "Can't write info structure on device %s.\n", - sb->s_id); - return -1; - } - return 0; -} - -static void v2_disk2memdqb(struct dquot *dquot, void *dp) -{ - struct v2_disk_dqblk *d = dp, empty; - struct mem_dqblk *m = &dquot->dq_dqb; - - m->dqb_ihardlimit = le32_to_cpu(d->dqb_ihardlimit); - m->dqb_isoftlimit = le32_to_cpu(d->dqb_isoftlimit); - m->dqb_curinodes = le32_to_cpu(d->dqb_curinodes); - m->dqb_itime = le64_to_cpu(d->dqb_itime); - m->dqb_bhardlimit = v2_qbtos(le32_to_cpu(d->dqb_bhardlimit)); - m->dqb_bsoftlimit = v2_qbtos(le32_to_cpu(d->dqb_bsoftlimit)); - m->dqb_curspace = le64_to_cpu(d->dqb_curspace); - m->dqb_btime = le64_to_cpu(d->dqb_btime); - /* We need to escape back all-zero structure */ - memset(&empty, 0, sizeof(struct v2_disk_dqblk)); - empty.dqb_itime = cpu_to_le64(1); - if (!memcmp(&empty, dp, sizeof(struct v2_disk_dqblk))) - m->dqb_itime = 0; -} - -static void v2_mem2diskdqb(void *dp, struct dquot *dquot) -{ - struct v2_disk_dqblk *d = dp; - struct mem_dqblk *m = &dquot->dq_dqb; - struct qtree_mem_dqinfo *info = - sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; - - d->dqb_ihardlimit = cpu_to_le32(m->dqb_ihardlimit); - d->dqb_isoftlimit = cpu_to_le32(m->dqb_isoftlimit); - d->dqb_curinodes = cpu_to_le32(m->dqb_curinodes); - d->dqb_itime = cpu_to_le64(m->dqb_itime); - d->dqb_bhardlimit = cpu_to_le32(v2_stoqb(m->dqb_bhardlimit)); - d->dqb_bsoftlimit = cpu_to_le32(v2_stoqb(m->dqb_bsoftlimit)); - d->dqb_curspace = cpu_to_le64(m->dqb_curspace); - d->dqb_btime = 
cpu_to_le64(m->dqb_btime); - d->dqb_id = cpu_to_le32(dquot->dq_id); - if (qtree_entry_unused(info, dp)) - d->dqb_itime = cpu_to_le64(1); -} - -static int v2_is_id(void *dp, struct dquot *dquot) -{ - struct v2_disk_dqblk *d = dp; - struct qtree_mem_dqinfo *info = - sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; - - if (qtree_entry_unused(info, dp)) - return 0; - return le32_to_cpu(d->dqb_id) == dquot->dq_id; -} - -static int v2_read_dquot(struct dquot *dquot) -{ - return qtree_read_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot); -} - -static int v2_write_dquot(struct dquot *dquot) -{ - return qtree_write_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot); -} - -static int v2_release_dquot(struct dquot *dquot) -{ - return qtree_release_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot); -} - -static int v2_free_file_info(struct super_block *sb, int type) -{ - kfree(sb_dqinfo(sb, type)->dqi_priv); - return 0; -} - -static struct quota_format_ops v2_format_ops = { - .check_quota_file = v2_check_quota_file, - .read_file_info = v2_read_file_info, - .write_file_info = v2_write_file_info, - .free_file_info = v2_free_file_info, - .read_dqblk = v2_read_dquot, - .commit_dqblk = v2_write_dquot, - .release_dqblk = v2_release_dquot, -}; - -static struct quota_format_type v2_quota_format = { - .qf_fmt_id = QFMT_VFS_V0, - .qf_ops = &v2_format_ops, - .qf_owner = THIS_MODULE -}; - -static int __init init_v2_quota_format(void) -{ - return register_quota_format(&v2_quota_format); -} - -static void __exit exit_v2_quota_format(void) -{ - unregister_quota_format(&v2_quota_format); -} - -module_init(init_v2_quota_format); -module_exit(exit_v2_quota_format); diff --git a/fs/quotaio_v1.h b/fs/quotaio_v1.h deleted file mode 100644 index 746654b..0000000 --- a/fs/quotaio_v1.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef _LINUX_QUOTAIO_V1_H -#define _LINUX_QUOTAIO_V1_H - -#include - -/* - * The following constants define the amount of time given a user - * before the soft limits are treated as hard limits (usually resulting - * in an allocation failure). The timer is started when the user crosses - * their soft limit, it is reset when they go below their soft limit. - */ -#define MAX_IQ_TIME 604800 /* (7*24*60*60) 1 week */ -#define MAX_DQ_TIME 604800 /* (7*24*60*60) 1 week */ - -/* - * The following structure defines the format of the disk quota file - * (as it appears on disk) - the file is an array of these structures - * indexed by user or group number. 
- */ -struct v1_disk_dqblk { - __u32 dqb_bhardlimit; /* absolute limit on disk blks alloc */ - __u32 dqb_bsoftlimit; /* preferred limit on disk blks */ - __u32 dqb_curblocks; /* current block count */ - __u32 dqb_ihardlimit; /* absolute limit on allocated inodes */ - __u32 dqb_isoftlimit; /* preferred inode limit */ - __u32 dqb_curinodes; /* current # allocated inodes */ - time_t dqb_btime; /* time limit for excessive disk use */ - time_t dqb_itime; /* time limit for excessive inode use */ -}; - -#define v1_dqoff(UID) ((loff_t)((UID) * sizeof (struct v1_disk_dqblk))) - -#endif /* _LINUX_QUOTAIO_V1_H */ diff --git a/fs/quotaio_v2.h b/fs/quotaio_v2.h deleted file mode 100644 index 530fe58..0000000 --- a/fs/quotaio_v2.h +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Definitions of structures for vfsv0 quota format - */ - -#ifndef _LINUX_QUOTAIO_V2_H -#define _LINUX_QUOTAIO_V2_H - -#include -#include - -/* - * Definitions of magics and versions of current quota files - */ -#define V2_INITQMAGICS {\ - 0xd9c01f11, /* USRQUOTA */\ - 0xd9c01927 /* GRPQUOTA */\ -} - -#define V2_INITQVERSIONS {\ - 0, /* USRQUOTA */\ - 0 /* GRPQUOTA */\ -} - -/* First generic header */ -struct v2_disk_dqheader { - __le32 dqh_magic; /* Magic number identifying file */ - __le32 dqh_version; /* File version */ -}; - -/* - * The following structure defines the format of the disk quota file - * (as it appears on disk) - the file is a radix tree whose leaves point - * to blocks of these structures. - */ -struct v2_disk_dqblk { - __le32 dqb_id; /* id this quota applies to */ - __le32 dqb_ihardlimit; /* absolute limit on allocated inodes */ - __le32 dqb_isoftlimit; /* preferred inode limit */ - __le32 dqb_curinodes; /* current # allocated inodes */ - __le32 dqb_bhardlimit; /* absolute limit on disk space (in QUOTABLOCK_SIZE) */ - __le32 dqb_bsoftlimit; /* preferred limit on disk space (in QUOTABLOCK_SIZE) */ - __le64 dqb_curspace; /* current space occupied (in bytes) */ - __le64 dqb_btime; /* time limit for excessive disk use */ - __le64 dqb_itime; /* time limit for excessive inode use */ -}; - -/* Header with type and version specific information */ -struct v2_disk_dqinfo { - __le32 dqi_bgrace; /* Time before block soft limit becomes hard limit */ - __le32 dqi_igrace; /* Time before inode soft limit becomes hard limit */ - __le32 dqi_flags; /* Flags for quotafile (DQF_*) */ - __le32 dqi_blocks; /* Number of blocks in file */ - __le32 dqi_free_blk; /* Number of first free block in the list */ - __le32 dqi_free_entry; /* Number of block with at least one free entry */ -}; - -#define V2_DQINFOOFF sizeof(struct v2_disk_dqheader) /* Offset of info header in file */ -#define V2_DQBLKSIZE_BITS 10 /* Size of leaf block in tree */ - -#endif /* _LINUX_QUOTAIO_V2_H */ -- cgit v0.10.2 From c516610cfec5c50f84ff8cc315628548481f4990 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 26 Jan 2009 15:32:46 +0100 Subject: quota: Make global quota locks cacheline aligned Andrew Morton has suggested that three global quota locks can end up in the same cacheline which can result in bad cacheline ping-pong on SMP machines. Make locks cacheline aligned so that we avoid this problem (thanks goes to Andrew for the idea). 
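A minimal sketch of the idea, not part of the patch itself: spinlocks defined back to back can be packed into the same cacheline (typically 64 bytes on x86), so a CPU taking one lock invalidates the line holding the others on every other CPU, even though the locks protect unrelated data. __cacheline_aligned_in_smp (from <linux/cache.h>) aligns each annotated definition to a cacheline boundary on SMP builds and expands to nothing on UP, so locks annotated this way no longer false-share. The lock names below are hypothetical:

#include <linux/spinlock.h>
#include <linux/cache.h>

/* Packed: lock_a and lock_b will usually share one cacheline, so
 * contention on lock_a also bounces the line holding lock_b. */
static DEFINE_SPINLOCK(lock_a);
static DEFINE_SPINLOCK(lock_b);

/* Cacheline aligned: each lock starts on its own line on SMP kernels;
 * the only cost is a little padding in .data. */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(lock_c);
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(lock_d);
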
Signed-off-by: Jan Kara CC: Andrew Morton diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 28aa146..e840fa2 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -129,9 +129,9 @@ * i_mutex on quota files is special (it's below dqio_mutex) */ -static DEFINE_SPINLOCK(dq_list_lock); -static DEFINE_SPINLOCK(dq_state_lock); -DEFINE_SPINLOCK(dq_data_lock); +static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock); +static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock); +__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock); EXPORT_SYMBOL(dq_data_lock); static char *quotatypes[] = INITQFNAMES; -- cgit v0.10.2 From dd6f3c6d5a26a282521f15a183fdc2d6f35cfa0f Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 26 Jan 2009 16:01:43 +0100 Subject: quota: Remove NODQUOT macro Remove this macro which is just a definition of NULL. Fix a few coding style issues along the way. Signed-off-by: Jan Kara diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index e840fa2..4881db3 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -253,7 +253,7 @@ static inline struct dquot *find_dquot(unsigned int hashent, struct super_block if (dquot->dq_sb == sb && dquot->dq_id == id && dquot->dq_type == type) return dquot; } - return NODQUOT; + return NULL; } /* Add a dquot to the tail of the free list */ @@ -696,7 +696,7 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type) dquot = sb->dq_op->alloc_dquot(sb, type); if(!dquot) - return NODQUOT; + return NULL; mutex_init(&dquot->dq_lock); INIT_LIST_HEAD(&dquot->dq_free); @@ -722,10 +722,10 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type) struct dquot *dqget(struct super_block *sb, unsigned int id, int type) { unsigned int hashent = hashfn(sb, id, type); - struct dquot *dquot = NODQUOT, *empty = NODQUOT; + struct dquot *dquot = NULL, *empty = NULL; if (!sb_has_quota_active(sb, type)) - return NODQUOT; + return NULL; we_slept: spin_lock(&dq_list_lock); spin_lock(&dq_state_lock); @@ -736,15 +736,17 @@ we_slept: } spin_unlock(&dq_state_lock); - if ((dquot = find_dquot(hashent, sb, id, type)) == NODQUOT) { - if (empty == NODQUOT) { + dquot = find_dquot(hashent, sb, id, type); + if (!dquot) { + if (!empty) { spin_unlock(&dq_list_lock); - if ((empty = get_empty_dquot(sb, type)) == NODQUOT) + empty = get_empty_dquot(sb, type); + if (!empty) schedule(); /* Try to wait for a moment... 
*/ goto we_slept; } dquot = empty; - empty = NODQUOT; + empty = NULL; dquot->dq_id = id; /* all dquots go on the inuse_list */ put_inuse(dquot); @@ -766,7 +768,7 @@ we_slept: /* Read the dquot and instantiate it (everything done only if needed) */ if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && sb->dq_op->acquire_dquot(dquot) < 0) { dqput(dquot); - dquot = NODQUOT; + dquot = NULL; goto out; } #ifdef __DQUOT_PARANOIA @@ -787,9 +789,9 @@ static int dqinit_needed(struct inode *inode, int type) if (IS_NOQUOTA(inode)) return 0; if (type != -1) - return inode->i_dquot[type] == NODQUOT; + return !inode->i_dquot[type]; for (cnt = 0; cnt < MAXQUOTAS; cnt++) - if (inode->i_dquot[cnt] == NODQUOT) + if (!inode->i_dquot[cnt]) return 1; return 0; } @@ -840,8 +842,8 @@ static int remove_inode_dquot_ref(struct inode *inode, int type, { struct dquot *dquot = inode->i_dquot[type]; - inode->i_dquot[type] = NODQUOT; - if (dquot != NODQUOT) { + inode->i_dquot[type] = NULL; + if (dquot) { if (dqput_blocks(dquot)) { #ifdef __DQUOT_PARANOIA if (atomic_read(&dquot->dq_count) != 1) @@ -1112,7 +1114,7 @@ static inline void flush_warnings(struct dquot * const *dquots, char *warntype) int i; for (i = 0; i < MAXQUOTAS; i++) - if (dquots[i] != NODQUOT && warntype[i] != QUOTA_NL_NOWARN && + if (dquots[i] && warntype[i] != QUOTA_NL_NOWARN && !warning_issued(dquots[i], warntype[i])) { #ifdef CONFIG_PRINT_QUOTA_WARNING print_warning(dquots[i], warntype[i]); @@ -1249,7 +1251,7 @@ int dquot_initialize(struct inode *inode, int type) { unsigned int id = 0; int cnt, ret = 0; - struct dquot *got[MAXQUOTAS] = { NODQUOT, NODQUOT }; + struct dquot *got[MAXQUOTAS] = { NULL, NULL }; struct super_block *sb = inode->i_sb; /* First test before acquiring mutex - solves deadlocks when we @@ -1282,9 +1284,9 @@ int dquot_initialize(struct inode *inode, int type) /* Avoid races with quotaoff() */ if (!sb_has_quota_active(sb, cnt)) continue; - if (inode->i_dquot[cnt] == NODQUOT) { + if (!inode->i_dquot[cnt]) { inode->i_dquot[cnt] = got[cnt]; - got[cnt] = NODQUOT; + got[cnt] = NULL; } } out_err: @@ -1307,7 +1309,7 @@ int dquot_drop(struct inode *inode) down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { put[cnt] = inode->i_dquot[cnt]; - inode->i_dquot[cnt] = NODQUOT; + inode->i_dquot[cnt] = NULL; } up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); @@ -1332,7 +1334,7 @@ void vfs_dq_drop(struct inode *inode) * must assure that nobody can come after the DQUOT_DROP and * add quota pointers back anyway */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) - if (inode->i_dquot[cnt] != NODQUOT) + if (inode->i_dquot[cnt]) break; if (cnt < MAXQUOTAS) inode->i_sb->dq_op->drop(inode); @@ -1363,7 +1365,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, spin_lock(&dq_data_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (inode->i_dquot[cnt] == NODQUOT) + if (!inode->i_dquot[cnt]) continue; if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) == NO_QUOTA) { @@ -1372,7 +1374,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, } } for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (inode->i_dquot[cnt] == NODQUOT) + if (!inode->i_dquot[cnt]) continue; if (reserve) dquot_resv_space(inode->i_dquot[cnt], number); @@ -1461,14 +1463,14 @@ int dquot_alloc_inode(const struct inode *inode, qsize_t number) } spin_lock(&dq_data_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (inode->i_dquot[cnt] == NODQUOT) + if (!inode->i_dquot[cnt]) continue; if (check_idq(inode->i_dquot[cnt], number, warntype+cnt) == NO_QUOTA) 
goto warn_put_all; } for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (inode->i_dquot[cnt] == NODQUOT) + if (!inode->i_dquot[cnt]) continue; dquot_incr_inodes(inode->i_dquot[cnt], number); } @@ -1506,7 +1508,7 @@ int dquot_claim_space(struct inode *inode, qsize_t number) spin_lock(&dq_data_lock); /* Claim reserved quotas to allocated quotas */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (inode->i_dquot[cnt] != NODQUOT) + if (inode->i_dquot[cnt]) dquot_claim_reserved_space(inode->i_dquot[cnt], number); } @@ -1540,7 +1542,7 @@ void dquot_release_reserved_space(struct inode *inode, qsize_t number) spin_lock(&dq_data_lock); /* Release reserved dquots */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (inode->i_dquot[cnt] != NODQUOT) + if (inode->i_dquot[cnt]) dquot_free_reserved_space(inode->i_dquot[cnt], number); } spin_unlock(&dq_data_lock); @@ -1576,7 +1578,7 @@ out_sub: } spin_lock(&dq_data_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (inode->i_dquot[cnt] == NODQUOT) + if (!inode->i_dquot[cnt]) continue; warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number); dquot_decr_space(inode->i_dquot[cnt], number); @@ -1614,7 +1616,7 @@ int dquot_free_inode(const struct inode *inode, qsize_t number) } spin_lock(&dq_data_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (inode->i_dquot[cnt] == NODQUOT) + if (!inode->i_dquot[cnt]) continue; warntype[cnt] = info_idq_free(inode->i_dquot[cnt], number); dquot_decr_inodes(inode->i_dquot[cnt], number); @@ -1667,8 +1669,8 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr) return QUOTA_OK; /* Initialize the arrays */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - transfer_from[cnt] = NODQUOT; - transfer_to[cnt] = NODQUOT; + transfer_from[cnt] = NULL; + transfer_to[cnt] = NULL; warntype_to[cnt] = QUOTA_NL_NOWARN; switch (cnt) { case USRQUOTA: @@ -1696,7 +1698,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr) space = cur_space + rsv_space; /* Build the transfer_from list and check the limits */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (transfer_to[cnt] == NODQUOT) + if (!transfer_to[cnt]) continue; transfer_from[cnt] = inode->i_dquot[cnt]; if (check_idq(transfer_to[cnt], 1, warntype_to + cnt) == @@ -1712,7 +1714,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr) /* * Skip changes for same uid or gid or for turned off quota-type. 
*/ - if (transfer_to[cnt] == NODQUOT) + if (!transfer_to[cnt]) continue; /* Due to IO error we might not have transfer_from[] structure */ @@ -1743,7 +1745,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr) if (transfer_to[cnt]) { mark_dquot_dirty(transfer_to[cnt]); /* The reference we got is transferred to the inode */ - transfer_to[cnt] = NODQUOT; + transfer_to[cnt] = NULL; } } warn_put_all: @@ -1761,7 +1763,7 @@ over_quota: up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); /* Clear dquot pointers we don't want to dqput() */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) - transfer_from[cnt] = NODQUOT; + transfer_from[cnt] = NULL; ret = NO_QUOTA; goto warn_put_all; } @@ -2256,7 +2258,7 @@ int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d struct dquot *dquot; dquot = dqget(sb, id, type); - if (dquot == NODQUOT) + if (!dquot) return -ESRCH; do_get_dqblk(dquot, di); dqput(dquot); diff --git a/include/linux/quota.h b/include/linux/quota.h index a510d91..78c4889 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -277,8 +277,6 @@ struct dquot { struct mem_dqblk dq_dqb; /* Diskquota usage */ }; -#define NODQUOT (struct dquot *)NULL - #define QUOTA_OK 0 #define NO_QUOTA 1 -- cgit v0.10.2 From d26ac1a8128f6e1fc467f145acaa9f9bf9e16b75 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 26 Jan 2009 16:17:50 +0100 Subject: quota: Remove dqbuf_t and other cleanups Remove bogus typedef which is just a definition of char *. Remove unnecessary type casts. Substitute freedqbuf() with kfree. Signed-off-by: Jan Kara diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c index 953404c..3a5a90c 100644 --- a/fs/quota/quota_tree.c +++ b/fs/quota/quota_tree.c @@ -22,8 +22,6 @@ MODULE_LICENSE("GPL"); #define __QUOTA_QT_PARANOIA -typedef char *dqbuf_t; - static int get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth) { unsigned int epb = info->dqi_usable_bs >> 2; @@ -41,40 +39,35 @@ static inline int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info) / info->dqi_entry_size; } -static dqbuf_t getdqbuf(size_t size) +static char *getdqbuf(size_t size) { - dqbuf_t buf = kmalloc(size, GFP_NOFS); + char *buf = kmalloc(size, GFP_NOFS); if (!buf) printk(KERN_WARNING "VFS: Not enough memory for quota buffers.\n"); return buf; } -static inline void freedqbuf(dqbuf_t buf) -{ - kfree(buf); -} - -static inline ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, dqbuf_t buf) +static inline ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) { struct super_block *sb = info->dqi_sb; memset(buf, 0, info->dqi_usable_bs); - return sb->s_op->quota_read(sb, info->dqi_type, (char *)buf, + return sb->s_op->quota_read(sb, info->dqi_type, buf, info->dqi_usable_bs, blk << info->dqi_blocksize_bits); } -static inline ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, dqbuf_t buf) +static inline ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) { struct super_block *sb = info->dqi_sb; - return sb->s_op->quota_write(sb, info->dqi_type, (char *)buf, + return sb->s_op->quota_write(sb, info->dqi_type, buf, info->dqi_usable_bs, blk << info->dqi_blocksize_bits); } /* Remove empty block from list and return it */ static int get_free_dqblk(struct qtree_mem_dqinfo *info) { - dqbuf_t buf = getdqbuf(info->dqi_usable_bs); + char *buf = getdqbuf(info->dqi_usable_bs); struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; int ret, blk; @@ -98,12 +91,12 @@ static int get_free_dqblk(struct qtree_mem_dqinfo *info) 
mark_info_dirty(info->dqi_sb, info->dqi_type); ret = blk; out_buf: - freedqbuf(buf); + kfree(buf); return ret; } /* Insert empty block to the list */ -static int put_free_dqblk(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk) +static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk) { struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; int err; @@ -120,9 +113,9 @@ static int put_free_dqblk(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk) } /* Remove given block from the list of blocks with free entries */ -static int remove_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk) +static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf, uint blk) { - dqbuf_t tmpbuf = getdqbuf(info->dqi_usable_bs); + char *tmpbuf = getdqbuf(info->dqi_usable_bs); struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; uint nextblk = le32_to_cpu(dh->dqdh_next_free); uint prevblk = le32_to_cpu(dh->dqdh_prev_free); @@ -153,21 +146,21 @@ static int remove_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint info->dqi_free_entry = nextblk; mark_info_dirty(info->dqi_sb, info->dqi_type); } - freedqbuf(tmpbuf); + kfree(tmpbuf); dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0); /* No matter whether write succeeds block is out of list */ if (write_blk(info, blk, buf) < 0) printk(KERN_ERR "VFS: Can't write block (%u) with free entries.\n", blk); return 0; out_buf: - freedqbuf(tmpbuf); + kfree(tmpbuf); return err; } /* Insert given block to the beginning of list with free entries */ -static int insert_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk) +static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf, uint blk) { - dqbuf_t tmpbuf = getdqbuf(info->dqi_usable_bs); + char *tmpbuf = getdqbuf(info->dqi_usable_bs); struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; int err; @@ -188,12 +181,12 @@ static int insert_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint if (err < 0) goto out_buf; } - freedqbuf(tmpbuf); + kfree(tmpbuf); info->dqi_free_entry = blk; mark_info_dirty(info->dqi_sb, info->dqi_type); return 0; out_buf: - freedqbuf(tmpbuf); + kfree(tmpbuf); return err; } @@ -215,7 +208,7 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info, { uint blk, i; struct qt_disk_dqdbheader *dh; - dqbuf_t buf = getdqbuf(info->dqi_usable_bs); + char *buf = getdqbuf(info->dqi_usable_bs); char *ddquot; *err = 0; @@ -233,7 +226,7 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info, blk = get_free_dqblk(info); if ((int)blk < 0) { *err = blk; - freedqbuf(buf); + kfree(buf); return 0; } memset(buf, 0, info->dqi_usable_bs); @@ -253,9 +246,10 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info, } le16_add_cpu(&dh->dqdh_entries, 1); /* Find free structure in block */ - for (i = 0, ddquot = ((char *)buf) + sizeof(struct qt_disk_dqdbheader); + for (i = 0, ddquot = buf + sizeof(struct qt_disk_dqdbheader); i < qtree_dqstr_in_blk(info) && !qtree_entry_unused(info, ddquot); - i++, ddquot += info->dqi_entry_size); + i++, ddquot += info->dqi_entry_size) + ; #ifdef __QUOTA_QT_PARANOIA if (i == qtree_dqstr_in_blk(info)) { printk(KERN_ERR "VFS: find_free_dqentry(): Data block full " @@ -273,10 +267,10 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info, dquot->dq_off = (blk << info->dqi_blocksize_bits) + sizeof(struct qt_disk_dqdbheader) + i * info->dqi_entry_size; - freedqbuf(buf); + kfree(buf); return blk; out_buf: - freedqbuf(buf); + kfree(buf); 
return 0; } @@ -284,7 +278,7 @@ out_buf: static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, uint *treeblk, int depth) { - dqbuf_t buf = getdqbuf(info->dqi_usable_bs); + char *buf = getdqbuf(info->dqi_usable_bs); int ret = 0, newson = 0, newact = 0; __le32 *ref; uint newblk; @@ -333,7 +327,7 @@ static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, put_free_dqblk(info, buf, *treeblk); } out_buf: - freedqbuf(buf); + kfree(buf); return ret; } @@ -353,7 +347,7 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) int type = dquot->dq_type; struct super_block *sb = dquot->dq_sb; ssize_t ret; - dqbuf_t ddquot = getdqbuf(info->dqi_entry_size); + char *ddquot = getdqbuf(info->dqi_entry_size); if (!ddquot) return -ENOMEM; @@ -364,15 +358,15 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) if (ret < 0) { printk(KERN_ERR "VFS: Error %zd occurred while " "creating quota.\n", ret); - freedqbuf(ddquot); + kfree(ddquot); return ret; } } spin_lock(&dq_data_lock); info->dqi_ops->mem2disk_dqblk(ddquot, dquot); spin_unlock(&dq_data_lock); - ret = sb->s_op->quota_write(sb, type, (char *)ddquot, - info->dqi_entry_size, dquot->dq_off); + ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size, + dquot->dq_off); if (ret != info->dqi_entry_size) { printk(KERN_WARNING "VFS: dquota write failed on dev %s\n", sb->s_id); @@ -382,7 +376,7 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) ret = 0; } dqstats.writes++; - freedqbuf(ddquot); + kfree(ddquot); return ret; } @@ -393,7 +387,7 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot, uint blk) { struct qt_disk_dqdbheader *dh; - dqbuf_t buf = getdqbuf(info->dqi_usable_bs); + char *buf = getdqbuf(info->dqi_usable_bs); int ret = 0; if (!buf) @@ -444,7 +438,7 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot, } dquot->dq_off = 0; /* Quota is now unattached */ out_buf: - freedqbuf(buf); + kfree(buf); return ret; } @@ -452,7 +446,7 @@ out_buf: static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, uint *blk, int depth) { - dqbuf_t buf = getdqbuf(info->dqi_usable_bs); + char *buf = getdqbuf(info->dqi_usable_bs); int ret = 0; uint newblk; __le32 *ref = (__le32 *)buf; @@ -475,9 +469,8 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, int i; ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0); /* Block got empty? 
*/ - for (i = 0; - i < (info->dqi_usable_bs >> 2) && !ref[i]; - i++); + for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++) + ; /* Don't put the root block into the free block list */ if (i == (info->dqi_usable_bs >> 2) && *blk != QT_TREEOFF) { @@ -491,7 +484,7 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, } } out_buf: - freedqbuf(buf); + kfree(buf); return ret; } @@ -510,7 +503,7 @@ EXPORT_SYMBOL(qtree_delete_dquot); static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot, uint blk) { - dqbuf_t buf = getdqbuf(info->dqi_usable_bs); + char *buf = getdqbuf(info->dqi_usable_bs); loff_t ret = 0; int i; char *ddquot; @@ -522,9 +515,10 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info, printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk); goto out_buf; } - for (i = 0, ddquot = ((char *)buf) + sizeof(struct qt_disk_dqdbheader); + for (i = 0, ddquot = buf + sizeof(struct qt_disk_dqdbheader); i < qtree_dqstr_in_blk(info) && !info->dqi_ops->is_id(ddquot, dquot); - i++, ddquot += info->dqi_entry_size); + i++, ddquot += info->dqi_entry_size) + ; if (i == qtree_dqstr_in_blk(info)) { printk(KERN_ERR "VFS: Quota for id %u referenced " "but not present.\n", dquot->dq_id); @@ -535,7 +529,7 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info, qt_disk_dqdbheader) + i * info->dqi_entry_size; } out_buf: - freedqbuf(buf); + kfree(buf); return ret; } @@ -543,7 +537,7 @@ out_buf: static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot, uint blk, int depth) { - dqbuf_t buf = getdqbuf(info->dqi_usable_bs); + char *buf = getdqbuf(info->dqi_usable_bs); loff_t ret = 0; __le32 *ref = (__le32 *)buf; @@ -563,7 +557,7 @@ static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info, else ret = find_block_dqentry(info, dquot, blk); out_buf: - freedqbuf(buf); + kfree(buf); return ret; } @@ -579,7 +573,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) int type = dquot->dq_type; struct super_block *sb = dquot->dq_sb; loff_t offset; - dqbuf_t ddquot; + char *ddquot; int ret = 0; #ifdef __QUOTA_QT_PARANOIA @@ -607,8 +601,8 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) ddquot = getdqbuf(info->dqi_entry_size); if (!ddquot) return -ENOMEM; - ret = sb->s_op->quota_read(sb, type, (char *)ddquot, - info->dqi_entry_size, dquot->dq_off); + ret = sb->s_op->quota_read(sb, type, ddquot, info->dqi_entry_size, + dquot->dq_off); if (ret != info->dqi_entry_size) { if (ret >= 0) ret = -EIO; @@ -616,7 +610,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) "structure for id %u.\n", dquot->dq_id); set_bit(DQ_FAKE_B, &dquot->dq_flags); memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk)); - freedqbuf(ddquot); + kfree(ddquot); goto out; } spin_lock(&dq_data_lock); @@ -627,7 +621,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) !dquot->dq_dqb.dqb_isoftlimit) set_bit(DQ_FAKE_B, &dquot->dq_flags); spin_unlock(&dq_data_lock); - freedqbuf(ddquot); + kfree(ddquot); out: dqstats.reads++; return ret; -- cgit v0.10.2 From 9e3509e273ecc2a5f937c493f9bb71e5e41ac2e5 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 26 Jan 2009 16:45:12 +0100 Subject: vfs: Use lowercase names of quota functions Use lowercase names of quota functions instead of old uppercase ones. 
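
A minimal sketch of what the rename means for a call site, modeled on the fs/attr.c hunk below; the function name is invented, and the wrappers are the vfs_dq_* inlines from <linux/quotaops.h> that this series switches to. Behaviour is unchanged, only the spelling of the quota calls.

#include <linux/fs.h>
#include <linux/quotaops.h>

/*
 * Illustrative only, e.g.
 *   DQUOT_INIT()     -> vfs_dq_init()
 *   DQUOT_DROP()     -> vfs_dq_drop()
 *   DQUOT_TRANSFER() -> vfs_dq_transfer()
 */
static int example_transfer_quota(struct inode *inode, struct iattr *attr)
{
	if ((attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
	    (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid))
		return vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
	return 0;
}
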
Signed-off-by: Jan Kara CC: Alexander Viro diff --git a/fs/attr.c b/fs/attr.c index f436019..9fe1b1b 100644 --- a/fs/attr.c +++ b/fs/attr.c @@ -173,7 +173,8 @@ int notify_change(struct dentry * dentry, struct iattr * attr) if (!error) { if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) || (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) - error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0; + error = vfs_dq_transfer(inode, attr) ? + -EDQUOT : 0; if (!error) error = inode_setattr(inode, attr); } diff --git a/fs/inode.c b/fs/inode.c index 826fb0b..bb81bd5 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -284,7 +284,7 @@ void clear_inode(struct inode *inode) BUG_ON(!(inode->i_state & I_FREEING)); BUG_ON(inode->i_state & I_CLEAR); inode_sync_wait(inode); - DQUOT_DROP(inode); + vfs_dq_drop(inode); if (inode->i_sb->s_op->clear_inode) inode->i_sb->s_op->clear_inode(inode); if (S_ISBLK(inode->i_mode) && inode->i_bdev) @@ -1158,7 +1158,7 @@ void generic_delete_inode(struct inode *inode) if (op->delete_inode) { void (*delete)(struct inode *) = op->delete_inode; if (!is_bad_inode(inode)) - DQUOT_INIT(inode); + vfs_dq_init(inode); /* Filesystems implementing their own * s_op->delete_inode are required to call * truncate_inode_pages and clear_inode() diff --git a/fs/namei.c b/fs/namei.c index bbc15c2..8937f4e 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1470,7 +1470,7 @@ int vfs_create(struct inode *dir, struct dentry *dentry, int mode, error = security_inode_create(dir, dentry, mode); if (error) return error; - DQUOT_INIT(dir); + vfs_dq_init(dir); error = dir->i_op->create(dir, dentry, mode, nd); if (!error) fsnotify_create(dir, dentry); @@ -1544,7 +1544,7 @@ int may_open(struct path *path, int acc_mode, int flag) error = security_path_truncate(path, 0, ATTR_MTIME|ATTR_CTIME|ATTR_OPEN); if (!error) { - DQUOT_INIT(inode); + vfs_dq_init(inode); error = do_truncate(dentry, 0, ATTR_MTIME|ATTR_CTIME|ATTR_OPEN, @@ -1555,7 +1555,7 @@ int may_open(struct path *path, int acc_mode, int flag) return error; } else if (flag & FMODE_WRITE) - DQUOT_INIT(inode); + vfs_dq_init(inode); return 0; } @@ -1938,7 +1938,7 @@ int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev) if (error) return error; - DQUOT_INIT(dir); + vfs_dq_init(dir); error = dir->i_op->mknod(dir, dentry, mode, dev); if (!error) fsnotify_create(dir, dentry); @@ -2037,7 +2037,7 @@ int vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) if (error) return error; - DQUOT_INIT(dir); + vfs_dq_init(dir); error = dir->i_op->mkdir(dir, dentry, mode); if (!error) fsnotify_mkdir(dir, dentry); @@ -2123,7 +2123,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry) if (!dir->i_op->rmdir) return -EPERM; - DQUOT_INIT(dir); + vfs_dq_init(dir); mutex_lock(&dentry->d_inode->i_mutex); dentry_unhash(dentry); @@ -2210,7 +2210,7 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry) if (!dir->i_op->unlink) return -EPERM; - DQUOT_INIT(dir); + vfs_dq_init(dir); mutex_lock(&dentry->d_inode->i_mutex); if (d_mountpoint(dentry)) @@ -2321,7 +2321,7 @@ int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname) if (error) return error; - DQUOT_INIT(dir); + vfs_dq_init(dir); error = dir->i_op->symlink(dir, dentry, oldname); if (!error) fsnotify_create(dir, dentry); @@ -2405,7 +2405,7 @@ int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_de return error; mutex_lock(&inode->i_mutex); - DQUOT_INIT(dir); + vfs_dq_init(dir); error = dir->i_op->link(old_dentry, dir, new_dentry); 
mutex_unlock(&inode->i_mutex); if (!error) @@ -2604,8 +2604,8 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry, if (!old_dir->i_op->rename) return -EPERM; - DQUOT_INIT(old_dir); - DQUOT_INIT(new_dir); + vfs_dq_init(old_dir); + vfs_dq_init(new_dir); old_name = fsnotify_oldname_init(old_dentry->d_name.name); diff --git a/fs/open.c b/fs/open.c index a3a78ce..75b6167 100644 --- a/fs/open.c +++ b/fs/open.c @@ -273,7 +273,7 @@ static long do_sys_truncate(const char __user *pathname, loff_t length) if (!error) error = security_path_truncate(&path, length, 0); if (!error) { - DQUOT_INIT(inode); + vfs_dq_init(inode); error = do_truncate(path.dentry, length, 0, NULL); } diff --git a/fs/super.c b/fs/super.c index 6ce5014..0f9d17f 100644 --- a/fs/super.c +++ b/fs/super.c @@ -197,7 +197,7 @@ void deactivate_super(struct super_block *s) if (atomic_dec_and_lock(&s->s_active, &sb_lock)) { s->s_count -= S_BIAS-1; spin_unlock(&sb_lock); - DQUOT_OFF(s, 0); + vfs_dq_off(s, 0); down_write(&s->s_umount); fs->kill_sb(s); put_filesystem(fs); @@ -266,7 +266,7 @@ EXPORT_SYMBOL(unlock_super); void __fsync_super(struct super_block *sb) { sync_inodes_sb(sb, 0); - DQUOT_SYNC(sb); + vfs_dq_sync(sb); lock_super(sb); if (sb->s_dirt && sb->s_op->write_super) sb->s_op->write_super(sb); @@ -655,7 +655,7 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force) mark_files_ro(sb); else if (!fs_may_remount_ro(sb)) return -EBUSY; - retval = DQUOT_OFF(sb, 1); + retval = vfs_dq_off(sb, 1); if (retval < 0 && retval != -ENOSYS) return -EBUSY; } @@ -670,7 +670,7 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force) } sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK); if (remount_rw) - DQUOT_ON_REMOUNT(sb); + vfs_dq_quota_on_remount(sb); return 0; } diff --git a/fs/sync.c b/fs/sync.c index a16d53e..ef36bc9 100644 --- a/fs/sync.c +++ b/fs/sync.c @@ -25,7 +25,7 @@ static void do_sync(unsigned long wait) { wakeup_pdflush(0); sync_inodes(0); /* All mappings, inodes and their blockdevs */ - DQUOT_SYNC(NULL); + vfs_dq_sync(NULL); sync_supers(); /* Write the superblocks */ sync_filesystems(0); /* Start syncing the filesystems */ sync_filesystems(wait); /* Waitingly sync the filesystems */ -- cgit v0.10.2 From 314649558d2ff0927d102fa06c62557a92ce5c1d Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 26 Jan 2009 16:46:21 +0100 Subject: ramfs: Remove quota call Ramfs has no bussiness in quotas. Signed-off-by: Jan Kara diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c index 5d7c7ec..995ef1d 100644 --- a/fs/ramfs/file-nommu.c +++ b/fs/ramfs/file-nommu.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include @@ -205,11 +204,6 @@ static int ramfs_nommu_setattr(struct dentry *dentry, struct iattr *ia) if (ret) return ret; - /* by providing our own setattr() method, we skip this quotaism */ - if ((old_ia_valid & ATTR_UID && ia->ia_uid != inode->i_uid) || - (old_ia_valid & ATTR_GID && ia->ia_gid != inode->i_gid)) - ret = DQUOT_TRANSFER(inode, ia) ? -EDQUOT : 0; - /* pick out size-changing events */ if (ia->ia_valid & ATTR_SIZE) { loff_t size = i_size_read(inode); -- cgit v0.10.2 From 6f90bee5062a8af24d8aa5c47182d15aa28a0f17 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 26 Jan 2009 16:52:06 +0100 Subject: ext2: Use lowercase names of quota functions Use lowercase names of quota functions instead of old uppercase ones. 
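
The filesystem conversions that follow all repeat one pattern for data blocks: charge the quota first, then allocate, and return the charge if the allocation fails. A hedged sketch of that pattern, modeled on the balloc hunks below; only the vfs_dq_* wrappers are real, the function and its allocator callback are invented so the example stays self-contained.

#include <linux/fs.h>
#include <linux/quotaops.h>

/*
 * Sketch of the charge/rollback pattern. Real filesystems call their
 * own block allocator where grab_blocks() is invoked here.
 */
static int example_charge_and_alloc(struct inode *inode, unsigned int count,
				    int (*grab_blocks)(struct inode *,
						       unsigned int))
{
	int err;

	if (vfs_dq_alloc_block(inode, count))	/* charge quota up front */
		return -EDQUOT;

	err = grab_blocks(inode, count);
	if (err)
		vfs_dq_free_block(inode, count);	/* roll the charge back */
	return err;
}
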
Signed-off-by: Jan Kara CC: linux-ext4@vger.kernel.org diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c index 4a29d63..7f8d2e5 100644 --- a/fs/ext2/balloc.c +++ b/fs/ext2/balloc.c @@ -570,7 +570,7 @@ do_more: error_return: brelse(bitmap_bh); release_blocks(sb, freed); - DQUOT_FREE_BLOCK(inode, freed); + vfs_dq_free_block(inode, freed); } /** @@ -1247,7 +1247,7 @@ ext2_fsblk_t ext2_new_blocks(struct inode *inode, ext2_fsblk_t goal, /* * Check quota for allocation of this block. */ - if (DQUOT_ALLOC_BLOCK(inode, num)) { + if (vfs_dq_alloc_block(inode, num)) { *errp = -EDQUOT; return 0; } @@ -1409,7 +1409,7 @@ allocated: *errp = 0; brelse(bitmap_bh); - DQUOT_FREE_BLOCK(inode, *count-num); + vfs_dq_free_block(inode, *count-num); *count = num; return ret_block; @@ -1420,7 +1420,7 @@ out: * Undo the block allocation */ if (!performed_allocation) - DQUOT_FREE_BLOCK(inode, *count); + vfs_dq_free_block(inode, *count); brelse(bitmap_bh); return 0; } diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c index 66321a8..15387c9 100644 --- a/fs/ext2/ialloc.c +++ b/fs/ext2/ialloc.c @@ -121,8 +121,8 @@ void ext2_free_inode (struct inode * inode) if (!is_bad_inode(inode)) { /* Quota is already initialized in iput() */ ext2_xattr_delete_inode(inode); - DQUOT_FREE_INODE(inode); - DQUOT_DROP(inode); + vfs_dq_free_inode(inode); + vfs_dq_drop(inode); } es = EXT2_SB(sb)->s_es; @@ -586,7 +586,7 @@ got: goto fail_drop; } - if (DQUOT_ALLOC_INODE(inode)) { + if (vfs_dq_alloc_inode(inode)) { err = -EDQUOT; goto fail_drop; } @@ -605,10 +605,10 @@ got: return inode; fail_free_drop: - DQUOT_FREE_INODE(inode); + vfs_dq_free_inode(inode); fail_drop: - DQUOT_DROP(inode); + vfs_dq_drop(inode); inode->i_flags |= S_NOQUOTA; inode->i_nlink = 0; unlock_new_inode(inode); diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 23fff2f..b43b9556 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -1444,7 +1444,7 @@ int ext2_setattr(struct dentry *dentry, struct iattr *iattr) return error; if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) || (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) { - error = DQUOT_TRANSFER(inode, iattr) ? -EDQUOT : 0; + error = vfs_dq_transfer(inode, iattr) ? 
-EDQUOT : 0; if (error) return error; } diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c index 987a526..7913531 100644 --- a/fs/ext2/xattr.c +++ b/fs/ext2/xattr.c @@ -642,7 +642,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh, ea_bdebug(new_bh, "reusing block"); error = -EDQUOT; - if (DQUOT_ALLOC_BLOCK(inode, 1)) { + if (vfs_dq_alloc_block(inode, 1)) { unlock_buffer(new_bh); goto cleanup; } @@ -699,7 +699,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh, * as if nothing happened and cleanup the unused block */ if (error && error != -ENOSPC) { if (new_bh && new_bh != old_bh) - DQUOT_FREE_BLOCK(inode, 1); + vfs_dq_free_block(inode, 1); goto cleanup; } } else @@ -731,7 +731,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh, le32_add_cpu(&HDR(old_bh)->h_refcount, -1); if (ce) mb_cache_entry_release(ce); - DQUOT_FREE_BLOCK(inode, 1); + vfs_dq_free_block(inode, 1); mark_buffer_dirty(old_bh); ea_bdebug(old_bh, "refcount now=%d", le32_to_cpu(HDR(old_bh)->h_refcount)); @@ -794,7 +794,7 @@ ext2_xattr_delete_inode(struct inode *inode) mark_buffer_dirty(bh); if (IS_SYNC(inode)) sync_dirty_buffer(bh); - DQUOT_FREE_BLOCK(inode, 1); + vfs_dq_free_block(inode, 1); } EXT2_I(inode)->i_file_acl = 0; -- cgit v0.10.2 From 81a052273998f94b098945c4c313e05246956eb2 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 26 Jan 2009 16:58:01 +0100 Subject: ext3: Use lowercase names of quota functions Use lowercase names of quota functions instead of old uppercase ones. Signed-off-by: Jan Kara CC: linux-ext4@vger.kernel.org diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c index 0dbf1c0..225202d 100644 --- a/fs/ext3/balloc.c +++ b/fs/ext3/balloc.c @@ -676,7 +676,7 @@ void ext3_free_blocks(handle_t *handle, struct inode *inode, } ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks); if (dquot_freed_blocks) - DQUOT_FREE_BLOCK(inode, dquot_freed_blocks); + vfs_dq_free_block(inode, dquot_freed_blocks); return; } @@ -1502,7 +1502,7 @@ ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode, /* * Check quota for allocation of this block. */ - if (DQUOT_ALLOC_BLOCK(inode, num)) { + if (vfs_dq_alloc_block(inode, num)) { *errp = -EDQUOT; return 0; } @@ -1714,7 +1714,7 @@ allocated: *errp = 0; brelse(bitmap_bh); - DQUOT_FREE_BLOCK(inode, *count-num); + vfs_dq_free_block(inode, *count-num); *count = num; return ret_block; @@ -1729,7 +1729,7 @@ out: * Undo the block allocation */ if (!performed_allocation) - DQUOT_FREE_BLOCK(inode, *count); + vfs_dq_free_block(inode, *count); brelse(bitmap_bh); return 0; } diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c index 8de6c72..dd13d60 100644 --- a/fs/ext3/ialloc.c +++ b/fs/ext3/ialloc.c @@ -123,10 +123,10 @@ void ext3_free_inode (handle_t *handle, struct inode * inode) * Note: we must free any quota before locking the superblock, * as writing the quota to disk may need the lock as well. 
*/ - DQUOT_INIT(inode); + vfs_dq_init(inode); ext3_xattr_delete_inode(handle, inode); - DQUOT_FREE_INODE(inode); - DQUOT_DROP(inode); + vfs_dq_free_inode(inode); + vfs_dq_drop(inode); is_directory = S_ISDIR(inode->i_mode); @@ -589,7 +589,7 @@ got: sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE : 0; ret = inode; - if(DQUOT_ALLOC_INODE(inode)) { + if (vfs_dq_alloc_inode(inode)) { err = -EDQUOT; goto fail_drop; } @@ -620,10 +620,10 @@ really_out: return ret; fail_free_drop: - DQUOT_FREE_INODE(inode); + vfs_dq_free_inode(inode); fail_drop: - DQUOT_DROP(inode); + vfs_dq_drop(inode); inode->i_flags |= S_NOQUOTA; inode->i_nlink = 0; unlock_new_inode(inode); diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index 5fa453b..c8f9bd3 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c @@ -3055,7 +3055,7 @@ int ext3_setattr(struct dentry *dentry, struct iattr *attr) error = PTR_ERR(handle); goto err_out; } - error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0; + error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0; if (error) { ext3_journal_stop(handle); return error; @@ -3146,7 +3146,7 @@ static int ext3_writepage_trans_blocks(struct inode *inode) ret = 2 * (bpp + indirects) + 2; #ifdef CONFIG_QUOTA - /* We know that structure was already allocated during DQUOT_INIT so + /* We know that structure was already allocated during vfs_dq_init so * we will be updating only the data blocks + inodes */ ret += 2*EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb); #endif @@ -3237,7 +3237,7 @@ int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode) * i_size has been changed by generic_commit_write() and we thus need * to include the updated inode in the current transaction. * - * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks + * Also, vfs_dq_alloc_space() will always dirty the inode when blocks * are allocated to the file. 
* * If the inode is marked synchronous, we don't honour that here - doing diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c index 4db4ffa..e2fc63c 100644 --- a/fs/ext3/namei.c +++ b/fs/ext3/namei.c @@ -2049,7 +2049,7 @@ static int ext3_rmdir (struct inode * dir, struct dentry *dentry) /* Initialize quotas before so that eventual writes go in * separate transaction */ - DQUOT_INIT(dentry->d_inode); + vfs_dq_init(dentry->d_inode); handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb)); if (IS_ERR(handle)) return PTR_ERR(handle); @@ -2108,7 +2108,7 @@ static int ext3_unlink(struct inode * dir, struct dentry *dentry) /* Initialize quotas before so that eventual writes go * in separate transaction */ - DQUOT_INIT(dentry->d_inode); + vfs_dq_init(dentry->d_inode); handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb)); if (IS_ERR(handle)) return PTR_ERR(handle); @@ -2272,7 +2272,7 @@ static int ext3_rename (struct inode * old_dir, struct dentry *old_dentry, /* Initialize quotas before so that eventual writes go * in separate transaction */ if (new_dentry->d_inode) - DQUOT_INIT(new_dentry->d_inode); + vfs_dq_init(new_dentry->d_inode); handle = ext3_journal_start(old_dir, 2 * EXT3_DATA_TRANS_BLOCKS(old_dir->i_sb) + EXT3_INDEX_EXTRA_TRANS_BLOCKS + 2); diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 41e6ae6..9e5b8e3 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -1436,7 +1436,7 @@ static void ext3_orphan_cleanup (struct super_block * sb, } list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan); - DQUOT_INIT(inode); + vfs_dq_init(inode); if (inode->i_nlink) { printk(KERN_DEBUG "%s: truncating inode %lu to %Ld bytes\n", @@ -2700,7 +2700,7 @@ static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf) * Process 1 Process 2 * ext3_create() quota_sync() * journal_start() write_dquot() - * DQUOT_INIT() down(dqio_mutex) + * vfs_dq_init() down(dqio_mutex) * down(dqio_mutex) journal_start() * */ diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c index 175414a..83b7be8 100644 --- a/fs/ext3/xattr.c +++ b/fs/ext3/xattr.c @@ -498,7 +498,7 @@ ext3_xattr_release_block(handle_t *handle, struct inode *inode, error = ext3_journal_dirty_metadata(handle, bh); if (IS_SYNC(inode)) handle->h_sync = 1; - DQUOT_FREE_BLOCK(inode, 1); + vfs_dq_free_block(inode, 1); ea_bdebug(bh, "refcount now=%d; releasing", le32_to_cpu(BHDR(bh)->h_refcount)); if (ce) @@ -774,7 +774,7 @@ inserted: /* The old block is released after updating the inode. */ error = -EDQUOT; - if (DQUOT_ALLOC_BLOCK(inode, 1)) + if (vfs_dq_alloc_block(inode, 1)) goto cleanup; error = ext3_journal_get_write_access(handle, new_bh); @@ -848,7 +848,7 @@ cleanup: return error; cleanup_dquot: - DQUOT_FREE_BLOCK(inode, 1); + vfs_dq_free_block(inode, 1); goto cleanup; bad_block: -- cgit v0.10.2 From a269eb18294d35874c53311acc2cd0b5ef477ce5 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 26 Jan 2009 17:04:39 +0100 Subject: ext4: Use lowercase names of quota functions Use lowercase names of quota functions instead of old uppercase ones. 
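
One detail worth calling out, visible in the rmdir/unlink and super.c hunks of the ext3 patch above and the ext4 patch below: the victim inode's dquots are initialized before the journal handle is started, so quota-file I/O triggered by vfs_dq_init() never nests inside the running transaction. A hedged sketch of that ordering; the function name and placement are invented, the journalling calls are taken from the hunks and assume an in-tree fs/ext4 build.

#include <linux/fs.h>
#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/quotaops.h>
#include "ext4_jbd2.h"

/* Sketch only: initialize quota before starting the handle. */
static int example_delete_prologue(struct inode *dir, struct dentry *dentry)
{
	handle_t *handle;

	vfs_dq_init(dentry->d_inode);

	handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	/* ... the directory entry removal itself would go here ... */
	ext4_journal_stop(handle);
	return 0;
}
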
Signed-off-by: Jan Kara Acked-by: Mingming Cao CC: linux-ext4@vger.kernel.org diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index de9459b..38f40d5 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -536,7 +536,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode, ext4_mb_free_blocks(handle, inode, block, count, metadata, &dquot_freed_blocks); if (dquot_freed_blocks) - DQUOT_FREE_BLOCK(inode, dquot_freed_blocks); + vfs_dq_free_block(inode, dquot_freed_blocks); return; } diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 2d2b358..fb51b40 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -220,10 +220,10 @@ void ext4_free_inode(handle_t *handle, struct inode *inode) * Note: we must free any quota before locking the superblock, * as writing the quota to disk may need the lock as well. */ - DQUOT_INIT(inode); + vfs_dq_init(inode); ext4_xattr_delete_inode(handle, inode); - DQUOT_FREE_INODE(inode); - DQUOT_DROP(inode); + vfs_dq_free_inode(inode); + vfs_dq_drop(inode); is_directory = S_ISDIR(inode->i_mode); @@ -915,7 +915,7 @@ got: ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize; ret = inode; - if (DQUOT_ALLOC_INODE(inode)) { + if (vfs_dq_alloc_inode(inode)) { err = -EDQUOT; goto fail_drop; } @@ -956,10 +956,10 @@ really_out: return ret; fail_free_drop: - DQUOT_FREE_INODE(inode); + vfs_dq_free_inode(inode); fail_drop: - DQUOT_DROP(inode); + vfs_dq_drop(inode); inode->i_flags |= S_NOQUOTA; inode->i_nlink = 0; unlock_new_inode(inode); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 8290cfb..71d3ecd 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -4642,7 +4642,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) error = PTR_ERR(handle); goto err_out; } - error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0; + error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0; if (error) { ext4_journal_stop(handle); return error; @@ -5021,7 +5021,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode) * i_size has been changed by generic_commit_write() and we thus need * to include the updated inode in the current transaction. * - * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks + * Also, vfs_dq_alloc_block() will always dirty the inode when blocks * are allocated to the file. 
* * If the inode is marked synchronous, we don't honour that here - doing diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 4de4209..b038188 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -4587,7 +4587,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, return 0; } reserv_blks = ar->len; - while (ar->len && DQUOT_ALLOC_BLOCK(ar->inode, ar->len)) { + while (ar->len && vfs_dq_alloc_block(ar->inode, ar->len)) { ar->flags |= EXT4_MB_HINT_NOPREALLOC; ar->len--; } @@ -4663,7 +4663,7 @@ out2: kmem_cache_free(ext4_ac_cachep, ac); out1: if (inquota && ar->len < inquota) - DQUOT_FREE_BLOCK(ar->inode, inquota - ar->len); + vfs_dq_free_block(ar->inode, inquota - ar->len); out3: if (!ar->len) { if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag) diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index ba702bd..8341024 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -2092,7 +2092,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry) /* Initialize quotas before so that eventual writes go in * separate transaction */ - DQUOT_INIT(dentry->d_inode); + vfs_dq_init(dentry->d_inode); handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb)); if (IS_ERR(handle)) return PTR_ERR(handle); @@ -2151,7 +2151,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry) /* Initialize quotas before so that eventual writes go * in separate transaction */ - DQUOT_INIT(dentry->d_inode); + vfs_dq_init(dentry->d_inode); handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb)); if (IS_ERR(handle)) return PTR_ERR(handle); @@ -2318,7 +2318,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, /* Initialize quotas before so that eventual writes go * in separate transaction */ if (new_dentry->d_inode) - DQUOT_INIT(new_dentry->d_inode); + vfs_dq_init(new_dentry->d_inode); handle = ext4_journal_start(old_dir, 2 * EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) + EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 5a238e9..f7371a6 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1804,7 +1804,7 @@ static void ext4_orphan_cleanup(struct super_block *sb, } list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan); - DQUOT_INIT(inode); + vfs_dq_init(inode); if (inode->i_nlink) { printk(KERN_DEBUG "%s: truncating inode %lu to %lld bytes\n", @@ -3369,8 +3369,8 @@ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf) * is locked for write. Otherwise the are possible deadlocks: * Process 1 Process 2 * ext4_create() quota_sync() - * jbd2_journal_start() write_dquot() - * DQUOT_INIT() down(dqio_mutex) + * jbd2_journal_start() write_dquot() + * vfs_dq_init() down(dqio_mutex) * down(dqio_mutex) jbd2_journal_start() * */ diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 157ce65..62b31c2 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -490,7 +490,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode, error = ext4_handle_dirty_metadata(handle, inode, bh); if (IS_SYNC(inode)) ext4_handle_sync(handle); - DQUOT_FREE_BLOCK(inode, 1); + vfs_dq_free_block(inode, 1); ea_bdebug(bh, "refcount now=%d; releasing", le32_to_cpu(BHDR(bh)->h_refcount)); if (ce) @@ -784,7 +784,7 @@ inserted: /* The old block is released after updating the inode. 
*/ error = -EDQUOT; - if (DQUOT_ALLOC_BLOCK(inode, 1)) + if (vfs_dq_alloc_block(inode, 1)) goto cleanup; error = ext4_journal_get_write_access(handle, new_bh); @@ -860,7 +860,7 @@ cleanup: return error; cleanup_dquot: - DQUOT_FREE_BLOCK(inode, 1); + vfs_dq_free_block(inode, 1); goto cleanup; bad_block: -- cgit v0.10.2 From 77db4f25bca22ce96be0cd3c5a3160599817ff45 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 26 Jan 2009 17:14:18 +0100 Subject: reiserfs: Use lowercase names of quota functions Use lowercase names of quota functions instead of old uppercase ones. Signed-off-by: Jan Kara CC: reiserfs-devel@vger.kernel.org diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c index 4646caa..f32d142 100644 --- a/fs/reiserfs/bitmap.c +++ b/fs/reiserfs/bitmap.c @@ -430,7 +430,7 @@ static void _reiserfs_free_block(struct reiserfs_transaction_handle *th, journal_mark_dirty(th, s, sbh); if (for_unformatted) - DQUOT_FREE_BLOCK_NODIRTY(inode, 1); + vfs_dq_free_block_nodirty(inode, 1); } void reiserfs_free_block(struct reiserfs_transaction_handle *th, @@ -1055,7 +1055,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start amount_needed, hint->inode->i_uid); #endif quota_ret = - DQUOT_ALLOC_BLOCK_NODIRTY(hint->inode, amount_needed); + vfs_dq_alloc_block_nodirty(hint->inode, amount_needed); if (quota_ret) /* Quota exceeded? */ return QUOTA_EXCEEDED; if (hint->preallocate && hint->prealloc_size) { @@ -1064,8 +1064,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start "reiserquota: allocating (prealloc) %d blocks id=%u", hint->prealloc_size, hint->inode->i_uid); #endif - quota_ret = - DQUOT_PREALLOC_BLOCK_NODIRTY(hint->inode, + quota_ret = vfs_dq_prealloc_block_nodirty(hint->inode, hint->prealloc_size); if (quota_ret) hint->preallocate = hint->prealloc_size = 0; @@ -1098,7 +1097,10 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start nr_allocated, hint->inode->i_uid); #endif - DQUOT_FREE_BLOCK_NODIRTY(hint->inode, amount_needed + hint->prealloc_size - nr_allocated); /* Free not allocated blocks */ + /* Free not allocated blocks */ + vfs_dq_free_block_nodirty(hint->inode, + amount_needed + hint->prealloc_size - + nr_allocated); } while (nr_allocated--) reiserfs_free_block(hint->th, hint->inode, @@ -1129,7 +1131,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start REISERFS_I(hint->inode)->i_prealloc_count, hint->inode->i_uid); #endif - DQUOT_FREE_BLOCK_NODIRTY(hint->inode, amount_needed + + vfs_dq_free_block_nodirty(hint->inode, amount_needed + hint->prealloc_size - nr_allocated - REISERFS_I(hint->inode)-> i_prealloc_count); diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 55fce92..823227a 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -53,7 +53,7 @@ void reiserfs_delete_inode(struct inode *inode) * after delete_object so that quota updates go into the same transaction as * stat data deletion */ if (!err) - DQUOT_FREE_INODE(inode); + vfs_dq_free_inode(inode); if (journal_end(&th, inode->i_sb, jbegin_count)) goto out; @@ -1763,7 +1763,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th, BUG_ON(!th->t_trans_id); - if (DQUOT_ALLOC_INODE(inode)) { + if (vfs_dq_alloc_inode(inode)) { err = -EDQUOT; goto out_end_trans; } @@ -1947,12 +1947,12 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th, INODE_PKEY(inode)->k_objectid = 0; /* Quota change must be inside a transaction for journaling */ - DQUOT_FREE_INODE(inode); + vfs_dq_free_inode(inode); out_end_trans: journal_end(th, th->t_super, 
th->t_blocks_allocated); /* Drop can be outside and it needs more credits so it's better to have it outside */ - DQUOT_DROP(inode); + vfs_dq_drop(inode); inode->i_flags |= S_NOQUOTA; make_bad_inode(inode); @@ -3119,7 +3119,7 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr) if (error) goto out; error = - DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0; + vfs_dq_transfer(inode, attr) ? -EDQUOT : 0; if (error) { journal_end(&th, inode->i_sb, jbegin_count); diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c index 738967f..639d635 100644 --- a/fs/reiserfs/namei.c +++ b/fs/reiserfs/namei.c @@ -555,7 +555,7 @@ static int reiserfs_add_entry(struct reiserfs_transaction_handle *th, */ static int drop_new_inode(struct inode *inode) { - DQUOT_DROP(inode); + vfs_dq_drop(inode); make_bad_inode(inode); inode->i_flags |= S_NOQUOTA; iput(inode); @@ -563,7 +563,7 @@ static int drop_new_inode(struct inode *inode) } /* utility function that does setup for reiserfs_new_inode. -** DQUOT_INIT needs lots of credits so it's better to have it +** vfs_dq_init needs lots of credits so it's better to have it ** outside of a transaction, so we had to pull some bits of ** reiserfs_new_inode out into this func. */ @@ -586,7 +586,7 @@ static int new_inode_init(struct inode *inode, struct inode *dir, int mode) } else { inode->i_gid = current_fsgid(); } - DQUOT_INIT(inode); + vfs_dq_init(inode); return 0; } diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c index abbc64d..73aaa33 100644 --- a/fs/reiserfs/stree.c +++ b/fs/reiserfs/stree.c @@ -1297,7 +1297,7 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th, struct treepath "reiserquota delete_item(): freeing %u, id=%u type=%c", quota_cut_bytes, p_s_inode->i_uid, head2type(&s_ih)); #endif - DQUOT_FREE_SPACE_NODIRTY(p_s_inode, quota_cut_bytes); + vfs_dq_free_space_nodirty(p_s_inode, quota_cut_bytes); /* Return deleted body length */ return n_ret_value; @@ -1383,7 +1383,7 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th, quota_cut_bytes, inode->i_uid, key2type(key)); #endif - DQUOT_FREE_SPACE_NODIRTY(inode, + vfs_dq_free_space_nodirty(inode, quota_cut_bytes); } break; @@ -1734,7 +1734,7 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, "reiserquota cut_from_item(): freeing %u id=%u type=%c", quota_cut_bytes, p_s_inode->i_uid, '?'); #endif - DQUOT_FREE_SPACE_NODIRTY(p_s_inode, quota_cut_bytes); + vfs_dq_free_space_nodirty(p_s_inode, quota_cut_bytes); return n_ret_value; } @@ -1971,7 +1971,7 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree key2type(&(p_s_key->on_disk_key))); #endif - if (DQUOT_ALLOC_SPACE_NODIRTY(inode, n_pasted_size)) { + if (vfs_dq_alloc_space_nodirty(inode, n_pasted_size)) { pathrelse(p_s_search_path); return -EDQUOT; } @@ -2027,7 +2027,7 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree n_pasted_size, inode->i_uid, key2type(&(p_s_key->on_disk_key))); #endif - DQUOT_FREE_SPACE_NODIRTY(inode, n_pasted_size); + vfs_dq_free_space_nodirty(inode, n_pasted_size); return retval; } @@ -2060,7 +2060,7 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th, struct treepath #endif /* We can't dirty inode here. It would be immediately written but * appropriate stat item isn't inserted yet... 
*/ - if (DQUOT_ALLOC_SPACE_NODIRTY(inode, quota_bytes)) { + if (vfs_dq_alloc_space_nodirty(inode, quota_bytes)) { pathrelse(p_s_path); return -EDQUOT; } @@ -2112,6 +2112,6 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th, struct treepath quota_bytes, inode->i_uid, head2type(p_s_ih)); #endif if (inode) - DQUOT_FREE_SPACE_NODIRTY(inode, quota_bytes); + vfs_dq_free_space_nodirty(inode, quota_bytes); return retval; } diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index 317c035..5dbafb7 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -250,7 +250,7 @@ static int finish_unfinished(struct super_block *s) retval = remove_save_link_only(s, &save_link_key, 0); continue; } - DQUOT_INIT(inode); + vfs_dq_init(inode); if (truncate && S_ISDIR(inode->i_mode)) { /* We got a truncate request for a dir which is impossible. -- cgit v0.10.2 From 5f5fa796c6b0158b50a2d5d2f80a926466ea87b3 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 26 Jan 2009 17:15:30 +0100 Subject: ufs: Use lowercase names of quota functions Use lowercase names of quota functions instead of old uppercase ones. Signed-off-by: Jan Kara CC: Evgeniy Dushistov diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c index 0d9ada1..54c16ec 100644 --- a/fs/ufs/balloc.c +++ b/fs/ufs/balloc.c @@ -85,7 +85,7 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count) "bit already cleared for fragment %u", i); } - DQUOT_FREE_BLOCK (inode, count); + vfs_dq_free_block(inode, count); fs32_add(sb, &ucg->cg_cs.cs_nffree, count); @@ -195,7 +195,7 @@ do_more: ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno); if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) ufs_clusteracct (sb, ucpi, blkno, 1); - DQUOT_FREE_BLOCK(inode, uspi->s_fpb); + vfs_dq_free_block(inode, uspi->s_fpb); fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1); uspi->cs_total.cs_nbfree++; @@ -556,7 +556,7 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment, fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1); for (i = oldcount; i < newcount; i++) ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i); - if(DQUOT_ALLOC_BLOCK(inode, count)) { + if (vfs_dq_alloc_block(inode, count)) { *err = -EDQUOT; return 0; } @@ -664,7 +664,7 @@ cg_found: for (i = count; i < uspi->s_fpb; i++) ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i); i = uspi->s_fpb - count; - DQUOT_FREE_BLOCK(inode, i); + vfs_dq_free_block(inode, i); fs32_add(sb, &ucg->cg_cs.cs_nffree, i); uspi->cs_total.cs_nffree += i; @@ -676,7 +676,7 @@ cg_found: result = ufs_bitmap_search (sb, ucpi, goal, allocsize); if (result == INVBLOCK) return 0; - if(DQUOT_ALLOC_BLOCK(inode, count)) { + if (vfs_dq_alloc_block(inode, count)) { *err = -EDQUOT; return 0; } @@ -747,7 +747,7 @@ gotit: ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno); if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) ufs_clusteracct (sb, ucpi, blkno, -1); - if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) { + if (vfs_dq_alloc_block(inode, uspi->s_fpb)) { *err = -EDQUOT; return INVBLOCK; } diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c index 6f5dcf0..3527c00 100644 --- a/fs/ufs/ialloc.c +++ b/fs/ufs/ialloc.c @@ -95,8 +95,8 @@ void ufs_free_inode (struct inode * inode) is_directory = S_ISDIR(inode->i_mode); - DQUOT_FREE_INODE(inode); - DQUOT_DROP(inode); + vfs_dq_free_inode(inode); + vfs_dq_drop(inode); clear_inode (inode); @@ -355,8 +355,8 @@ cg_found: unlock_super (sb); - if (DQUOT_ALLOC_INODE(inode)) { - DQUOT_DROP(inode); + if (vfs_dq_alloc_inode(inode)) { + vfs_dq_drop(inode); err = -EDQUOT; goto 
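
The inode-side counterpart of the same conversion, repeated in the ialloc hunks below and in the equivalent ext2/ext3/ext4 hunks above: charge quota for a new inode and, on failure or deletion, return the charge and drop the dquot references. A hedged sketch; only the vfs_dq_* wrappers are real, the helper names are invented.

#include <linux/fs.h>
#include <linux/quotaops.h>

/* Sketch: charging a freshly created inode to quota. */
static int example_charge_new_inode(struct inode *inode)
{
	if (vfs_dq_alloc_inode(inode)) {
		/* give back the dquot references on failure */
		vfs_dq_drop(inode);
		return -EDQUOT;
	}
	return 0;
}

/* Sketch: releasing the charge when the inode is freed. */
static void example_uncharge_inode(struct inode *inode)
{
	vfs_dq_free_inode(inode);
	vfs_dq_drop(inode);
}
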
fail_without_unlock; } -- cgit v0.10.2 From bacfb7c2e5d10f40f7adb23aeeffc824b839eaa8 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 26 Jan 2009 17:20:46 +0100 Subject: udf: Use lowercase names of quota functions Use lowercase names of quota functions instead of old uppercase ones. Signed-off-by: Jan Kara diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c index 1b809bd4..2bb788a 100644 --- a/fs/udf/balloc.c +++ b/fs/udf/balloc.c @@ -206,7 +206,7 @@ static void udf_bitmap_free_blocks(struct super_block *sb, ((char *)bh->b_data)[(bit + i) >> 3]); } else { if (inode) - DQUOT_FREE_BLOCK(inode, 1); + vfs_dq_free_block(inode, 1); udf_add_free_space(sbi, sbi->s_partition, 1); } } @@ -261,11 +261,11 @@ static int udf_bitmap_prealloc_blocks(struct super_block *sb, while (bit < (sb->s_blocksize << 3) && block_count > 0) { if (!udf_test_bit(bit, bh->b_data)) goto out; - else if (DQUOT_PREALLOC_BLOCK(inode, 1)) + else if (vfs_dq_prealloc_block(inode, 1)) goto out; else if (!udf_clear_bit(bit, bh->b_data)) { udf_debug("bit already cleared for block %d\n", bit); - DQUOT_FREE_BLOCK(inode, 1); + vfs_dq_free_block(inode, 1); goto out; } block_count--; @@ -393,7 +393,7 @@ got_block: /* * Check quota for allocation of this block. */ - if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) { + if (inode && vfs_dq_alloc_block(inode, 1)) { mutex_unlock(&sbi->s_alloc_mutex); *err = -EDQUOT; return 0; @@ -452,7 +452,7 @@ static void udf_table_free_blocks(struct super_block *sb, /* We do this up front - There are some error conditions that could occure, but.. oh well */ if (inode) - DQUOT_FREE_BLOCK(inode, count); + vfs_dq_free_block(inode, count); if (udf_add_free_space(sbi, sbi->s_partition, count)) mark_buffer_dirty(sbi->s_lvid_bh); @@ -700,7 +700,7 @@ static int udf_table_prealloc_blocks(struct super_block *sb, epos.offset -= adsize; alloc_count = (elen >> sb->s_blocksize_bits); - if (inode && DQUOT_PREALLOC_BLOCK(inode, + if (inode && vfs_dq_prealloc_block(inode, alloc_count > block_count ? block_count : alloc_count)) alloc_count = 0; else if (alloc_count > block_count) { @@ -806,7 +806,7 @@ static int udf_table_new_block(struct super_block *sb, goal_eloc.logicalBlockNum++; goal_elen -= sb->s_blocksize; - if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) { + if (inode && vfs_dq_alloc_block(inode, 1)) { brelse(goal_epos.bh); mutex_unlock(&sbi->s_alloc_mutex); *err = -EDQUOT; diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c index 31fc842..47dbe56 100644 --- a/fs/udf/ialloc.c +++ b/fs/udf/ialloc.c @@ -36,8 +36,8 @@ void udf_free_inode(struct inode *inode) * Note: we must free any quota before locking the superblock, * as writing the quota to disk may need the lock as well. */ - DQUOT_FREE_INODE(inode); - DQUOT_DROP(inode); + vfs_dq_free_inode(inode); + vfs_dq_drop(inode); clear_inode(inode); @@ -154,8 +154,8 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err) insert_inode_hash(inode); mark_inode_dirty(inode); - if (DQUOT_ALLOC_INODE(inode)) { - DQUOT_DROP(inode); + if (vfs_dq_alloc_inode(inode)) { + vfs_dq_drop(inode); inode->i_flags |= S_NOQUOTA; inode->i_nlink = 0; iput(inode); -- cgit v0.10.2 From c94d2a22f26bdb11d3dd817669b940a8c76a8cad Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 26 Jan 2009 17:22:32 +0100 Subject: jfs: Use lowercase names of quota functions Use lowercase names of quota functions instead of old uppercase ones. 
Signed-off-by: Jan Kara Acked-by: Dave Kleikamp diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c index d3e5c33..a166c16 100644 --- a/fs/jfs/acl.c +++ b/fs/jfs/acl.c @@ -233,7 +233,7 @@ int jfs_setattr(struct dentry *dentry, struct iattr *iattr) if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) || (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) { - if (DQUOT_TRANSFER(inode, iattr)) + if (vfs_dq_transfer(inode, iattr)) return -EDQUOT; } diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index b00ee9f..b2ae190 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c @@ -158,9 +158,9 @@ void jfs_delete_inode(struct inode *inode) /* * Free the inode from the quota allocation. */ - DQUOT_INIT(inode); - DQUOT_FREE_INODE(inode); - DQUOT_DROP(inode); + vfs_dq_init(inode); + vfs_dq_free_inode(inode); + vfs_dq_drop(inode); } clear_inode(inode); diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c index 4dcc058..925871e 100644 --- a/fs/jfs/jfs_dtree.c +++ b/fs/jfs/jfs_dtree.c @@ -381,10 +381,10 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot) * It's time to move the inline table to an external * page and begin to build the xtree */ - if (DQUOT_ALLOC_BLOCK(ip, sbi->nbperpage)) + if (vfs_dq_alloc_block(ip, sbi->nbperpage)) goto clean_up; if (dbAlloc(ip, 0, sbi->nbperpage, &xaddr)) { - DQUOT_FREE_BLOCK(ip, sbi->nbperpage); + vfs_dq_free_block(ip, sbi->nbperpage); goto clean_up; } @@ -408,7 +408,7 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot) memcpy(&jfs_ip->i_dirtable, temp_table, sizeof (temp_table)); dbFree(ip, xaddr, sbi->nbperpage); - DQUOT_FREE_BLOCK(ip, sbi->nbperpage); + vfs_dq_free_block(ip, sbi->nbperpage); goto clean_up; } ip->i_size = PSIZE; @@ -1027,7 +1027,7 @@ static int dtSplitUp(tid_t tid, n = xlen; /* Allocate blocks to quota. */ - if (DQUOT_ALLOC_BLOCK(ip, n)) { + if (vfs_dq_alloc_block(ip, n)) { rc = -EDQUOT; goto extendOut; } @@ -1308,7 +1308,7 @@ static int dtSplitUp(tid_t tid, /* Rollback quota allocation */ if (rc && quota_allocation) - DQUOT_FREE_BLOCK(ip, quota_allocation); + vfs_dq_free_block(ip, quota_allocation); dtSplitUp_Exit: @@ -1369,7 +1369,7 @@ static int dtSplitPage(tid_t tid, struct inode *ip, struct dtsplit * split, return -EIO; /* Allocate blocks to quota. */ - if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) { + if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) { release_metapage(rmp); return -EDQUOT; } @@ -1916,7 +1916,7 @@ static int dtSplitRoot(tid_t tid, rp = rmp->data; /* Allocate blocks to quota. */ - if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) { + if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) { release_metapage(rmp); return -EDQUOT; } @@ -2287,7 +2287,7 @@ static int dtDeleteUp(tid_t tid, struct inode *ip, xlen = lengthPXD(&fp->header.self); /* Free quota allocation. */ - DQUOT_FREE_BLOCK(ip, xlen); + vfs_dq_free_block(ip, xlen); /* free/invalidate its buffer page */ discard_metapage(fmp); @@ -2363,7 +2363,7 @@ static int dtDeleteUp(tid_t tid, struct inode *ip, xlen = lengthPXD(&p->header.self); /* Free quota allocation */ - DQUOT_FREE_BLOCK(ip, xlen); + vfs_dq_free_block(ip, xlen); /* free/invalidate its buffer page */ discard_metapage(mp); diff --git a/fs/jfs/jfs_extent.c b/fs/jfs/jfs_extent.c index 7ae1e32..169802e 100644 --- a/fs/jfs/jfs_extent.c +++ b/fs/jfs/jfs_extent.c @@ -141,7 +141,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr) } /* Allocate blocks to quota. 
*/ - if (DQUOT_ALLOC_BLOCK(ip, nxlen)) { + if (vfs_dq_alloc_block(ip, nxlen)) { dbFree(ip, nxaddr, (s64) nxlen); mutex_unlock(&JFS_IP(ip)->commit_mutex); return -EDQUOT; @@ -164,7 +164,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr) */ if (rc) { dbFree(ip, nxaddr, nxlen); - DQUOT_FREE_BLOCK(ip, nxlen); + vfs_dq_free_block(ip, nxlen); mutex_unlock(&JFS_IP(ip)->commit_mutex); return (rc); } @@ -256,7 +256,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr) goto exit; /* Allocat blocks to quota. */ - if (DQUOT_ALLOC_BLOCK(ip, nxlen)) { + if (vfs_dq_alloc_block(ip, nxlen)) { dbFree(ip, nxaddr, (s64) nxlen); mutex_unlock(&JFS_IP(ip)->commit_mutex); return -EDQUOT; @@ -297,7 +297,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr) /* extend the extent */ if ((rc = xtExtend(0, ip, xoff + xlen, (int) nextend, 0))) { dbFree(ip, xaddr + xlen, delta); - DQUOT_FREE_BLOCK(ip, nxlen); + vfs_dq_free_block(ip, nxlen); goto exit; } } else { @@ -308,7 +308,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr) */ if ((rc = xtTailgate(0, ip, xoff, (int) ntail, nxaddr, 0))) { dbFree(ip, nxaddr, nxlen); - DQUOT_FREE_BLOCK(ip, nxlen); + vfs_dq_free_block(ip, nxlen); goto exit; } } diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c index d4d142c..dc0e021 100644 --- a/fs/jfs/jfs_inode.c +++ b/fs/jfs/jfs_inode.c @@ -116,7 +116,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode) /* * Allocate inode to quota. */ - if (DQUOT_ALLOC_INODE(inode)) { + if (vfs_dq_alloc_inode(inode)) { rc = -EDQUOT; goto fail_drop; } @@ -162,7 +162,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode) return inode; fail_drop: - DQUOT_DROP(inode); + vfs_dq_drop(inode); inode->i_flags |= S_NOQUOTA; fail_unlock: inode->i_nlink = 0; diff --git a/fs/jfs/jfs_xtree.c b/fs/jfs/jfs_xtree.c index ae3acaf..a27e26c 100644 --- a/fs/jfs/jfs_xtree.c +++ b/fs/jfs/jfs_xtree.c @@ -846,10 +846,10 @@ int xtInsert(tid_t tid, /* transaction id */ hint = addressXAD(xad) + lengthXAD(xad) - 1; } else hint = 0; - if ((rc = DQUOT_ALLOC_BLOCK(ip, xlen))) + if ((rc = vfs_dq_alloc_block(ip, xlen))) goto out; if ((rc = dbAlloc(ip, hint, (s64) xlen, &xaddr))) { - DQUOT_FREE_BLOCK(ip, xlen); + vfs_dq_free_block(ip, xlen); goto out; } } @@ -878,7 +878,7 @@ int xtInsert(tid_t tid, /* transaction id */ /* undo data extent allocation */ if (*xaddrp == 0) { dbFree(ip, xaddr, (s64) xlen); - DQUOT_FREE_BLOCK(ip, xlen); + vfs_dq_free_block(ip, xlen); } return rc; } @@ -1246,7 +1246,7 @@ xtSplitPage(tid_t tid, struct inode *ip, rbn = addressPXD(pxd); /* Allocate blocks to quota. */ - if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) { + if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) { rc = -EDQUOT; goto clean_up; } @@ -1456,7 +1456,7 @@ xtSplitPage(tid_t tid, struct inode *ip, /* Rollback quota allocation. */ if (quota_allocation) - DQUOT_FREE_BLOCK(ip, quota_allocation); + vfs_dq_free_block(ip, quota_allocation); return (rc); } @@ -1513,7 +1513,7 @@ xtSplitRoot(tid_t tid, return -EIO; /* Allocate blocks to quota. 
*/ - if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) { + if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) { release_metapage(rmp); return -EDQUOT; } @@ -3941,7 +3941,7 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag) ip->i_size = newsize; /* update quota allocation to reflect freed blocks */ - DQUOT_FREE_BLOCK(ip, nfreed); + vfs_dq_free_block(ip, nfreed); /* * free tlock of invalidated pages diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c index b4de56b..9feaa04 100644 --- a/fs/jfs/namei.c +++ b/fs/jfs/namei.c @@ -356,7 +356,7 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry) jfs_info("jfs_rmdir: dip:0x%p name:%s", dip, dentry->d_name.name); /* Init inode for quota operations. */ - DQUOT_INIT(ip); + vfs_dq_init(ip); /* directory must be empty to be removed */ if (!dtEmpty(ip)) { @@ -483,7 +483,7 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry) jfs_info("jfs_unlink: dip:0x%p name:%s", dip, dentry->d_name.name); /* Init inode for quota operations. */ - DQUOT_INIT(ip); + vfs_dq_init(ip); if ((rc = get_UCSname(&dname, dentry))) goto out; @@ -1136,7 +1136,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry, } else if (new_ip) { IWRITE_LOCK(new_ip, RDWRLOCK_NORMAL); /* Init inode for quota operations. */ - DQUOT_INIT(new_ip); + vfs_dq_init(new_ip); } /* diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c index 9b7f2cd..61dfa81 100644 --- a/fs/jfs/xattr.c +++ b/fs/jfs/xattr.c @@ -260,14 +260,14 @@ static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size, nblocks = (size + (sb->s_blocksize - 1)) >> sb->s_blocksize_bits; /* Allocate new blocks to quota. */ - if (DQUOT_ALLOC_BLOCK(ip, nblocks)) { + if (vfs_dq_alloc_block(ip, nblocks)) { return -EDQUOT; } rc = dbAlloc(ip, INOHINT(ip), nblocks, &blkno); if (rc) { /*Rollback quota allocation. */ - DQUOT_FREE_BLOCK(ip, nblocks); + vfs_dq_free_block(ip, nblocks); return rc; } @@ -332,7 +332,7 @@ static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size, failed: /* Rollback quota allocation. */ - DQUOT_FREE_BLOCK(ip, nblocks); + vfs_dq_free_block(ip, nblocks); dbFree(ip, blkno, nblocks); return rc; @@ -538,7 +538,7 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size) if (blocks_needed > current_blocks) { /* Allocate new blocks to quota. */ - if (DQUOT_ALLOC_BLOCK(inode, blocks_needed)) + if (vfs_dq_alloc_block(inode, blocks_needed)) return -EDQUOT; quota_allocation = blocks_needed; @@ -602,7 +602,7 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size) clean_up: /* Rollback quota allocation */ if (quota_allocation) - DQUOT_FREE_BLOCK(inode, quota_allocation); + vfs_dq_free_block(inode, quota_allocation); return (rc); } @@ -677,7 +677,7 @@ static int ea_put(tid_t tid, struct inode *inode, struct ea_buffer *ea_buf, /* If old blocks exist, they must be removed from quota allocation. */ if (old_blocks) - DQUOT_FREE_BLOCK(inode, old_blocks); + vfs_dq_free_block(inode, old_blocks); inode->i_ctime = CURRENT_TIME; -- cgit v0.10.2 From 90c0af05a5a486f7709195e20bb26ad8f67ba5af Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 9 Feb 2009 22:22:21 +0100 Subject: nfsd: Use lowercase names of quota functions Use lowercase names of quota functions instead of old uppercase ones. 
CC: bfields@fieldses.org CC: neilb@suse.de Signed-off-by: Jan Kara diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 6e50aaa..ad38fc9 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -356,7 +356,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap, put_write_access(inode); goto out_nfserr; } - DQUOT_INIT(inode); + vfs_dq_init(inode); } /* sanitize the mode change */ @@ -723,7 +723,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, else flags = O_WRONLY|O_LARGEFILE; - DQUOT_INIT(inode); + vfs_dq_init(inode); } *filp = dentry_open(dget(dentry), mntget(fhp->fh_export->ex_path.mnt), flags, cred); -- cgit v0.10.2 From bf84c82d000b9820b01f516d13d328f354f8a8ee Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 26 Jan 2009 17:37:01 +0100 Subject: quota: Remove uppercase aliases for quota functions. Since all users have been converted, remove uppercase names of quota functions. Signed-off-by: Jan Kara diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index 69b502e..36353d9 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -454,35 +454,4 @@ static inline void vfs_dq_free_block(struct inode *inode, qsize_t nr) vfs_dq_free_space(inode, nr << inode->i_blkbits); } -/* - * Define uppercase equivalents for compatibility with old function names - * Can go away when we think all users have been converted (15/04/2008) - */ -#define DQUOT_INIT(inode) vfs_dq_init(inode) -#define DQUOT_DROP(inode) vfs_dq_drop(inode) -#define DQUOT_PREALLOC_SPACE_NODIRTY(inode, nr) \ - vfs_dq_prealloc_space_nodirty(inode, nr) -#define DQUOT_PREALLOC_SPACE(inode, nr) vfs_dq_prealloc_space(inode, nr) -#define DQUOT_ALLOC_SPACE_NODIRTY(inode, nr) \ - vfs_dq_alloc_space_nodirty(inode, nr) -#define DQUOT_ALLOC_SPACE(inode, nr) vfs_dq_alloc_space(inode, nr) -#define DQUOT_PREALLOC_BLOCK_NODIRTY(inode, nr) \ - vfs_dq_prealloc_block_nodirty(inode, nr) -#define DQUOT_PREALLOC_BLOCK(inode, nr) vfs_dq_prealloc_block(inode, nr) -#define DQUOT_ALLOC_BLOCK_NODIRTY(inode, nr) \ - vfs_dq_alloc_block_nodirty(inode, nr) -#define DQUOT_ALLOC_BLOCK(inode, nr) vfs_dq_alloc_block(inode, nr) -#define DQUOT_ALLOC_INODE(inode) vfs_dq_alloc_inode(inode) -#define DQUOT_FREE_SPACE_NODIRTY(inode, nr) \ - vfs_dq_free_space_nodirty(inode, nr) -#define DQUOT_FREE_SPACE(inode, nr) vfs_dq_free_space(inode, nr) -#define DQUOT_FREE_BLOCK_NODIRTY(inode, nr) \ - vfs_dq_free_block_nodirty(inode, nr) -#define DQUOT_FREE_BLOCK(inode, nr) vfs_dq_free_block(inode, nr) -#define DQUOT_FREE_INODE(inode) vfs_dq_free_inode(inode) -#define DQUOT_TRANSFER(inode, iattr) vfs_dq_transfer(inode, iattr) -#define DQUOT_SYNC(sb) vfs_dq_sync(sb) -#define DQUOT_OFF(sb, remount) vfs_dq_off(sb, remount) -#define DQUOT_ON_REMOUNT(sb) vfs_dq_quota_on_remount(sb) - #endif /* _LINUX_QUOTAOPS_ */ -- cgit v0.10.2 From 7a2435d874388271cfe5046d180751352a1d30a2 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Tue, 27 Jan 2009 01:47:11 +0100 Subject: quota: Remove superfluous inlines Remove inlines of large functions to decrease code size (saved 1543 bytes). 
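The pattern is simply to drop the inline keyword from static functions whose bodies are too large to be worth duplicating at every call site and let the compiler decide; for instance, for one of the functions touched below (declarations only, bodies unchanged):

	/* before: the compiler is asked to inline a large function at each caller */
	static inline struct dquot *find_dquot(unsigned int hashent,
			struct super_block *sb, unsigned int id, int type);

	/* after: the compiler may still inline it, but is free not to */
	static struct dquot *find_dquot(unsigned int hashent,
			struct super_block *sb, unsigned int id, int type);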
Signed-off-by: Jan Kara diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 4881db3..4a37b8e 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -243,7 +243,8 @@ static inline void remove_dquot_hash(struct dquot *dquot) hlist_del_init(&dquot->dq_hash); } -static inline struct dquot *find_dquot(unsigned int hashent, struct super_block *sb, unsigned int id, int type) +static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb, + unsigned int id, int type) { struct hlist_node *node; struct dquot *dquot; @@ -935,7 +936,7 @@ void dquot_free_reserved_space(struct dquot *dquot, qsize_t number) dquot->dq_dqb.dqb_rsvspace -= number; } -static inline void dquot_decr_inodes(struct dquot *dquot, qsize_t number) +static void dquot_decr_inodes(struct dquot *dquot, qsize_t number) { if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE || dquot->dq_dqb.dqb_curinodes >= number) @@ -947,7 +948,7 @@ static inline void dquot_decr_inodes(struct dquot *dquot, qsize_t number) clear_bit(DQ_INODES_B, &dquot->dq_flags); } -static inline void dquot_decr_space(struct dquot *dquot, qsize_t number) +static void dquot_decr_space(struct dquot *dquot, qsize_t number) { if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE || dquot->dq_dqb.dqb_curspace >= number) @@ -974,7 +975,7 @@ static int warning_issued(struct dquot *dquot, const int warntype) #ifdef CONFIG_PRINT_QUOTA_WARNING static int flag_print_warnings = 1; -static inline int need_print_warning(struct dquot *dquot) +static int need_print_warning(struct dquot *dquot) { if (!flag_print_warnings) return 0; @@ -1109,7 +1110,7 @@ err_out: * * Note that this function can sleep. */ -static inline void flush_warnings(struct dquot * const *dquots, char *warntype) +static void flush_warnings(struct dquot *const *dquots, char *warntype) { int i; @@ -1125,7 +1126,7 @@ static inline void flush_warnings(struct dquot * const *dquots, char *warntype) } } -static inline char ignore_hardlimit(struct dquot *dquot) +static int ignore_hardlimit(struct dquot *dquot) { struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type]; diff --git a/fs/quota/quota.c b/fs/quota/quota.c index d76ada9..89541bc 100644 --- a/fs/quota/quota.c +++ b/fs/quota/quota.c @@ -341,7 +341,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void * look up a superblock on which quota ops will be performed * - use the name of a block device to find the superblock thereon */ -static inline struct super_block *quotactl_block(const char __user *special) +static struct super_block *quotactl_block(const char __user *special) { #ifdef CONFIG_BLOCK struct block_device *bdev; diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c index 3a5a90c..48e35a4 100644 --- a/fs/quota/quota_tree.c +++ b/fs/quota/quota_tree.c @@ -33,7 +33,7 @@ static int get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth) } /* Number of entries in one blocks */ -static inline int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info) +static int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info) { return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader)) / info->dqi_entry_size; @@ -47,7 +47,7 @@ static char *getdqbuf(size_t size) return buf; } -static inline ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) +static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) { struct super_block *sb = info->dqi_sb; @@ -56,7 +56,7 @@ static inline ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *bu info->dqi_usable_bs, 
blk << info->dqi_blocksize_bits); } -static inline ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) +static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) { struct super_block *sb = info->dqi_sb; -- cgit v0.10.2 From 268157ba673e2a868c167211e39fcad4ada5fd1e Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Tue, 27 Jan 2009 15:47:22 +0100 Subject: quota: Coding style fixes Wrap long lines, remove assignments from conditions, rewrite two overcomplicated for loops. Signed-off-by: Jan Kara diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 4a37b8e..a1bd5ea 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -156,7 +156,9 @@ void unregister_quota_format(struct quota_format_type *fmt) struct quota_format_type **actqf; spin_lock(&dq_list_lock); - for (actqf = "a_formats; *actqf && *actqf != fmt; actqf = &(*actqf)->qf_next); + for (actqf = "a_formats; *actqf && *actqf != fmt; + actqf = &(*actqf)->qf_next) + ; if (*actqf) *actqf = (*actqf)->qf_next; spin_unlock(&dq_list_lock); @@ -168,18 +170,25 @@ static struct quota_format_type *find_quota_format(int id) struct quota_format_type *actqf; spin_lock(&dq_list_lock); - for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next); + for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; + actqf = actqf->qf_next) + ; if (!actqf || !try_module_get(actqf->qf_owner)) { int qm; spin_unlock(&dq_list_lock); - for (qm = 0; module_names[qm].qm_fmt_id && module_names[qm].qm_fmt_id != id; qm++); - if (!module_names[qm].qm_fmt_id || request_module(module_names[qm].qm_mod_name)) + for (qm = 0; module_names[qm].qm_fmt_id && + module_names[qm].qm_fmt_id != id; qm++) + ; + if (!module_names[qm].qm_fmt_id || + request_module(module_names[qm].qm_mod_name)) return NULL; spin_lock(&dq_list_lock); - for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next); + for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; + actqf = actqf->qf_next) + ; if (actqf && !try_module_get(actqf->qf_owner)) actqf = NULL; } @@ -234,7 +243,8 @@ hashfn(const struct super_block *sb, unsigned int id, int type) */ static inline void insert_dquot_hash(struct dquot *dquot) { - struct hlist_head *head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type); + struct hlist_head *head; + head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type); hlist_add_head(&dquot->dq_hash, head); } @@ -251,7 +261,8 @@ static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb, hlist_for_each (node, dquot_hash+hashent) { dquot = hlist_entry(node, struct dquot, dq_hash); - if (dquot->dq_sb == sb && dquot->dq_id == id && dquot->dq_type == type) + if (dquot->dq_sb == sb && dquot->dq_id == id && + dquot->dq_type == type) return dquot; } return NULL; @@ -351,8 +362,10 @@ int dquot_acquire(struct dquot *dquot) if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) { ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot); /* Write the info if needed */ - if (info_dirty(&dqopt->info[dquot->dq_type])) - ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type); + if (info_dirty(&dqopt->info[dquot->dq_type])) { + ret2 = dqopt->ops[dquot->dq_type]->write_file_info( + dquot->dq_sb, dquot->dq_type); + } if (ret < 0) goto out_iolock; if (ret2 < 0) { @@ -387,8 +400,10 @@ int dquot_commit(struct dquot *dquot) * => we have better not writing it */ if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot); - if 
(info_dirty(&dqopt->info[dquot->dq_type])) - ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type); + if (info_dirty(&dqopt->info[dquot->dq_type])) { + ret2 = dqopt->ops[dquot->dq_type]->write_file_info( + dquot->dq_sb, dquot->dq_type); + } if (ret >= 0) ret = ret2; } @@ -414,8 +429,10 @@ int dquot_release(struct dquot *dquot) if (dqopt->ops[dquot->dq_type]->release_dqblk) { ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot); /* Write the info */ - if (info_dirty(&dqopt->info[dquot->dq_type])) - ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type); + if (info_dirty(&dqopt->info[dquot->dq_type])) { + ret2 = dqopt->ops[dquot->dq_type]->write_file_info( + dquot->dq_sb, dquot->dq_type); + } if (ret >= 0) ret = ret2; } @@ -543,7 +560,8 @@ int vfs_quota_sync(struct super_block *sb, int type) spin_lock(&dq_list_lock); dirty = &dqopt->info[cnt].dqi_dirty_list; while (!list_empty(dirty)) { - dquot = list_first_entry(dirty, struct dquot, dq_dirty); + dquot = list_first_entry(dirty, struct dquot, + dq_dirty); /* Dirty and inactive can be only bad dquot... */ if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { clear_dquot_dirty(dquot); @@ -763,11 +781,12 @@ we_slept: dqstats.lookups++; spin_unlock(&dq_list_lock); } - /* Wait for dq_lock - after this we know that either dquot_release() is already - * finished or it will be canceled due to dq_count > 1 test */ + /* Wait for dq_lock - after this we know that either dquot_release() is + * already finished or it will be canceled due to dq_count > 1 test */ wait_on_dquot(dquot); - /* Read the dquot and instantiate it (everything done only if needed) */ - if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && sb->dq_op->acquire_dquot(dquot) < 0) { + /* Read the dquot / allocate space in quota file */ + if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && + sb->dq_op->acquire_dquot(dquot) < 0) { dqput(dquot); dquot = NULL; goto out; @@ -828,7 +847,10 @@ static void add_dquot_ref(struct super_block *sb, int type) iput(old_inode); } -/* Return 0 if dqput() won't block (note that 1 doesn't necessarily mean blocking) */ +/* + * Return 0 if dqput() won't block. + * (note that 1 doesn't necessarily mean blocking) + */ static inline int dqput_blocks(struct dquot *dquot) { if (atomic_read(&dquot->dq_count) <= 1) @@ -836,8 +858,11 @@ static inline int dqput_blocks(struct dquot *dquot) return 0; } -/* Remove references to dquots from inode - add dquot to list for freeing if needed */ -/* We can't race with anybody because we hold dqptr_sem for writing... */ +/* + * Remove references to dquots from inode and add dquot to list for freeing + * if we have the last referece to dquot + * We can't race with anybody because we hold dqptr_sem for writing... + */ static int remove_inode_dquot_ref(struct inode *inode, int type, struct list_head *tofree_head) { @@ -851,7 +876,9 @@ static int remove_inode_dquot_ref(struct inode *inode, int type, printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count)); #endif spin_lock(&dq_list_lock); - list_add(&dquot->dq_free, tofree_head); /* As dquot must have currently users it can't be on the free list... */ + /* As dquot must have currently users it can't be on + * the free list... 
*/ + list_add(&dquot->dq_free, tofree_head); spin_unlock(&dq_list_lock); return 1; } @@ -861,19 +888,22 @@ static int remove_inode_dquot_ref(struct inode *inode, int type, return 0; } -/* Free list of dquots - called from inode.c */ -/* dquots are removed from inodes, no new references can be got so we are the only ones holding reference */ +/* + * Free list of dquots + * Dquots are removed from inodes and no new references can be got so we are + * the only ones holding reference + */ static void put_dquot_list(struct list_head *tofree_head) { struct list_head *act_head; struct dquot *dquot; act_head = tofree_head->next; - /* So now we have dquots on the list... Just free them */ while (act_head != tofree_head) { dquot = list_entry(act_head, struct dquot, dq_free); act_head = act_head->next; - list_del_init(&dquot->dq_free); /* Remove dquot from the list so we won't have problems... */ + /* Remove dquot from the list so we won't have problems... */ + list_del_init(&dquot->dq_free); dqput(dquot); } } @@ -1131,37 +1161,42 @@ static int ignore_hardlimit(struct dquot *dquot) struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type]; return capable(CAP_SYS_RESOURCE) && - (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD || !(info->dqi_flags & V1_DQF_RSQUASH)); + (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD || + !(info->dqi_flags & V1_DQF_RSQUASH)); } /* needs dq_data_lock */ static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype) { + qsize_t newinodes = dquot->dq_dqb.dqb_curinodes + inodes; + *warntype = QUOTA_NL_NOWARN; if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) || test_bit(DQ_FAKE_B, &dquot->dq_flags)) return QUOTA_OK; if (dquot->dq_dqb.dqb_ihardlimit && - (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_ihardlimit && + newinodes > dquot->dq_dqb.dqb_ihardlimit && !ignore_hardlimit(dquot)) { *warntype = QUOTA_NL_IHARDWARN; return NO_QUOTA; } if (dquot->dq_dqb.dqb_isoftlimit && - (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit && - dquot->dq_dqb.dqb_itime && get_seconds() >= dquot->dq_dqb.dqb_itime && + newinodes > dquot->dq_dqb.dqb_isoftlimit && + dquot->dq_dqb.dqb_itime && + get_seconds() >= dquot->dq_dqb.dqb_itime && !ignore_hardlimit(dquot)) { *warntype = QUOTA_NL_ISOFTLONGWARN; return NO_QUOTA; } if (dquot->dq_dqb.dqb_isoftlimit && - (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit && + newinodes > dquot->dq_dqb.dqb_isoftlimit && dquot->dq_dqb.dqb_itime == 0) { *warntype = QUOTA_NL_ISOFTWARN; - dquot->dq_dqb.dqb_itime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace; + dquot->dq_dqb.dqb_itime = get_seconds() + + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace; } return QUOTA_OK; @@ -1171,9 +1206,10 @@ static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype) static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype) { qsize_t tspace; + struct super_block *sb = dquot->dq_sb; *warntype = QUOTA_NL_NOWARN; - if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) || + if (!sb_has_quota_limits_enabled(sb, dquot->dq_type) || test_bit(DQ_FAKE_B, &dquot->dq_flags)) return QUOTA_OK; @@ -1190,7 +1226,8 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war if (dquot->dq_dqb.dqb_bsoftlimit && tspace > dquot->dq_dqb.dqb_bsoftlimit && - dquot->dq_dqb.dqb_btime && get_seconds() >= dquot->dq_dqb.dqb_btime && + dquot->dq_dqb.dqb_btime && + get_seconds() >= dquot->dq_dqb.dqb_btime && 
!ignore_hardlimit(dquot)) { if (!prealloc) *warntype = QUOTA_NL_BSOFTLONGWARN; @@ -1202,7 +1239,8 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war dquot->dq_dqb.dqb_btime == 0) { if (!prealloc) { *warntype = QUOTA_NL_BSOFTWARN; - dquot->dq_dqb.dqb_btime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_bgrace; + dquot->dq_dqb.dqb_btime = get_seconds() + + sb_dqopt(sb)->info[dquot->dq_type].dqi_bgrace; } else /* @@ -1217,15 +1255,18 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war static int info_idq_free(struct dquot *dquot, qsize_t inodes) { + qsize_t newinodes; + if (test_bit(DQ_FAKE_B, &dquot->dq_flags) || dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit || !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type)) return QUOTA_NL_NOWARN; - if (dquot->dq_dqb.dqb_curinodes - inodes <= dquot->dq_dqb.dqb_isoftlimit) + newinodes = dquot->dq_dqb.dqb_curinodes - inodes; + if (newinodes <= dquot->dq_dqb.dqb_isoftlimit) return QUOTA_NL_ISOFTBELOW; if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit && - dquot->dq_dqb.dqb_curinodes - inodes < dquot->dq_dqb.dqb_ihardlimit) + newinodes < dquot->dq_dqb.dqb_ihardlimit) return QUOTA_NL_IHARDBELOW; return QUOTA_NL_NOWARN; } @@ -1466,7 +1507,8 @@ int dquot_alloc_inode(const struct inode *inode, qsize_t number) for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (!inode->i_dquot[cnt]) continue; - if (check_idq(inode->i_dquot[cnt], number, warntype+cnt) == NO_QUOTA) + if (check_idq(inode->i_dquot[cnt], number, warntype+cnt) + == NO_QUOTA) goto warn_put_all; } @@ -1673,19 +1715,13 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr) transfer_from[cnt] = NULL; transfer_to[cnt] = NULL; warntype_to[cnt] = QUOTA_NL_NOWARN; - switch (cnt) { - case USRQUOTA: - if (!chuid) - continue; - transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_uid, cnt); - break; - case GRPQUOTA: - if (!chgid) - continue; - transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_gid, cnt); - break; - } } + if (chuid) + transfer_to[USRQUOTA] = dqget(inode->i_sb, iattr->ia_uid, + USRQUOTA); + if (chgid) + transfer_to[GRPQUOTA] = dqget(inode->i_sb, iattr->ia_gid, + GRPQUOTA); down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); /* Now recheck reliably when holding dqptr_sem */ @@ -1881,8 +1917,8 @@ int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags) drop_dquot_ref(sb, cnt); invalidate_dquots(sb, cnt); /* - * Now all dquots should be invalidated, all writes done so we should be only - * users of the info. No locks needed. + * Now all dquots should be invalidated, all writes done so we + * should be only users of the info. No locks needed. 
*/ if (info_dirty(&dqopt->info[cnt])) sb->dq_op->write_info(sb, cnt); @@ -1920,10 +1956,12 @@ int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags) /* If quota was reenabled in the meantime, we have * nothing to do */ if (!sb_has_quota_loaded(sb, cnt)) { - mutex_lock_nested(&toputinode[cnt]->i_mutex, I_MUTEX_QUOTA); + mutex_lock_nested(&toputinode[cnt]->i_mutex, + I_MUTEX_QUOTA); toputinode[cnt]->i_flags &= ~(S_IMMUTABLE | S_NOATIME | S_NOQUOTA); - truncate_inode_pages(&toputinode[cnt]->i_data, 0); + truncate_inode_pages(&toputinode[cnt]->i_data, + 0); mutex_unlock(&toputinode[cnt]->i_mutex); mark_inode_dirty(toputinode[cnt]); } @@ -2013,7 +2051,8 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id, * possible) Also nobody should write to the file - we use * special IO operations which ignore the immutable bit. */ down_write(&dqopt->dqptr_sem); - oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE | S_NOQUOTA); + oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE | + S_NOQUOTA); inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE; up_write(&dqopt->dqptr_sem); sb->dq_op->drop(inode); @@ -2032,7 +2071,8 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id, dqopt->info[type].dqi_fmt_id = format_id; INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list); mutex_lock(&dqopt->dqio_mutex); - if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) { + error = dqopt->ops[type]->read_file_info(sb, type); + if (error < 0) { mutex_unlock(&dqopt->dqio_mutex); goto out_file_init; } @@ -2254,7 +2294,8 @@ static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di) spin_unlock(&dq_data_lock); } -int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di) +int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, + struct if_dqblk *di) { struct dquot *dquot; @@ -2318,22 +2359,25 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di) } if (check_blim) { - if (!dm->dqb_bsoftlimit || dm->dqb_curspace < dm->dqb_bsoftlimit) { + if (!dm->dqb_bsoftlimit || + dm->dqb_curspace < dm->dqb_bsoftlimit) { dm->dqb_btime = 0; clear_bit(DQ_BLKS_B, &dquot->dq_flags); - } - else if (!(di->dqb_valid & QIF_BTIME)) /* Set grace only if user hasn't provided his own... */ + } else if (!(di->dqb_valid & QIF_BTIME)) + /* Set grace only if user hasn't provided his own... */ dm->dqb_btime = get_seconds() + dqi->dqi_bgrace; } if (check_ilim) { - if (!dm->dqb_isoftlimit || dm->dqb_curinodes < dm->dqb_isoftlimit) { + if (!dm->dqb_isoftlimit || + dm->dqb_curinodes < dm->dqb_isoftlimit) { dm->dqb_itime = 0; clear_bit(DQ_INODES_B, &dquot->dq_flags); - } - else if (!(di->dqb_valid & QIF_ITIME)) /* Set grace only if user hasn't provided his own... */ + } else if (!(di->dqb_valid & QIF_ITIME)) + /* Set grace only if user hasn't provided his own... 
*/ dm->dqb_itime = get_seconds() + dqi->dqi_igrace; } - if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit || dm->dqb_isoftlimit) + if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit || + dm->dqb_isoftlimit) clear_bit(DQ_FAKE_B, &dquot->dq_flags); else set_bit(DQ_FAKE_B, &dquot->dq_flags); @@ -2343,7 +2387,8 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di) return 0; } -int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di) +int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, + struct if_dqblk *di) { struct dquot *dquot; int rc; @@ -2400,7 +2445,8 @@ int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) if (ii->dqi_valid & IIF_IGRACE) mi->dqi_igrace = ii->dqi_igrace; if (ii->dqi_valid & IIF_FLAGS) - mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) | (ii->dqi_flags & DQF_MASK); + mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) | + (ii->dqi_flags & DQF_MASK); spin_unlock(&dq_data_lock); mark_info_dirty(sb, type); /* Force write to disk */ @@ -2559,7 +2605,8 @@ static int __init dquot_init(void) #ifdef CONFIG_QUOTA_NETLINK_INTERFACE if (genl_register_family("a_genl_family) != 0) - printk(KERN_ERR "VFS: Failed to create quota netlink interface.\n"); + printk(KERN_ERR + "VFS: Failed to create quota netlink interface.\n"); #endif return 0; diff --git a/fs/quota/quota.c b/fs/quota/quota.c index 89541bc..b7f5a46 100644 --- a/fs/quota/quota.c +++ b/fs/quota/quota.c @@ -20,7 +20,8 @@ #include /* Check validity of generic quotactl commands */ -static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id) +static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, + qid_t id) { if (type >= MAXQUOTAS) return -EINVAL; @@ -72,7 +73,8 @@ static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid case Q_SETINFO: case Q_SETQUOTA: case Q_GETQUOTA: - /* This is just informative test so we are satisfied without a lock */ + /* This is just an informative test so we are satisfied + * without the lock */ if (!sb_has_quota_active(sb, type)) return -ESRCH; } @@ -92,7 +94,8 @@ static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid } /* Check validity of XFS Quota Manager commands */ -static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id) +static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd, + qid_t id) { if (type >= XQM_MAXQUOTAS) return -EINVAL; @@ -142,7 +145,8 @@ static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t i return 0; } -static int check_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id) +static int check_quotactl_valid(struct super_block *sb, int type, int cmd, + qid_t id) { int error; @@ -180,7 +184,8 @@ static void quota_sync_sb(struct super_block *sb, int type) continue; if (!sb_has_quota_active(sb, cnt)) continue; - mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex, I_MUTEX_QUOTA); + mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex, + I_MUTEX_QUOTA); truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0); mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex); } @@ -200,14 +205,15 @@ void sync_dquots(struct super_block *sb, int type) spin_lock(&sb_lock); restart: list_for_each_entry(sb, &super_blocks, s_list) { - /* This test just improves performance so it needn't be reliable... */ + /* This test just improves performance so it needn't be + * reliable... 
*/ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (type != -1 && type != cnt) continue; if (!sb_has_quota_active(sb, cnt)) continue; if (!info_dirty(&sb_dqopt(sb)->info[cnt]) && - list_empty(&sb_dqopt(sb)->info[cnt].dqi_dirty_list)) + list_empty(&sb_dqopt(sb)->info[cnt].dqi_dirty_list)) continue; break; } @@ -227,7 +233,8 @@ restart: } /* Copy parameters and call proper function */ -static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void __user *addr) +static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, + void __user *addr) { int ret; @@ -235,7 +242,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void case Q_QUOTAON: { char *pathname; - if (IS_ERR(pathname = getname(addr))) + pathname = getname(addr); + if (IS_ERR(pathname)) return PTR_ERR(pathname); ret = sb->s_qcop->quota_on(sb, type, id, pathname, 0); putname(pathname); @@ -261,7 +269,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void case Q_GETINFO: { struct if_dqinfo info; - if ((ret = sb->s_qcop->get_info(sb, type, &info))) + ret = sb->s_qcop->get_info(sb, type, &info); + if (ret) return ret; if (copy_to_user(addr, &info, sizeof(info))) return -EFAULT; @@ -277,7 +286,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void case Q_GETQUOTA: { struct if_dqblk idq; - if ((ret = sb->s_qcop->get_dqblk(sb, type, id, &idq))) + ret = sb->s_qcop->get_dqblk(sb, type, id, &idq); + if (ret) return ret; if (copy_to_user(addr, &idq, sizeof(idq))) return -EFAULT; @@ -322,7 +332,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void case Q_XGETQUOTA: { struct fs_disk_quota fdq; - if ((ret = sb->s_qcop->get_xquota(sb, type, id, &fdq))) + ret = sb->s_qcop->get_xquota(sb, type, id, &fdq); + if (ret) return ret; if (copy_to_user(addr, &fdq, sizeof(fdq))) return -EFAULT; diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c index 48e35a4..f81f4bc 100644 --- a/fs/quota/quota_tree.c +++ b/fs/quota/quota_tree.c @@ -43,7 +43,8 @@ static char *getdqbuf(size_t size) { char *buf = kmalloc(size, GFP_NOFS); if (!buf) - printk(KERN_WARNING "VFS: Not enough memory for quota buffers.\n"); + printk(KERN_WARNING + "VFS: Not enough memory for quota buffers.\n"); return buf; } @@ -113,7 +114,8 @@ static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk) } /* Remove given block from the list of blocks with free entries */ -static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf, uint blk) +static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf, + uint blk) { char *tmpbuf = getdqbuf(info->dqi_usable_bs); struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; @@ -150,7 +152,9 @@ static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf, uint bl dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0); /* No matter whether write succeeds block is out of list */ if (write_blk(info, blk, buf) < 0) - printk(KERN_ERR "VFS: Can't write block (%u) with free entries.\n", blk); + printk(KERN_ERR + "VFS: Can't write block (%u) with free entries.\n", + blk); return 0; out_buf: kfree(tmpbuf); @@ -158,7 +162,8 @@ out_buf: } /* Insert given block to the beginning of list with free entries */ -static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf, uint blk) +static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf, + uint blk) { char *tmpbuf = getdqbuf(info->dqi_usable_bs); struct qt_disk_dqdbheader *dh 
= (struct qt_disk_dqdbheader *)buf; @@ -230,7 +235,8 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info, return 0; } memset(buf, 0, info->dqi_usable_bs); - /* This is enough as block is already zeroed and entry list is empty... */ + /* This is enough as the block is already zeroed and the entry + * list is empty... */ info->dqi_free_entry = blk; mark_info_dirty(dquot->dq_sb, dquot->dq_type); } @@ -246,10 +252,12 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info, } le16_add_cpu(&dh->dqdh_entries, 1); /* Find free structure in block */ - for (i = 0, ddquot = buf + sizeof(struct qt_disk_dqdbheader); - i < qtree_dqstr_in_blk(info) && !qtree_entry_unused(info, ddquot); - i++, ddquot += info->dqi_entry_size) - ; + ddquot = buf + sizeof(struct qt_disk_dqdbheader); + for (i = 0; i < qtree_dqstr_in_blk(info); i++) { + if (qtree_entry_unused(info, ddquot)) + break; + ddquot += info->dqi_entry_size; + } #ifdef __QUOTA_QT_PARANOIA if (i == qtree_dqstr_in_blk(info)) { printk(KERN_ERR "VFS: find_free_dqentry(): Data block full " @@ -340,7 +348,8 @@ static inline int dq_insert_tree(struct qtree_mem_dqinfo *info, } /* - * We don't have to be afraid of deadlocks as we never have quotas on quota files... + * We don't have to be afraid of deadlocks as we never have quotas on quota + * files... */ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) { @@ -515,10 +524,12 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info, printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk); goto out_buf; } - for (i = 0, ddquot = buf + sizeof(struct qt_disk_dqdbheader); - i < qtree_dqstr_in_blk(info) && !info->dqi_ops->is_id(ddquot, dquot); - i++, ddquot += info->dqi_entry_size) - ; + ddquot = buf + sizeof(struct qt_disk_dqdbheader); + for (i = 0; i < qtree_dqstr_in_blk(info); i++) { + if (info->dqi_ops->is_id(ddquot, dquot)) + break; + ddquot += info->dqi_entry_size; + } if (i == qtree_dqstr_in_blk(info)) { printk(KERN_ERR "VFS: Quota for id %u referenced " "but not present.\n", dquot->dq_id); @@ -632,7 +643,8 @@ EXPORT_SYMBOL(qtree_read_dquot); * the only one operating on dquot (thanks to dq_lock) */ int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) { - if (test_bit(DQ_FAKE_B, &dquot->dq_flags) && !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace)) + if (test_bit(DQ_FAKE_B, &dquot->dq_flags) && + !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace)) return qtree_delete_dquot(info, dquot); return 0; } diff --git a/fs/quota/quota_v1.c b/fs/quota/quota_v1.c index b4af1c6..0edcf42 100644 --- a/fs/quota/quota_v1.c +++ b/fs/quota/quota_v1.c @@ -62,11 +62,14 @@ static int v1_read_dqblk(struct dquot *dquot) /* Set structure to 0s in case read fails/is after end of file */ memset(&dqblk, 0, sizeof(struct v1_disk_dqblk)); - dquot->dq_sb->s_op->quota_read(dquot->dq_sb, type, (char *)&dqblk, sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id)); + dquot->dq_sb->s_op->quota_read(dquot->dq_sb, type, (char *)&dqblk, + sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id)); v1_disk2mem_dqblk(&dquot->dq_dqb, &dqblk); - if (dquot->dq_dqb.dqb_bhardlimit == 0 && dquot->dq_dqb.dqb_bsoftlimit == 0 && - dquot->dq_dqb.dqb_ihardlimit == 0 && dquot->dq_dqb.dqb_isoftlimit == 0) + if (dquot->dq_dqb.dqb_bhardlimit == 0 && + dquot->dq_dqb.dqb_bsoftlimit == 0 && + dquot->dq_dqb.dqb_ihardlimit == 0 && + dquot->dq_dqb.dqb_isoftlimit == 0) set_bit(DQ_FAKE_B, &dquot->dq_flags); dqstats.reads++; @@ -81,13 +84,16 @@ static int v1_commit_dqblk(struct 
dquot *dquot) v1_mem2disk_dqblk(&dqblk, &dquot->dq_dqb); if (dquot->dq_id == 0) { - dqblk.dqb_btime = sb_dqopt(dquot->dq_sb)->info[type].dqi_bgrace; - dqblk.dqb_itime = sb_dqopt(dquot->dq_sb)->info[type].dqi_igrace; + dqblk.dqb_btime = + sb_dqopt(dquot->dq_sb)->info[type].dqi_bgrace; + dqblk.dqb_itime = + sb_dqopt(dquot->dq_sb)->info[type].dqi_igrace; } ret = 0; if (sb_dqopt(dquot->dq_sb)->files[type]) - ret = dquot->dq_sb->s_op->quota_write(dquot->dq_sb, type, (char *)&dqblk, - sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id)); + ret = dquot->dq_sb->s_op->quota_write(dquot->dq_sb, type, + (char *)&dqblk, sizeof(struct v1_disk_dqblk), + v1_dqoff(dquot->dq_id)); if (ret != sizeof(struct v1_disk_dqblk)) { printk(KERN_WARNING "VFS: dquota write failed on dev %s\n", dquot->dq_sb->s_id); @@ -130,15 +136,20 @@ static int v1_check_quota_file(struct super_block *sb, int type) return 0; blocks = isize >> BLOCK_SIZE_BITS; off = isize & (BLOCK_SIZE - 1); - if ((blocks % sizeof(struct v1_disk_dqblk) * BLOCK_SIZE + off) % sizeof(struct v1_disk_dqblk)) + if ((blocks % sizeof(struct v1_disk_dqblk) * BLOCK_SIZE + off) % + sizeof(struct v1_disk_dqblk)) return 0; - /* Doublecheck whether we didn't get file with new format - with old quotactl() this could happen */ - size = sb->s_op->quota_read(sb, type, (char *)&dqhead, sizeof(struct v2_disk_dqheader), 0); + /* Doublecheck whether we didn't get file with new format - with old + * quotactl() this could happen */ + size = sb->s_op->quota_read(sb, type, (char *)&dqhead, + sizeof(struct v2_disk_dqheader), 0); if (size != sizeof(struct v2_disk_dqheader)) return 1; /* Probably not new format */ if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type]) return 1; /* Definitely not new format */ - printk(KERN_INFO "VFS: %s: Refusing to turn on old quota format on given file. It probably contains newer quota format.\n", sb->s_id); + printk(KERN_INFO + "VFS: %s: Refusing to turn on old quota format on given file." + " It probably contains newer quota format.\n", sb->s_id); return 0; /* Seems like a new format file -> refuse it */ } @@ -148,7 +159,9 @@ static int v1_read_file_info(struct super_block *sb, int type) struct v1_disk_dqblk dqblk; int ret; - if ((ret = sb->s_op->quota_read(sb, type, (char *)&dqblk, sizeof(struct v1_disk_dqblk), v1_dqoff(0))) != sizeof(struct v1_disk_dqblk)) { + ret = sb->s_op->quota_read(sb, type, (char *)&dqblk, + sizeof(struct v1_disk_dqblk), v1_dqoff(0)); + if (ret != sizeof(struct v1_disk_dqblk)) { if (ret >= 0) ret = -EIO; goto out; @@ -157,8 +170,10 @@ static int v1_read_file_info(struct super_block *sb, int type) /* limits are stored as unsigned 32-bit data */ dqopt->info[type].dqi_maxblimit = 0xffffffff; dqopt->info[type].dqi_maxilimit = 0xffffffff; - dqopt->info[type].dqi_igrace = dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME; - dqopt->info[type].dqi_bgrace = dqblk.dqb_btime ? dqblk.dqb_btime : MAX_DQ_TIME; + dqopt->info[type].dqi_igrace = + dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME; + dqopt->info[type].dqi_bgrace = + dqblk.dqb_btime ? 
dqblk.dqb_btime : MAX_DQ_TIME; out: return ret; } @@ -170,8 +185,9 @@ static int v1_write_file_info(struct super_block *sb, int type) int ret; dqopt->info[type].dqi_flags &= ~DQF_INFO_DIRTY; - if ((ret = sb->s_op->quota_read(sb, type, (char *)&dqblk, - sizeof(struct v1_disk_dqblk), v1_dqoff(0))) != sizeof(struct v1_disk_dqblk)) { + ret = sb->s_op->quota_read(sb, type, (char *)&dqblk, + sizeof(struct v1_disk_dqblk), v1_dqoff(0)); + if (ret != sizeof(struct v1_disk_dqblk)) { if (ret >= 0) ret = -EIO; goto out; diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c index b618b56..a5475fb 100644 --- a/fs/quota/quota_v2.c +++ b/fs/quota/quota_v2.c @@ -54,7 +54,8 @@ static int v2_check_quota_file(struct super_block *sb, int type) static const uint quota_magics[] = V2_INITQMAGICS; static const uint quota_versions[] = V2_INITQVERSIONS; - size = sb->s_op->quota_read(sb, type, (char *)&dqhead, sizeof(struct v2_disk_dqheader), 0); + size = sb->s_op->quota_read(sb, type, (char *)&dqhead, + sizeof(struct v2_disk_dqheader), 0); if (size != sizeof(struct v2_disk_dqheader)) { printk("quota_v2: failed read expected=%zd got=%zd\n", sizeof(struct v2_disk_dqheader), size); -- cgit v0.10.2 From 620372a9ffc6f1239a4f379ff145de9e4b5d23ba Mon Sep 17 00:00:00 2001 From: Matt LaPlante Date: Tue, 10 Feb 2009 14:50:34 +0100 Subject: trivial: fix typos/grammar errors in fs/Kconfig Signed-off-by: Matt LaPlante Acked-by: Randy Dunlap Signed-off-by: Jiri Kosina Signed-off-by: Jan Kara diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig index d9750d8..8047e01 100644 --- a/fs/quota/Kconfig +++ b/fs/quota/Kconfig @@ -33,7 +33,7 @@ config PRINT_QUOTA_WARNING Note that this behavior is currently deprecated and may go away in future. Please use notification via netlink socket instead. -# Generic support for tree structured quota files. Seleted when needed. +# Generic support for tree structured quota files. Selected when needed. config QUOTA_TREE tristate -- cgit v0.10.2 From c16831b4cc9b0805adf8ca3001752a7ec10a17bf Mon Sep 17 00:00:00 2001 From: Manish Katiyar Date: Thu, 12 Feb 2009 21:57:04 +0100 Subject: ext2: Zero our b_size in ext2_quota_read() ext2_quota_read() doesn't initialize tmp_bh.b_size before calling ext2_get_block() where we access it. Since it is a local variable it might contain some garbage. Make sure it is filled with a reasonable value before passing. Signed-off-by: Manish Katiyar Signed-off-by: Jan Kara diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 7c6e360..f983225 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -1331,6 +1331,7 @@ static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data, sb->s_blocksize - offset : toread; tmp_bh.b_state = 0; + tmp_bh.b_size = sb->s_blocksize; err = ext2_get_block(inode, blk, &tmp_bh, 0); if (err < 0) return err; -- cgit v0.10.2 From d271e8bd8c60ce059ee36d836ba063cfc61c3e21 Mon Sep 17 00:00:00 2001 From: Holger Eitzenberger Date: Thu, 26 Mar 2009 13:37:14 +0100 Subject: ctnetlink: compute generic part of event more accurately On a box with most of the optional Netfilter switches turned off some of the NLAs are never sent, e.g. secmark, mark or the conntrack byte/packet counters. As a worst case scenario this may possibly still lead to ctnetlink skbs being reallocated in netlink_trim() later, losing all the nice effects from the previous patches. I try to solve that (at least partly) by correctly #ifdef'ing the NLAs in the computation.
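In sketch form, the idea is to count an attribute towards the event size only when the code that emits it is compiled in, so the skb is not over-allocated and then shrunk again by netlink_trim(). A simplified illustration (not the actual ctnetlink helper, and it uses the generic nla_total_size() rather than the local NLA_TYPE_SIZE() macro):

	static size_t event_optional_size(void)
	{
		size_t len = 0;
	#ifdef CONFIG_NF_CONNTRACK_MARK
		len += nla_total_size(sizeof(u_int32_t));	/* CTA_MARK */
	#endif
	#ifdef CONFIG_NF_CONNTRACK_SECMARK
		len += nla_total_size(sizeof(u_int32_t));	/* CTA_SECMARK */
	#endif
	#ifdef CONFIG_NF_CT_ACCT
		len += 2 * nla_total_size(sizeof(uint64_t));	/* CTA_COUNTERS_PACKETS */
		len += 2 * nla_total_size(sizeof(uint64_t));	/* CTA_COUNTERS_BYTES */
	#endif
		return len;
	}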
Signed-off-by: Holger Eitzenberger Signed-off-by: Patrick McHardy diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 03547c6..2fb833b 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -441,19 +441,28 @@ ctnetlink_alloc_skb(const struct nf_conntrack_tuple *tuple, gfp_t gfp) + 3 * NLA_TYPE_SIZE(u_int8_t) /* CTA_PROTO_NUM */ + NLA_TYPE_SIZE(u_int32_t) /* CTA_ID */ + NLA_TYPE_SIZE(u_int32_t) /* CTA_STATUS */ +#ifdef CONFIG_NF_CT_ACCT + 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */ + 2 * NLA_TYPE_SIZE(uint64_t) /* CTA_COUNTERS_PACKETS */ + 2 * NLA_TYPE_SIZE(uint64_t) /* CTA_COUNTERS_BYTES */ +#endif + NLA_TYPE_SIZE(u_int32_t) /* CTA_TIMEOUT */ + nla_total_size(0) /* CTA_PROTOINFO */ + nla_total_size(0) /* CTA_HELP */ + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */ +#ifdef CONFIG_NF_CONNTRACK_SECMARK + NLA_TYPE_SIZE(u_int32_t) /* CTA_SECMARK */ +#endif +#ifdef CONFIG_NF_NAT_NEEDED + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */ + 2 * NLA_TYPE_SIZE(u_int32_t) /* CTA_NAT_SEQ_CORRECTION_POS */ + 2 * NLA_TYPE_SIZE(u_int32_t) /* CTA_NAT_SEQ_CORRECTION_BEFORE */ + 2 * NLA_TYPE_SIZE(u_int32_t) /* CTA_NAT_SEQ_CORRECTION_AFTER */ - + NLA_TYPE_SIZE(u_int32_t); /* CTA_MARK */ +#endif +#ifdef CONFIG_NF_CONNTRACK_MARK + + NLA_TYPE_SIZE(u_int32_t) /* CTA_MARK */ +#endif + ; #undef NLA_TYPE_SIZE -- cgit v0.10.2 From e2ab3dff9d515ef69ac7c245b5ad1e348f2106be Mon Sep 17 00:00:00 2001 From: David Miller Date: Thu, 26 Mar 2009 16:44:17 -0700 Subject: sparc64: Fix build of timer_interrupt(). MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit arch/sparc/kernel/time_64.c: In function ‘timer_interrupt’: arch/sparc/kernel/time_64.c:732: error: ‘struct kernel_stat’ has no member named ‘irqs’ make[1]: *** [arch/sparc/kernel/time_64.o] Error 1 Signed-off-by: David S. Miller Signed-off-by: Linus Torvalds diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c index 642562d..4ee2e48 100644 --- a/arch/sparc/kernel/time_64.c +++ b/arch/sparc/kernel/time_64.c @@ -724,12 +724,14 @@ void timer_interrupt(int irq, struct pt_regs *regs) unsigned long tick_mask = tick_ops->softint_mask; int cpu = smp_processor_id(); struct clock_event_device *evt = &per_cpu(sparc64_events, cpu); + struct irq_desc *desc; clear_softint(tick_mask); irq_enter(); - kstat_this_cpu.irqs[0]++; + desc = irq_to_desc(0); + kstat_incr_irqs_this_cpu(0, desc); if (unlikely(!evt->event_handler)) { printk(KERN_WARNING -- cgit v0.10.2 From 7c757eb9f804782fb39d0ae2c1a88ffb9309138e Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Thu, 26 Mar 2009 16:25:49 -0700 Subject: RDMA/nes: Fix mis-merge When net-next and infiniband were merged upstream, each branch deleted one of a pair of adjacent lines from nes_nic.c, but when Linus fixed the conflict up, he brought back both of the lines. Fix up to the intended final tree state. Signed-off-by: Roland Dreier Acked-by: David S. 
Miller Signed-off-by: Linus Torvalds diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index 8d3e4c6..ecb1f6f 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c @@ -1602,8 +1602,6 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev, netif_napi_add(netdev, &nesvnic->napi, nes_netdev_poll, 128); nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n"); netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; - netdev->vlan_rx_register = nes_netdev_vlan_rx_register; - netdev->features |= NETIF_F_LLTX; /* Fill in the port structure */ nesvnic->netdev = netdev; -- cgit v0.10.2 From 8f1ead2d1a626ed0c85b3d2c2046a49081d5933f Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 26 Mar 2009 00:59:10 -0700 Subject: GRO: Disable GRO on legacy netif_rx path When I fixed the GRO crash in the legacy receive path I used napi_complete to replace __napi_complete. Unfortunately they're not the same when NETPOLL is enabled, which may result in us not calling __napi_complete at all. What's more, we really do need to keep the __napi_complete call within the IRQ-off section since in theory an IRQ can occur in between and fill up the backlog to the maximum, causing us to lock up. Since we can't seem to find a fix that works properly right now, this patch reverts all the GRO support from the netif_rx path. Signed-off-by: Herbert Xu Signed-off-by: David S. Miller diff --git a/net/core/dev.c b/net/core/dev.c index 052dd47..63ec4bf 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2627,18 +2627,15 @@ static int process_backlog(struct napi_struct *napi, int quota) local_irq_disable(); skb = __skb_dequeue(&queue->input_pkt_queue); if (!skb) { + __napi_complete(napi); local_irq_enable(); - napi_complete(napi); - goto out; + break; } local_irq_enable(); - napi_gro_receive(napi, skb); + netif_receive_skb(skb); } while (++work < quota && jiffies == start_time); - napi_gro_flush(napi); - -out: return work; } -- cgit v0.10.2 From a1702857724fb39cb68ce581490010df99168fd0 Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Fri, 27 Mar 2009 00:12:24 -0700 Subject: net: Add support for the OpenCores 10/100 Mbps Ethernet MAC. This patch adds a platform device driver that supports the OpenCores 10/100 Mbps Ethernet MAC. The driver expects three resources: one IORESOURCE_MEM resource defines the memory region for the core's memory-mapped registers while a second IORESOURCE_MEM resource defines the network packet buffer space. The third resource, of type IORESOURCE_IRQ, associates an interrupt with the driver. Signed-off-by: Thierry Reding Acked-by: Florian Fainelli Signed-off-by: David S. Miller diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index e5ffc1c..f062b42 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -972,6 +972,14 @@ config ENC28J60_WRITEVERIFY Enable the verify after the buffer write useful for debugging purpose. If unsure, say N. +config ETHOC + tristate "OpenCores 10/100 Mbps Ethernet MAC support" + depends on NET_ETHERNET + select MII + select PHYLIB + help + Say Y here if you want to use the OpenCores 10/100 Mbps Ethernet MAC. 
+ config SMC911X tristate "SMSC LAN911[5678] support" select CRC32 diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 758ecdf..98409c9 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -230,6 +230,7 @@ obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o obj-$(CONFIG_MLX4_CORE) += mlx4/ obj-$(CONFIG_ENC28J60) += enc28j60.o +obj-$(CONFIG_ETHOC) += ethoc.o obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c new file mode 100644 index 0000000..91a9b1a --- /dev/null +++ b/drivers/net/ethoc.c @@ -0,0 +1,1112 @@ +/* + * linux/drivers/net/ethoc.c + * + * Copyright (C) 2007-2008 Avionic Design Development GmbH + * Copyright (C) 2008-2009 Avionic Design GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Written by Thierry Reding + */ + +#include +#include +#include +#include +#include +#include +#include + +/* register offsets */ +#define MODER 0x00 +#define INT_SOURCE 0x04 +#define INT_MASK 0x08 +#define IPGT 0x0c +#define IPGR1 0x10 +#define IPGR2 0x14 +#define PACKETLEN 0x18 +#define COLLCONF 0x1c +#define TX_BD_NUM 0x20 +#define CTRLMODER 0x24 +#define MIIMODER 0x28 +#define MIICOMMAND 0x2c +#define MIIADDRESS 0x30 +#define MIITX_DATA 0x34 +#define MIIRX_DATA 0x38 +#define MIISTATUS 0x3c +#define MAC_ADDR0 0x40 +#define MAC_ADDR1 0x44 +#define ETH_HASH0 0x48 +#define ETH_HASH1 0x4c +#define ETH_TXCTRL 0x50 + +/* mode register */ +#define MODER_RXEN (1 << 0) /* receive enable */ +#define MODER_TXEN (1 << 1) /* transmit enable */ +#define MODER_NOPRE (1 << 2) /* no preamble */ +#define MODER_BRO (1 << 3) /* broadcast address */ +#define MODER_IAM (1 << 4) /* individual address mode */ +#define MODER_PRO (1 << 5) /* promiscuous mode */ +#define MODER_IFG (1 << 6) /* interframe gap for incoming frames */ +#define MODER_LOOP (1 << 7) /* loopback */ +#define MODER_NBO (1 << 8) /* no back-off */ +#define MODER_EDE (1 << 9) /* excess defer enable */ +#define MODER_FULLD (1 << 10) /* full duplex */ +#define MODER_RESET (1 << 11) /* FIXME: reset (undocumented) */ +#define MODER_DCRC (1 << 12) /* delayed CRC enable */ +#define MODER_CRC (1 << 13) /* CRC enable */ +#define MODER_HUGE (1 << 14) /* huge packets enable */ +#define MODER_PAD (1 << 15) /* padding enabled */ +#define MODER_RSM (1 << 16) /* receive small packets */ + +/* interrupt source and mask registers */ +#define INT_MASK_TXF (1 << 0) /* transmit frame */ +#define INT_MASK_TXE (1 << 1) /* transmit error */ +#define INT_MASK_RXF (1 << 2) /* receive frame */ +#define INT_MASK_RXE (1 << 3) /* receive error */ +#define INT_MASK_BUSY (1 << 4) +#define INT_MASK_TXC (1 << 5) /* transmit control frame */ +#define INT_MASK_RXC (1 << 6) /* receive control frame */ + +#define INT_MASK_TX (INT_MASK_TXF | INT_MASK_TXE) +#define INT_MASK_RX (INT_MASK_RXF | INT_MASK_RXE) + +#define INT_MASK_ALL ( \ + INT_MASK_TXF | INT_MASK_TXE | \ + INT_MASK_RXF | INT_MASK_RXE | \ + INT_MASK_TXC | INT_MASK_RXC | \ + INT_MASK_BUSY \ + ) + +/* packet length register */ +#define PACKETLEN_MIN(min) (((min) & 0xffff) << 16) +#define PACKETLEN_MAX(max) (((max) & 0xffff) << 0) +#define PACKETLEN_MIN_MAX(min, max) (PACKETLEN_MIN(min) | \ + PACKETLEN_MAX(max)) + +/* transmit buffer number register */ +#define TX_BD_NUM_VAL(x) (((x) <= 0x80) ? 
(x) : 0x80) + +/* control module mode register */ +#define CTRLMODER_PASSALL (1 << 0) /* pass all receive frames */ +#define CTRLMODER_RXFLOW (1 << 1) /* receive control flow */ +#define CTRLMODER_TXFLOW (1 << 2) /* transmit control flow */ + +/* MII mode register */ +#define MIIMODER_CLKDIV(x) ((x) & 0xfe) /* needs to be an even number */ +#define MIIMODER_NOPRE (1 << 8) /* no preamble */ + +/* MII command register */ +#define MIICOMMAND_SCAN (1 << 0) /* scan status */ +#define MIICOMMAND_READ (1 << 1) /* read status */ +#define MIICOMMAND_WRITE (1 << 2) /* write control data */ + +/* MII address register */ +#define MIIADDRESS_FIAD(x) (((x) & 0x1f) << 0) +#define MIIADDRESS_RGAD(x) (((x) & 0x1f) << 8) +#define MIIADDRESS_ADDR(phy, reg) (MIIADDRESS_FIAD(phy) | \ + MIIADDRESS_RGAD(reg)) + +/* MII transmit data register */ +#define MIITX_DATA_VAL(x) ((x) & 0xffff) + +/* MII receive data register */ +#define MIIRX_DATA_VAL(x) ((x) & 0xffff) + +/* MII status register */ +#define MIISTATUS_LINKFAIL (1 << 0) +#define MIISTATUS_BUSY (1 << 1) +#define MIISTATUS_INVALID (1 << 2) + +/* TX buffer descriptor */ +#define TX_BD_CS (1 << 0) /* carrier sense lost */ +#define TX_BD_DF (1 << 1) /* defer indication */ +#define TX_BD_LC (1 << 2) /* late collision */ +#define TX_BD_RL (1 << 3) /* retransmission limit */ +#define TX_BD_RETRY_MASK (0x00f0) +#define TX_BD_RETRY(x) (((x) & 0x00f0) >> 4) +#define TX_BD_UR (1 << 8) /* transmitter underrun */ +#define TX_BD_CRC (1 << 11) /* TX CRC enable */ +#define TX_BD_PAD (1 << 12) /* pad enable for short packets */ +#define TX_BD_WRAP (1 << 13) +#define TX_BD_IRQ (1 << 14) /* interrupt request enable */ +#define TX_BD_READY (1 << 15) /* TX buffer ready */ +#define TX_BD_LEN(x) (((x) & 0xffff) << 16) +#define TX_BD_LEN_MASK (0xffff << 16) + +#define TX_BD_STATS (TX_BD_CS | TX_BD_DF | TX_BD_LC | \ + TX_BD_RL | TX_BD_RETRY_MASK | TX_BD_UR) + +/* RX buffer descriptor */ +#define RX_BD_LC (1 << 0) /* late collision */ +#define RX_BD_CRC (1 << 1) /* RX CRC error */ +#define RX_BD_SF (1 << 2) /* short frame */ +#define RX_BD_TL (1 << 3) /* too long */ +#define RX_BD_DN (1 << 4) /* dribble nibble */ +#define RX_BD_IS (1 << 5) /* invalid symbol */ +#define RX_BD_OR (1 << 6) /* receiver overrun */ +#define RX_BD_MISS (1 << 7) +#define RX_BD_CF (1 << 8) /* control frame */ +#define RX_BD_WRAP (1 << 13) +#define RX_BD_IRQ (1 << 14) /* interrupt request enable */ +#define RX_BD_EMPTY (1 << 15) +#define RX_BD_LEN(x) (((x) & 0xffff) << 16) + +#define RX_BD_STATS (RX_BD_LC | RX_BD_CRC | RX_BD_SF | RX_BD_TL | \ + RX_BD_DN | RX_BD_IS | RX_BD_OR | RX_BD_MISS) + +#define ETHOC_BUFSIZ 1536 +#define ETHOC_ZLEN 64 +#define ETHOC_BD_BASE 0x400 +#define ETHOC_TIMEOUT (HZ / 2) +#define ETHOC_MII_TIMEOUT (1 + (HZ / 5)) + +/** + * struct ethoc - driver-private device structure + * @iobase: pointer to I/O memory region + * @membase: pointer to buffer memory region + * @num_tx: number of send buffers + * @cur_tx: last send buffer written + * @dty_tx: last buffer actually sent + * @num_rx: number of receive buffers + * @cur_rx: current receive buffer + * @netdev: pointer to network device structure + * @napi: NAPI structure + * @stats: network device statistics + * @msg_enable: device state flags + * @rx_lock: receive lock + * @lock: device lock + * @phy: attached PHY + * @mdio: MDIO bus for PHY access + * @phy_id: address of attached PHY + */ +struct ethoc { + void __iomem *iobase; + void __iomem *membase; + + unsigned int num_tx; + unsigned int cur_tx; + unsigned int dty_tx; + + unsigned 
int num_rx; + unsigned int cur_rx; + + struct net_device *netdev; + struct napi_struct napi; + struct net_device_stats stats; + u32 msg_enable; + + spinlock_t rx_lock; + spinlock_t lock; + + struct phy_device *phy; + struct mii_bus *mdio; + s8 phy_id; +}; + +/** + * struct ethoc_bd - buffer descriptor + * @stat: buffer statistics + * @addr: physical memory address + */ +struct ethoc_bd { + u32 stat; + u32 addr; +}; + +static u32 ethoc_read(struct ethoc *dev, loff_t offset) +{ + return ioread32(dev->iobase + offset); +} + +static void ethoc_write(struct ethoc *dev, loff_t offset, u32 data) +{ + iowrite32(data, dev->iobase + offset); +} + +static void ethoc_read_bd(struct ethoc *dev, int index, struct ethoc_bd *bd) +{ + loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd)); + bd->stat = ethoc_read(dev, offset + 0); + bd->addr = ethoc_read(dev, offset + 4); +} + +static void ethoc_write_bd(struct ethoc *dev, int index, + const struct ethoc_bd *bd) +{ + loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd)); + ethoc_write(dev, offset + 0, bd->stat); + ethoc_write(dev, offset + 4, bd->addr); +} + +static void ethoc_enable_irq(struct ethoc *dev, u32 mask) +{ + u32 imask = ethoc_read(dev, INT_MASK); + imask |= mask; + ethoc_write(dev, INT_MASK, imask); +} + +static void ethoc_disable_irq(struct ethoc *dev, u32 mask) +{ + u32 imask = ethoc_read(dev, INT_MASK); + imask &= ~mask; + ethoc_write(dev, INT_MASK, imask); +} + +static void ethoc_ack_irq(struct ethoc *dev, u32 mask) +{ + ethoc_write(dev, INT_SOURCE, mask); +} + +static void ethoc_enable_rx_and_tx(struct ethoc *dev) +{ + u32 mode = ethoc_read(dev, MODER); + mode |= MODER_RXEN | MODER_TXEN; + ethoc_write(dev, MODER, mode); +} + +static void ethoc_disable_rx_and_tx(struct ethoc *dev) +{ + u32 mode = ethoc_read(dev, MODER); + mode &= ~(MODER_RXEN | MODER_TXEN); + ethoc_write(dev, MODER, mode); +} + +static int ethoc_init_ring(struct ethoc *dev) +{ + struct ethoc_bd bd; + int i; + + dev->cur_tx = 0; + dev->dty_tx = 0; + dev->cur_rx = 0; + + /* setup transmission buffers */ + bd.addr = 0; + bd.stat = TX_BD_IRQ | TX_BD_CRC; + + for (i = 0; i < dev->num_tx; i++) { + if (i == dev->num_tx - 1) + bd.stat |= TX_BD_WRAP; + + ethoc_write_bd(dev, i, &bd); + bd.addr += ETHOC_BUFSIZ; + } + + bd.addr = dev->num_tx * ETHOC_BUFSIZ; + bd.stat = RX_BD_EMPTY | RX_BD_IRQ; + + for (i = 0; i < dev->num_rx; i++) { + if (i == dev->num_rx - 1) + bd.stat |= RX_BD_WRAP; + + ethoc_write_bd(dev, dev->num_tx + i, &bd); + bd.addr += ETHOC_BUFSIZ; + } + + return 0; +} + +static int ethoc_reset(struct ethoc *dev) +{ + u32 mode; + + /* TODO: reset controller? 
*/ + + ethoc_disable_rx_and_tx(dev); + + /* TODO: setup registers */ + + /* enable FCS generation and automatic padding */ + mode = ethoc_read(dev, MODER); + mode |= MODER_CRC | MODER_PAD; + ethoc_write(dev, MODER, mode); + + /* set full-duplex mode */ + mode = ethoc_read(dev, MODER); + mode |= MODER_FULLD; + ethoc_write(dev, MODER, mode); + ethoc_write(dev, IPGT, 0x15); + + ethoc_ack_irq(dev, INT_MASK_ALL); + ethoc_enable_irq(dev, INT_MASK_ALL); + ethoc_enable_rx_and_tx(dev); + return 0; +} + +static unsigned int ethoc_update_rx_stats(struct ethoc *dev, + struct ethoc_bd *bd) +{ + struct net_device *netdev = dev->netdev; + unsigned int ret = 0; + + if (bd->stat & RX_BD_TL) { + dev_err(&netdev->dev, "RX: frame too long\n"); + dev->stats.rx_length_errors++; + ret++; + } + + if (bd->stat & RX_BD_SF) { + dev_err(&netdev->dev, "RX: frame too short\n"); + dev->stats.rx_length_errors++; + ret++; + } + + if (bd->stat & RX_BD_DN) { + dev_err(&netdev->dev, "RX: dribble nibble\n"); + dev->stats.rx_frame_errors++; + } + + if (bd->stat & RX_BD_CRC) { + dev_err(&netdev->dev, "RX: wrong CRC\n"); + dev->stats.rx_crc_errors++; + ret++; + } + + if (bd->stat & RX_BD_OR) { + dev_err(&netdev->dev, "RX: overrun\n"); + dev->stats.rx_over_errors++; + ret++; + } + + if (bd->stat & RX_BD_MISS) + dev->stats.rx_missed_errors++; + + if (bd->stat & RX_BD_LC) { + dev_err(&netdev->dev, "RX: late collision\n"); + dev->stats.collisions++; + ret++; + } + + return ret; +} + +static int ethoc_rx(struct net_device *dev, int limit) +{ + struct ethoc *priv = netdev_priv(dev); + int count; + + for (count = 0; count < limit; ++count) { + unsigned int entry; + struct ethoc_bd bd; + + entry = priv->num_tx + (priv->cur_rx % priv->num_rx); + ethoc_read_bd(priv, entry, &bd); + if (bd.stat & RX_BD_EMPTY) + break; + + if (ethoc_update_rx_stats(priv, &bd) == 0) { + int size = bd.stat >> 16; + struct sk_buff *skb = netdev_alloc_skb(dev, size); + if (likely(skb)) { + void *src = priv->membase + bd.addr; + memcpy_fromio(skb_put(skb, size), src, size); + skb->protocol = eth_type_trans(skb, dev); + dev->last_rx = jiffies; + priv->stats.rx_packets++; + priv->stats.rx_bytes += size; + netif_receive_skb(skb); + } else { + if (net_ratelimit()) + dev_warn(&dev->dev, "low on memory - " + "packet dropped\n"); + + priv->stats.rx_dropped++; + break; + } + } + + /* clear the buffer descriptor so it can be reused */ + bd.stat &= ~RX_BD_STATS; + bd.stat |= RX_BD_EMPTY; + ethoc_write_bd(priv, entry, &bd); + priv->cur_rx++; + } + + return count; +} + +static int ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd) +{ + struct net_device *netdev = dev->netdev; + + if (bd->stat & TX_BD_LC) { + dev_err(&netdev->dev, "TX: late collision\n"); + dev->stats.tx_window_errors++; + } + + if (bd->stat & TX_BD_RL) { + dev_err(&netdev->dev, "TX: retransmit limit\n"); + dev->stats.tx_aborted_errors++; + } + + if (bd->stat & TX_BD_UR) { + dev_err(&netdev->dev, "TX: underrun\n"); + dev->stats.tx_fifo_errors++; + } + + if (bd->stat & TX_BD_CS) { + dev_err(&netdev->dev, "TX: carrier sense lost\n"); + dev->stats.tx_carrier_errors++; + } + + if (bd->stat & TX_BD_STATS) + dev->stats.tx_errors++; + + dev->stats.collisions += (bd->stat >> 4) & 0xf; + dev->stats.tx_bytes += bd->stat >> 16; + dev->stats.tx_packets++; + return 0; +} + +static void ethoc_tx(struct net_device *dev) +{ + struct ethoc *priv = netdev_priv(dev); + + spin_lock(&priv->lock); + + while (priv->dty_tx != priv->cur_tx) { + unsigned int entry = priv->dty_tx % priv->num_tx; + struct ethoc_bd bd; + + 
ethoc_read_bd(priv, entry, &bd); + if (bd.stat & TX_BD_READY) + break; + + entry = (++priv->dty_tx) % priv->num_tx; + (void)ethoc_update_tx_stats(priv, &bd); + } + + if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2)) + netif_wake_queue(dev); + + ethoc_ack_irq(priv, INT_MASK_TX); + spin_unlock(&priv->lock); +} + +static irqreturn_t ethoc_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct ethoc *priv = netdev_priv(dev); + u32 pending; + + ethoc_disable_irq(priv, INT_MASK_ALL); + pending = ethoc_read(priv, INT_SOURCE); + if (unlikely(pending == 0)) { + ethoc_enable_irq(priv, INT_MASK_ALL); + return IRQ_NONE; + } + + ethoc_ack_irq(priv, INT_MASK_ALL); + + if (pending & INT_MASK_BUSY) { + dev_err(&dev->dev, "packet dropped\n"); + priv->stats.rx_dropped++; + } + + if (pending & INT_MASK_RX) { + if (napi_schedule_prep(&priv->napi)) + __napi_schedule(&priv->napi); + } else { + ethoc_enable_irq(priv, INT_MASK_RX); + } + + if (pending & INT_MASK_TX) + ethoc_tx(dev); + + ethoc_enable_irq(priv, INT_MASK_ALL & ~INT_MASK_RX); + return IRQ_HANDLED; +} + +static int ethoc_get_mac_address(struct net_device *dev, void *addr) +{ + struct ethoc *priv = netdev_priv(dev); + u8 *mac = (u8 *)addr; + u32 reg; + + reg = ethoc_read(priv, MAC_ADDR0); + mac[2] = (reg >> 24) & 0xff; + mac[3] = (reg >> 16) & 0xff; + mac[4] = (reg >> 8) & 0xff; + mac[5] = (reg >> 0) & 0xff; + + reg = ethoc_read(priv, MAC_ADDR1); + mac[0] = (reg >> 8) & 0xff; + mac[1] = (reg >> 0) & 0xff; + + return 0; +} + +static int ethoc_poll(struct napi_struct *napi, int budget) +{ + struct ethoc *priv = container_of(napi, struct ethoc, napi); + int work_done = 0; + + work_done = ethoc_rx(priv->netdev, budget); + if (work_done < budget) { + ethoc_enable_irq(priv, INT_MASK_RX); + napi_complete(napi); + } + + return work_done; +} + +static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg) +{ + unsigned long timeout = jiffies + ETHOC_MII_TIMEOUT; + struct ethoc *priv = bus->priv; + + ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg)); + ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ); + + while (time_before(jiffies, timeout)) { + u32 status = ethoc_read(priv, MIISTATUS); + if (!(status & MIISTATUS_BUSY)) { + u32 data = ethoc_read(priv, MIIRX_DATA); + /* reset MII command register */ + ethoc_write(priv, MIICOMMAND, 0); + return data; + } + + schedule(); + } + + return -EBUSY; +} + +static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val) +{ + unsigned long timeout = jiffies + ETHOC_MII_TIMEOUT; + struct ethoc *priv = bus->priv; + + ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg)); + ethoc_write(priv, MIITX_DATA, val); + ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE); + + while (time_before(jiffies, timeout)) { + u32 stat = ethoc_read(priv, MIISTATUS); + if (!(stat & MIISTATUS_BUSY)) + return 0; + + schedule(); + } + + return -EBUSY; +} + +static int ethoc_mdio_reset(struct mii_bus *bus) +{ + return 0; +} + +static void ethoc_mdio_poll(struct net_device *dev) +{ +} + +static int ethoc_mdio_probe(struct net_device *dev) +{ + struct ethoc *priv = netdev_priv(dev); + struct phy_device *phy; + int i; + + for (i = 0; i < PHY_MAX_ADDR; i++) { + phy = priv->mdio->phy_map[i]; + if (phy) { + if (priv->phy_id != -1) { + /* attach to specified PHY */ + if (priv->phy_id == phy->addr) + break; + } else { + /* autoselect PHY if none was specified */ + if (phy->addr != 0) + break; + } + } + } + + if (!phy) { + dev_err(&dev->dev, "no PHY found\n"); + return -ENXIO; + } + + 
phy = phy_connect(dev, dev_name(&phy->dev), ðoc_mdio_poll, 0, + PHY_INTERFACE_MODE_GMII); + if (IS_ERR(phy)) { + dev_err(&dev->dev, "could not attach to PHY\n"); + return PTR_ERR(phy); + } + + priv->phy = phy; + return 0; +} + +static int ethoc_open(struct net_device *dev) +{ + struct ethoc *priv = netdev_priv(dev); + unsigned int min_tx = 2; + unsigned int num_bd; + int ret; + + ret = request_irq(dev->irq, ethoc_interrupt, IRQF_SHARED, + dev->name, dev); + if (ret) + return ret; + + /* calculate the number of TX/RX buffers */ + num_bd = (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ; + priv->num_tx = min(min_tx, num_bd / 4); + priv->num_rx = num_bd - priv->num_tx; + ethoc_write(priv, TX_BD_NUM, priv->num_tx); + + ethoc_init_ring(priv); + ethoc_reset(priv); + + if (netif_queue_stopped(dev)) { + dev_dbg(&dev->dev, " resuming queue\n"); + netif_wake_queue(dev); + } else { + dev_dbg(&dev->dev, " starting queue\n"); + netif_start_queue(dev); + } + + phy_start(priv->phy); + napi_enable(&priv->napi); + + if (netif_msg_ifup(priv)) { + dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n", + dev->base_addr, dev->mem_start, dev->mem_end); + } + + return 0; +} + +static int ethoc_stop(struct net_device *dev) +{ + struct ethoc *priv = netdev_priv(dev); + + napi_disable(&priv->napi); + + if (priv->phy) + phy_stop(priv->phy); + + ethoc_disable_rx_and_tx(priv); + free_irq(dev->irq, dev); + + if (!netif_queue_stopped(dev)) + netif_stop_queue(dev); + + return 0; +} + +static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct ethoc *priv = netdev_priv(dev); + struct mii_ioctl_data *mdio = if_mii(ifr); + struct phy_device *phy = NULL; + + if (!netif_running(dev)) + return -EINVAL; + + if (cmd != SIOCGMIIPHY) { + if (mdio->phy_id >= PHY_MAX_ADDR) + return -ERANGE; + + phy = priv->mdio->phy_map[mdio->phy_id]; + if (!phy) + return -ENODEV; + } else { + phy = priv->phy; + } + + return phy_mii_ioctl(phy, mdio, cmd); +} + +static int ethoc_config(struct net_device *dev, struct ifmap *map) +{ + return -ENOSYS; +} + +static int ethoc_set_mac_address(struct net_device *dev, void *addr) +{ + struct ethoc *priv = netdev_priv(dev); + u8 *mac = (u8 *)addr; + + ethoc_write(priv, MAC_ADDR0, (mac[2] << 24) | (mac[3] << 16) | + (mac[4] << 8) | (mac[5] << 0)); + ethoc_write(priv, MAC_ADDR1, (mac[0] << 8) | (mac[1] << 0)); + + return 0; +} + +static void ethoc_set_multicast_list(struct net_device *dev) +{ + struct ethoc *priv = netdev_priv(dev); + u32 mode = ethoc_read(priv, MODER); + struct dev_mc_list *mc = NULL; + u32 hash[2] = { 0, 0 }; + + /* set loopback mode if requested */ + if (dev->flags & IFF_LOOPBACK) + mode |= MODER_LOOP; + else + mode &= ~MODER_LOOP; + + /* receive broadcast frames if requested */ + if (dev->flags & IFF_BROADCAST) + mode &= ~MODER_BRO; + else + mode |= MODER_BRO; + + /* enable promiscuous mode if requested */ + if (dev->flags & IFF_PROMISC) + mode |= MODER_PRO; + else + mode &= ~MODER_PRO; + + ethoc_write(priv, MODER, mode); + + /* receive multicast frames */ + if (dev->flags & IFF_ALLMULTI) { + hash[0] = 0xffffffff; + hash[1] = 0xffffffff; + } else { + for (mc = dev->mc_list; mc; mc = mc->next) { + u32 crc = ether_crc(mc->dmi_addrlen, mc->dmi_addr); + int bit = (crc >> 26) & 0x3f; + hash[bit >> 5] |= 1 << (bit & 0x1f); + } + } + + ethoc_write(priv, ETH_HASH0, hash[0]); + ethoc_write(priv, ETH_HASH1, hash[1]); +} + +static int ethoc_change_mtu(struct net_device *dev, int new_mtu) +{ + return -ENOSYS; +} + +static void ethoc_tx_timeout(struct net_device *dev) +{ 
+ struct ethoc *priv = netdev_priv(dev); + u32 pending = ethoc_read(priv, INT_SOURCE); + if (likely(pending)) + ethoc_interrupt(dev->irq, dev); +} + +static struct net_device_stats *ethoc_stats(struct net_device *dev) +{ + struct ethoc *priv = netdev_priv(dev); + return &priv->stats; +} + +static int ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct ethoc *priv = netdev_priv(dev); + struct ethoc_bd bd; + unsigned int entry; + void *dest; + + if (unlikely(skb->len > ETHOC_BUFSIZ)) { + priv->stats.tx_errors++; + return -EMSGSIZE; + } + + entry = priv->cur_tx % priv->num_tx; + spin_lock_irq(&priv->lock); + priv->cur_tx++; + + ethoc_read_bd(priv, entry, &bd); + if (unlikely(skb->len < ETHOC_ZLEN)) + bd.stat |= TX_BD_PAD; + else + bd.stat &= ~TX_BD_PAD; + + dest = priv->membase + bd.addr; + memcpy_toio(dest, skb->data, skb->len); + + bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK); + bd.stat |= TX_BD_LEN(skb->len); + ethoc_write_bd(priv, entry, &bd); + + bd.stat |= TX_BD_READY; + ethoc_write_bd(priv, entry, &bd); + + if (priv->cur_tx == (priv->dty_tx + priv->num_tx)) { + dev_dbg(&dev->dev, "stopping queue\n"); + netif_stop_queue(dev); + } + + dev->trans_start = jiffies; + dev_kfree_skb(skb); + + spin_unlock_irq(&priv->lock); + return NETDEV_TX_OK; +} + +static const struct net_device_ops ethoc_netdev_ops = { + .ndo_open = ethoc_open, + .ndo_stop = ethoc_stop, + .ndo_do_ioctl = ethoc_ioctl, + .ndo_set_config = ethoc_config, + .ndo_set_mac_address = ethoc_set_mac_address, + .ndo_set_multicast_list = ethoc_set_multicast_list, + .ndo_change_mtu = ethoc_change_mtu, + .ndo_tx_timeout = ethoc_tx_timeout, + .ndo_get_stats = ethoc_stats, + .ndo_start_xmit = ethoc_start_xmit, +}; + +/** + * ethoc_probe() - initialize OpenCores ethernet MAC + * pdev: platform device + */ +static int ethoc_probe(struct platform_device *pdev) +{ + struct net_device *netdev = NULL; + struct resource *res = NULL; + struct resource *mmio = NULL; + struct resource *mem = NULL; + struct ethoc *priv = NULL; + unsigned int phy; + int ret = 0; + + /* allocate networking device */ + netdev = alloc_etherdev(sizeof(struct ethoc)); + if (!netdev) { + dev_err(&pdev->dev, "cannot allocate network device\n"); + ret = -ENOMEM; + goto out; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + platform_set_drvdata(pdev, netdev); + + /* obtain I/O memory space */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "cannot obtain I/O memory space\n"); + ret = -ENXIO; + goto free; + } + + mmio = devm_request_mem_region(&pdev->dev, res->start, + res->end - res->start + 1, res->name); + if (!res) { + dev_err(&pdev->dev, "cannot request I/O memory space\n"); + ret = -ENXIO; + goto free; + } + + netdev->base_addr = mmio->start; + + /* obtain buffer memory space */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!res) { + dev_err(&pdev->dev, "cannot obtain memory space\n"); + ret = -ENXIO; + goto free; + } + + mem = devm_request_mem_region(&pdev->dev, res->start, + res->end - res->start + 1, res->name); + if (!mem) { + dev_err(&pdev->dev, "cannot request memory space\n"); + ret = -ENXIO; + goto free; + } + + netdev->mem_start = mem->start; + netdev->mem_end = mem->end; + + /* obtain device IRQ number */ + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res) { + dev_err(&pdev->dev, "cannot obtain IRQ\n"); + ret = -ENXIO; + goto free; + } + + netdev->irq = res->start; + + /* setup driver-private data */ + priv = netdev_priv(netdev); + priv->netdev = netdev; + + priv->iobase 
= devm_ioremap_nocache(&pdev->dev, netdev->base_addr, + mmio->end - mmio->start + 1); + if (!priv->iobase) { + dev_err(&pdev->dev, "cannot remap I/O memory space\n"); + ret = -ENXIO; + goto error; + } + + priv->membase = devm_ioremap_nocache(&pdev->dev, netdev->mem_start, + mem->end - mem->start + 1); + if (!priv->membase) { + dev_err(&pdev->dev, "cannot remap memory space\n"); + ret = -ENXIO; + goto error; + } + + /* Allow the platform setup code to pass in a MAC address. */ + if (pdev->dev.platform_data) { + struct ethoc_platform_data *pdata = + (struct ethoc_platform_data *)pdev->dev.platform_data; + memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN); + priv->phy_id = pdata->phy_id; + } + + /* Check that the given MAC address is valid. If it isn't, read the + * current MAC from the controller. */ + if (!is_valid_ether_addr(netdev->dev_addr)) + ethoc_get_mac_address(netdev, netdev->dev_addr); + + /* Check the MAC again for validity, if it still isn't choose and + * program a random one. */ + if (!is_valid_ether_addr(netdev->dev_addr)) + random_ether_addr(netdev->dev_addr); + + ethoc_set_mac_address(netdev, netdev->dev_addr); + + /* register MII bus */ + priv->mdio = mdiobus_alloc(); + if (!priv->mdio) { + ret = -ENOMEM; + goto free; + } + + priv->mdio->name = "ethoc-mdio"; + snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%d", + priv->mdio->name, pdev->id); + priv->mdio->read = ethoc_mdio_read; + priv->mdio->write = ethoc_mdio_write; + priv->mdio->reset = ethoc_mdio_reset; + priv->mdio->priv = priv; + + priv->mdio->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); + if (!priv->mdio->irq) { + ret = -ENOMEM; + goto free_mdio; + } + + for (phy = 0; phy < PHY_MAX_ADDR; phy++) + priv->mdio->irq[phy] = PHY_POLL; + + ret = mdiobus_register(priv->mdio); + if (ret) { + dev_err(&netdev->dev, "failed to register MDIO bus\n"); + goto free_mdio; + } + + ret = ethoc_mdio_probe(netdev); + if (ret) { + dev_err(&netdev->dev, "failed to probe MDIO bus\n"); + goto error; + } + + ether_setup(netdev); + + /* setup the net_device structure */ + netdev->netdev_ops = ðoc_netdev_ops; + netdev->watchdog_timeo = ETHOC_TIMEOUT; + netdev->features |= 0; + + /* setup NAPI */ + memset(&priv->napi, 0, sizeof(priv->napi)); + netif_napi_add(netdev, &priv->napi, ethoc_poll, 64); + + spin_lock_init(&priv->rx_lock); + spin_lock_init(&priv->lock); + + ret = register_netdev(netdev); + if (ret < 0) { + dev_err(&netdev->dev, "failed to register interface\n"); + goto error; + } + + goto out; + +error: + mdiobus_unregister(priv->mdio); +free_mdio: + kfree(priv->mdio->irq); + mdiobus_free(priv->mdio); +free: + free_netdev(netdev); +out: + return ret; +} + +/** + * ethoc_remove() - shutdown OpenCores ethernet MAC + * @pdev: platform device + */ +static int ethoc_remove(struct platform_device *pdev) +{ + struct net_device *netdev = platform_get_drvdata(pdev); + struct ethoc *priv = netdev_priv(netdev); + + platform_set_drvdata(pdev, NULL); + + if (netdev) { + phy_disconnect(priv->phy); + priv->phy = NULL; + + if (priv->mdio) { + mdiobus_unregister(priv->mdio); + kfree(priv->mdio->irq); + mdiobus_free(priv->mdio); + } + + unregister_netdev(netdev); + free_netdev(netdev); + } + + return 0; +} + +#ifdef CONFIG_PM +static int ethoc_suspend(struct platform_device *pdev, pm_message_t state) +{ + return -ENOSYS; +} + +static int ethoc_resume(struct platform_device *pdev) +{ + return -ENOSYS; +} +#else +# define ethoc_suspend NULL +# define ethoc_resume NULL +#endif + +static struct platform_driver ethoc_driver = { + .probe = 
ethoc_probe, + .remove = ethoc_remove, + .suspend = ethoc_suspend, + .resume = ethoc_resume, + .driver = { + .name = "ethoc", + }, +}; + +static int __init ethoc_init(void) +{ + return platform_driver_register(&ethoc_driver); +} + +static void __exit ethoc_exit(void) +{ + platform_driver_unregister(&ethoc_driver); +} + +module_init(ethoc_init); +module_exit(ethoc_exit); + +MODULE_AUTHOR("Thierry Reding "); +MODULE_DESCRIPTION("OpenCores Ethernet MAC driver"); +MODULE_LICENSE("GPL v2"); + diff --git a/include/net/ethoc.h b/include/net/ethoc.h new file mode 100644 index 0000000..96f3789 --- /dev/null +++ b/include/net/ethoc.h @@ -0,0 +1,22 @@ +/* + * linux/include/net/ethoc.h + * + * Copyright (C) 2008-2009 Avionic Design GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Written by Thierry Reding + */ + +#ifndef LINUX_NET_ETHOC_H +#define LINUX_NET_ETHOC_H 1 + +struct ethoc_platform_data { + u8 hwaddr[IFHWADDRLEN]; + s8 phy_id; +}; + +#endif /* !LINUX_NET_ETHOC_H */ + -- cgit v0.10.2 From 71f6f6dfdf7c7a67462386d9ea05c1095a89c555 Mon Sep 17 00:00:00 2001 From: Jesper Nilsson Date: Fri, 27 Mar 2009 00:17:45 -0700 Subject: ipv6: Plug sk_buff leak in ipv6_rcv (net/ipv6/ip6_input.c) Commit 778d80be52699596bf70e0eb0761cf5e1e46088d (ipv6: Add disable_ipv6 sysctl to disable IPv6 operaion on specific interface) seems to have introduced a leak of sk_buff's for ipv6 traffic, at least in some configurations where idev is NULL, or when ipv6 is disabled via sysctl. The problem is that if the first condition of the if-statement returns non-NULL, it returns an skb with only one reference, and when the other conditions apply, execution jumps to the "out" label, which does not call kfree_skb for it. To plug this leak, change to use the "drop" label instead. (this relies on it being ok to call kfree_skb on NULL) This also allows us to avoid calling rcu_read_unlock here, and removes the only user of the "out" label. Signed-off-by: Jesper Nilsson Signed-off-by: David S. Miller diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index f171e8d..8f04bd9 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -75,8 +75,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL || !idev || unlikely(idev->cnf.disable_ipv6)) { IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDISCARDS); - rcu_read_unlock(); - goto out; + goto drop; } memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm)); @@ -147,7 +146,6 @@ err: drop: rcu_read_unlock(); kfree_skb(skb); -out: return 0; } -- cgit v0.10.2 From 7d0b591c655ca0d72ebcbd242cf659a20a8995c5 Mon Sep 17 00:00:00 2001 From: Chuck Ebbert Date: Fri, 27 Mar 2009 00:22:01 -0700 Subject: xfrm: spin_lock() should be spin_unlock() in xfrm_state.c spin_lock() should be spin_unlock() in xfrm_state_walk_done(). caused by: commit 12a169e7d8f4b1c95252d8b04ed0f1033ed7cfe2 "ipsec: Put dumpers on the dump list" Reported-by: Marc Milgram Signed-off-by: Chuck Ebbert Signed-off-by: David S. 
Miller diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 62a5425..8227172 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -1615,7 +1615,7 @@ void xfrm_state_walk_done(struct xfrm_state_walk *walk) spin_lock_bh(&xfrm_state_lock); list_del(&walk->all); - spin_lock_bh(&xfrm_state_lock); + spin_unlock_bh(&xfrm_state_lock); } EXPORT_SYMBOL(xfrm_state_walk_done); -- cgit v0.10.2 From 65f71b8bd2651e6d6ca9b09fe53a8db2da22b85c Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Fri, 27 Mar 2009 00:25:24 -0700 Subject: benet: use do_div() for 64 bit divide The benet driver is doing a 64 bit divide, which is not supported in Linux kernel on 32 bit architectures. The correct way to do this is to use do_div(). Compile tested on i386 only. Signed-off-by: Stephen Hemminger Acked-by: Randy Dunlap Signed-off-by: David S. Miller diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index f901fee..9b75aa6 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c @@ -16,6 +16,7 @@ */ #include "be.h" +#include MODULE_VERSION(DRV_VER); MODULE_DEVICE_TABLE(pci, be_dev_ids); @@ -290,6 +291,17 @@ static struct net_device_stats *be_get_stats(struct net_device *dev) return &adapter->stats.net_stats; } +static u32 be_calc_rate(u64 bytes, unsigned long ticks) +{ + u64 rate = bytes; + + do_div(rate, ticks / HZ); + rate <<= 3; /* bytes/sec -> bits/sec */ + do_div(rate, 1000000ul); /* MB/Sec */ + + return rate; +} + static void be_tx_rate_update(struct be_adapter *adapter) { struct be_drvr_stats *stats = drvr_stats(adapter); @@ -303,11 +315,9 @@ static void be_tx_rate_update(struct be_adapter *adapter) /* Update tx rate once in two seconds */ if ((now - stats->be_tx_jiffies) > 2 * HZ) { - u32 r; - r = (stats->be_tx_bytes - stats->be_tx_bytes_prev) / - ((now - stats->be_tx_jiffies) / HZ); - r = r / 1000000; /* M bytes/s */ - stats->be_tx_rate = r * 8; /* M bits/s */ + stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes + - stats->be_tx_bytes_prev, + now - stats->be_tx_jiffies); stats->be_tx_jiffies = now; stats->be_tx_bytes_prev = stats->be_tx_bytes; } @@ -599,7 +609,6 @@ static void be_rx_rate_update(struct be_adapter *adapter) { struct be_drvr_stats *stats = drvr_stats(adapter); ulong now = jiffies; - u32 rate; /* Wrapped around */ if (time_before(now, stats->be_rx_jiffies)) { @@ -611,10 +620,9 @@ static void be_rx_rate_update(struct be_adapter *adapter) if ((now - stats->be_rx_jiffies) < 2 * HZ) return; - rate = (stats->be_rx_bytes - stats->be_rx_bytes_prev) / - ((now - stats->be_rx_jiffies) / HZ); - rate = rate / 1000000; /* MB/Sec */ - stats->be_rx_rate = rate * 8; /* Mega Bits/Sec */ + stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes + - stats->be_rx_bytes_prev, + now - stats->be_rx_jiffies); stats->be_rx_jiffies = now; stats->be_rx_bytes_prev = stats->be_rx_bytes; } -- cgit v0.10.2 From 03ba999117eb8688252f9068356b6e028c2c3a56 Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Fri, 27 Mar 2009 00:27:18 -0700 Subject: appletalk: this warning can go I think Its past 2.2 ... Signed-off-by: Alan Cox Signed-off-by: David S. 
Miller diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index 3e0671d..d6a9243 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -1571,14 +1571,10 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr usat->sat_family != AF_APPLETALK) return -EINVAL; - /* netatalk doesn't implement this check */ + /* netatalk didn't implement this check */ if (usat->sat_addr.s_node == ATADDR_BCAST && !sock_flag(sk, SOCK_BROADCAST)) { - printk(KERN_INFO "SO_BROADCAST: Fix your netatalk as " - "it will break before 2.2\n"); -#if 0 return -EPERM; -#endif } } else { if (sk->sk_state != TCP_ESTABLISHED) -- cgit v0.10.2 From 83e0bbcbe2145f160fbaa109b0439dae7f4a38a9 Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Fri, 27 Mar 2009 00:28:21 -0700 Subject: af_rose/x25: Sanity check the maximum user frame size Otherwise we can wrap the sizes and end up sending garbage. Closes #10423 Signed-off-by: Alan Cox Signed-off-by: David S. Miller diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index 6d9c58e..d1c16bb 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c @@ -1086,7 +1086,11 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock, SOCK_DEBUG(sk, "NET/ROM: sendto: Addresses built.\n"); - /* Build a packet */ + /* Build a packet - the conventional user limit is 236 bytes. We can + do ludicrously large NetROM frames but must not overflow */ + if (len > 65536) + return -EMSGSIZE; + SOCK_DEBUG(sk, "NET/ROM: sendto: building packet.\n"); size = len + NR_NETWORK_LEN + NR_TRANSPORT_LEN; diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index 6501396..0f36e8d 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -1124,6 +1124,10 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock, /* Build a packet */ SOCK_DEBUG(sk, "ROSE: sendto: building packet.\n"); + /* Sanity check the packet size */ + if (len > 65535) + return -EMSGSIZE; + size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN; if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL) diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index 9ca17b1..ed80af8 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -1035,6 +1035,12 @@ static int x25_sendmsg(struct kiocb *iocb, struct socket *sock, sx25.sx25_addr = x25->dest_addr; } + /* Sanity check the packet size */ + if (len > 65535) { + rc = -EMSGSIZE; + goto out; + } + SOCK_DEBUG(sk, "x25_sendmsg: sendto: Addresses built.\n"); /* Build a packet */ -- cgit v0.10.2 From 54dc79fe0d895758bdaa1dcf8512d3d21263d105 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Fri, 27 Mar 2009 00:38:45 -0700 Subject: gianfar: fix headroom expansion code The code that was added to increase headroom was wrong. It doesn't handle the case where gfar_add_fcb() changes the skb. Better to do check at start of transmit (outside of lock), where error handling is better anyway. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 9d81e7a..a7a6737 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -1239,19 +1239,9 @@ static int gfar_enet_open(struct net_device *dev) return err; } -static inline struct txfcb *gfar_add_fcb(struct sk_buff **skbp) +static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb) { - struct txfcb *fcb; - struct sk_buff *skb = *skbp; - - if (unlikely(skb_headroom(skb) < GMAC_FCB_LEN)) { - struct sk_buff *old_skb = skb; - skb = skb_realloc_headroom(old_skb, GMAC_FCB_LEN); - if (!skb) - return NULL; - dev_kfree_skb_any(old_skb); - } - fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN); + struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN); cacheable_memzero(fcb, GMAC_FCB_LEN); return fcb; @@ -1320,6 +1310,20 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) base = priv->tx_bd_base; + /* make space for additional header */ + if (skb_headroom(skb) < GMAC_FCB_LEN) { + struct sk_buff *skb_new; + + skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN); + if (!skb_new) { + dev->stats.tx_errors++; + kfree(skb); + return NETDEV_TX_OK; + } + kfree_skb(skb); + skb = skb_new; + } + /* total number of fragments in the SKB */ nr_frags = skb_shinfo(skb)->nr_frags; @@ -1372,20 +1376,18 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Set up checksumming */ if (CHECKSUM_PARTIAL == skb->ip_summed) { - fcb = gfar_add_fcb(&skb); - if (likely(fcb != NULL)) { - lstatus |= BD_LFLAG(TXBD_TOE); - gfar_tx_checksum(skb, fcb); - } + fcb = gfar_add_fcb(skb); + lstatus |= BD_LFLAG(TXBD_TOE); + gfar_tx_checksum(skb, fcb); } if (priv->vlgrp && vlan_tx_tag_present(skb)) { - if (unlikely(NULL == fcb)) - fcb = gfar_add_fcb(&skb); - if (likely(fcb != NULL)) { + if (unlikely(NULL == fcb)) { + fcb = gfar_add_fcb(skb); lstatus |= BD_LFLAG(TXBD_TOE); - gfar_tx_vlan(skb, fcb); } + + gfar_tx_vlan(skb, fcb); } /* setup the TxBD length and buffer pointer for the first BD */ @@ -1433,7 +1435,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Unlock priv */ spin_unlock_irqrestore(&priv->txlock, flags); - return 0; + return NETDEV_TX_OK; } /* Stops the kernel queue, and halts the controller */ -- cgit v0.10.2 From 58add9fc02e7a9dbab9d80d5fa64f13972e91eb5 Mon Sep 17 00:00:00 2001 From: Steve Glendinning Date: Thu, 26 Mar 2009 07:14:36 +0000 Subject: smsc911x: enforce read-after-write timing restriction on eeprom access The LAN911x datasheet specifies a minimum delay of 45ns between a write of E2P_DATA and any read. This patch adds a single dummy read of BYTE_TEST to enforce this timing constraint. Signed-off-by: Steve Glendinning Signed-off-by: David S. 
Miller diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c index ad3cbc9..af8f60c 100644 --- a/drivers/net/smsc911x.c +++ b/drivers/net/smsc911x.c @@ -1680,6 +1680,7 @@ static int smsc911x_eeprom_write_location(struct smsc911x_data *pdata, u8 address, u8 data) { u32 op = E2P_CMD_EPC_CMD_ERASE_ | address; + u32 temp; int ret; SMSC_TRACE(DRV, "address 0x%x, data 0x%x", address, data); @@ -1688,6 +1689,10 @@ static int smsc911x_eeprom_write_location(struct smsc911x_data *pdata, if (!ret) { op = E2P_CMD_EPC_CMD_WRITE_ | address; smsc911x_reg_write(pdata, E2P_DATA, (u32)data); + + /* Workaround for hardware read-after-write restriction */ + temp = smsc911x_reg_read(pdata, BYTE_TEST); + ret = smsc911x_eeprom_send_cmd(pdata, op); } -- cgit v0.10.2 From 1ace90fe0a36ae9da0b2e1211e6a30ec244e9bd0 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Mar 2009 15:11:15 +0000 Subject: 3c503, smc-ultra: netdev_ops bugs A couple of drivers have leftovers from netdev ops conversion. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c index 5b91a85..4f08bd9 100644 --- a/drivers/net/3c503.c +++ b/drivers/net/3c503.c @@ -353,9 +353,6 @@ el2_probe1(struct net_device *dev, int ioaddr) dev->netdev_ops = &el2_netdev_ops; dev->ethtool_ops = &netdev_ethtool_ops; -#ifdef CONFIG_NET_POLL_CONTROLLER - dev->poll_controller = eip_poll; -#endif retval = register_netdev(dev); if (retval) diff --git a/drivers/net/smc-ultra.c b/drivers/net/smc-ultra.c index 2033fee..f56f5e4 100644 --- a/drivers/net/smc-ultra.c +++ b/drivers/net/smc-ultra.c @@ -142,9 +142,6 @@ static int __init do_ultra_probe(struct net_device *dev) int base_addr = dev->base_addr; int irq = dev->irq; -#ifdef CONFIG_NET_POLL_CONTROLLER - dev->poll_controller = &ultra_poll; -#endif if (base_addr > 0x1ff) /* Check a single specified location. */ return ultra_probe1(dev, base_addr); else if (base_addr != 0) /* Don't probe at all. */ -- cgit v0.10.2 From cfa8707aa65f7ec8ed2130937810b4fb05b40cfa Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Mar 2009 15:11:16 +0000 Subject: uml: convert network device to internal network device stats Convert the UML network device to use internal network device stats. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c index fde510b..6d2b100 100644 --- a/arch/um/drivers/net_kern.c +++ b/arch/um/drivers/net_kern.c @@ -86,7 +86,7 @@ static int uml_net_rx(struct net_device *dev) drop_skb->dev = dev; /* Read a packet into drop_skb and don't do anything with it. 
*/ (*lp->read)(lp->fd, drop_skb, lp); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; return 0; } @@ -99,8 +99,8 @@ static int uml_net_rx(struct net_device *dev) skb_trim(skb, pkt_len); skb->protocol = (*lp->protocol)(skb); - lp->stats.rx_bytes += skb->len; - lp->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; + dev->stats.rx_packets++; netif_rx(skb); return pkt_len; } @@ -224,8 +224,8 @@ static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev) len = (*lp->write)(lp->fd, skb, lp); if (len == skb->len) { - lp->stats.tx_packets++; - lp->stats.tx_bytes += skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; dev->trans_start = jiffies; netif_start_queue(dev); @@ -234,7 +234,7 @@ static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev) } else if (len == 0) { netif_start_queue(dev); - lp->stats.tx_dropped++; + dev->stats.tx_dropped++; } else { netif_start_queue(dev); @@ -248,12 +248,6 @@ static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev) return 0; } -static struct net_device_stats *uml_net_get_stats(struct net_device *dev) -{ - struct uml_net_private *lp = netdev_priv(dev); - return &lp->stats; -} - static void uml_net_set_multicast_list(struct net_device *dev) { return; @@ -476,7 +470,6 @@ static void eth_configure(int n, void *init, char *mac, dev->open = uml_net_open; dev->hard_start_xmit = uml_net_start_xmit; dev->stop = uml_net_close; - dev->get_stats = uml_net_get_stats; dev->set_multicast_list = uml_net_set_multicast_list; dev->tx_timeout = uml_net_tx_timeout; dev->set_mac_address = uml_net_set_mac; diff --git a/arch/um/include/shared/net_kern.h b/arch/um/include/shared/net_kern.h index d843c79..5c367f2 100644 --- a/arch/um/include/shared/net_kern.h +++ b/arch/um/include/shared/net_kern.h @@ -26,7 +26,7 @@ struct uml_net_private { spinlock_t lock; struct net_device *dev; struct timer_list tl; - struct net_device_stats stats; + struct work_struct work; int fd; unsigned char mac[ETH_ALEN]; -- cgit v0.10.2 From 8bb95b39a16ed55226810596f92216c53329d2fe Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Mar 2009 15:11:17 +0000 Subject: uml: convert network device to netdevice ops Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c index 6d2b100..434224e 100644 --- a/arch/um/drivers/net_kern.c +++ b/arch/um/drivers/net_kern.c @@ -371,6 +371,18 @@ static void net_device_release(struct device *dev) free_netdev(netdev); } +static const struct net_device_ops uml_netdev_ops = { + .ndo_open = uml_net_open, + .ndo_stop = uml_net_close, + .ndo_start_xmit = uml_net_start_xmit, + .ndo_set_multicast_list = uml_net_set_multicast_list, + .ndo_tx_timeout = uml_net_tx_timeout, + .ndo_set_mac_address = uml_net_set_mac, + .ndo_change_mtu = uml_net_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + /* * Ensures that platform_driver_register is called only once by * eth_configure. Will be set in an initcall. 
@@ -467,13 +479,7 @@ static void eth_configure(int n, void *init, char *mac, set_ether_mac(dev, device->mac); dev->mtu = transport->user->mtu; - dev->open = uml_net_open; - dev->hard_start_xmit = uml_net_start_xmit; - dev->stop = uml_net_close; - dev->set_multicast_list = uml_net_set_multicast_list; - dev->tx_timeout = uml_net_tx_timeout; - dev->set_mac_address = uml_net_set_mac; - dev->change_mtu = uml_net_change_mtu; + dev->netdev_ops = &uml_netdev_ops; dev->ethtool_ops = &uml_net_ethtool_ops; dev->watchdog_timeo = (HZ >> 1); dev->irq = UM_ETH_IRQ; -- cgit v0.10.2 From 8bbce3f61b63c7502b1d6228d9f59d3c6423a759 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Mar 2009 15:11:18 +0000 Subject: appletalk: convert cops to internal net_device_stats Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c index 54819a3..89632c5 100644 --- a/drivers/net/appletalk/cops.c +++ b/drivers/net/appletalk/cops.c @@ -171,7 +171,6 @@ static unsigned int cops_debug = COPS_DEBUG; struct cops_local { - struct net_device_stats stats; int board; /* Holds what board type is. */ int nodeid; /* Set to 1 once have nodeid. */ unsigned char node_acquire; /* Node ID when acquired. */ @@ -197,7 +196,6 @@ static int cops_send_packet (struct sk_buff *skb, struct net_device *dev); static void set_multicast_list (struct net_device *dev); static int cops_ioctl (struct net_device *dev, struct ifreq *rq, int cmd); static int cops_close (struct net_device *dev); -static struct net_device_stats *cops_get_stats (struct net_device *dev); static void cleanup_card(struct net_device *dev) { @@ -337,7 +335,6 @@ static int __init cops_probe1(struct net_device *dev, int ioaddr) dev->tx_timeout = cops_timeout; dev->watchdog_timeo = HZ * 2; - dev->get_stats = cops_get_stats; dev->open = cops_open; dev->stop = cops_close; dev->do_ioctl = cops_ioctl; @@ -797,7 +794,7 @@ static void cops_rx(struct net_device *dev) { printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; while(pkt_len--) /* Discard packet */ inb(ioaddr); spin_unlock_irqrestore(&lp->lock, flags); @@ -819,7 +816,7 @@ static void cops_rx(struct net_device *dev) { printk(KERN_WARNING "%s: Bad packet length of %d bytes.\n", dev->name, pkt_len); - lp->stats.tx_errors++; + dev->stats.tx_errors++; dev_kfree_skb_any(skb); return; } @@ -836,7 +833,7 @@ static void cops_rx(struct net_device *dev) if(rsp_type != LAP_RESPONSE) { printk(KERN_WARNING "%s: Bad packet type %d.\n", dev->name, rsp_type); - lp->stats.tx_errors++; + dev->stats.tx_errors++; dev_kfree_skb_any(skb); return; } @@ -846,8 +843,8 @@ static void cops_rx(struct net_device *dev) skb_reset_transport_header(skb); /* Point to data (Skip header). */ /* Update the counters. */ - lp->stats.rx_packets++; - lp->stats.rx_bytes += skb->len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; /* Send packet to a higher place. */ netif_rx(skb); @@ -858,7 +855,7 @@ static void cops_timeout(struct net_device *dev) struct cops_local *lp = netdev_priv(dev); int ioaddr = dev->base_addr; - lp->stats.tx_errors++; + dev->stats.tx_errors++; if(lp->board==TANGENT) { if((inb(ioaddr+TANG_CARD_STATUS)&TANG_TX_READY)==0) @@ -916,8 +913,8 @@ static int cops_send_packet(struct sk_buff *skb, struct net_device *dev) spin_unlock_irqrestore(&lp->lock, flags); /* Restore interrupts. */ /* Done sending packet, update counters and cleanup. 
*/ - lp->stats.tx_packets++; - lp->stats.tx_bytes += skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; dev->trans_start = jiffies; dev_kfree_skb (skb); return 0; @@ -986,15 +983,6 @@ static int cops_close(struct net_device *dev) return 0; } -/* - * Get the current statistics. - * This may be called with the card open or closed. - */ -static struct net_device_stats *cops_get_stats(struct net_device *dev) -{ - struct cops_local *lp = netdev_priv(dev); - return &lp->stats; -} #ifdef MODULE static struct net_device *cops_dev; -- cgit v0.10.2 From c2839d433de2fee2216b41ef8222708949bf989f Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Mar 2009 15:11:19 +0000 Subject: appltetalk: convert cops device to net_device ops Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c index 89632c5..7f83254 100644 --- a/drivers/net/appletalk/cops.c +++ b/drivers/net/appletalk/cops.c @@ -258,6 +258,15 @@ out: return ERR_PTR(err); } +static const struct net_device_ops cops_netdev_ops = { + .ndo_open = cops_open, + .ndo_stop = cops_close, + .ndo_start_xmit = cops_send_packet, + .ndo_tx_timeout = cops_timeout, + .ndo_do_ioctl = cops_ioctl, + .ndo_set_multicast_list = set_multicast_list, +}; + /* * This is the real probe routine. Linux has a history of friendly device * probes on the ISA bus. A good device probes avoids doing writes, and @@ -331,15 +340,9 @@ static int __init cops_probe1(struct net_device *dev, int ioaddr) /* Copy local board variable to lp struct. */ lp->board = board; - dev->hard_start_xmit = cops_send_packet; - dev->tx_timeout = cops_timeout; + dev->netdev_ops = &cops_netdev_ops; dev->watchdog_timeo = HZ * 2; - dev->open = cops_open; - dev->stop = cops_close; - dev->do_ioctl = cops_ioctl; - dev->set_multicast_list = set_multicast_list; - dev->mc_list = NULL; /* Tell the user where the card is and what mode we're in. */ if(board==DAYNA) -- cgit v0.10.2 From 4fafc12328a4e2d4afbc4541c46be014e22c5b66 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Mar 2009 15:11:20 +0000 Subject: appletalk: convert LTPC to use internal net_device_stats Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c index dc4d496..74d9787 100644 --- a/drivers/net/appletalk/ltpc.c +++ b/drivers/net/appletalk/ltpc.c @@ -261,7 +261,6 @@ static unsigned char *ltdmacbuf; struct ltpc_private { - struct net_device_stats stats; struct atalk_addr my_addr; }; @@ -699,7 +698,6 @@ static int do_read(struct net_device *dev, void *cbuf, int cbuflen, static struct timer_list ltpc_timer; static int ltpc_xmit(struct sk_buff *skb, struct net_device *dev); -static struct net_device_stats *ltpc_get_stats(struct net_device *dev); static int read_30 ( struct net_device *dev) { @@ -726,8 +724,6 @@ static int sendup_buffer (struct net_device *dev) int dnode, snode, llaptype, len; int sklen; struct sk_buff *skb; - struct ltpc_private *ltpc_priv = netdev_priv(dev); - struct net_device_stats *stats = &ltpc_priv->stats; struct lt_rcvlap *ltc = (struct lt_rcvlap *) ltdmacbuf; if (ltc->command != LT_RCVLAP) { @@ -779,8 +775,8 @@ static int sendup_buffer (struct net_device *dev) skb_reset_transport_header(skb); - stats->rx_packets++; - stats->rx_bytes+=skb->len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; /* toss it onwards */ netif_rx(skb); @@ -904,10 +900,6 @@ static int ltpc_xmit(struct sk_buff *skb, struct net_device *dev) /* in kernel 1.3.xx, on entry skb->data points to ddp header, * and skb->len is the length of the ddp data + ddp header */ - - struct ltpc_private *ltpc_priv = netdev_priv(dev); - struct net_device_stats *stats = &ltpc_priv->stats; - int i; struct lt_sendlap cbuf; unsigned char *hdr; @@ -936,20 +928,13 @@ static int ltpc_xmit(struct sk_buff *skb, struct net_device *dev) printk("\n"); } - stats->tx_packets++; - stats->tx_bytes+=skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; dev_kfree_skb(skb); return 0; } -static struct net_device_stats *ltpc_get_stats(struct net_device *dev) -{ - struct ltpc_private *ltpc_priv = netdev_priv(dev); - struct net_device_stats *stats = &ltpc_priv->stats; - return stats; -} - /* initialization stuff */ static int __init ltpc_probe_dma(int base, int dma) @@ -1135,7 +1120,6 @@ struct net_device * __init ltpc_probe(void) /* Fill in the fields of the device structure with ethernet-generic values. 
*/ - dev->hard_start_xmit = ltpc_xmit; - - /* add the ltpc-specific things */ - dev->do_ioctl = <pc_ioctl; - - dev->set_multicast_list = &set_multicast_list; + dev->netdev_ops = <pc_netdev; dev->mc_list = NULL; dev->base_addr = io; dev->irq = irq; -- cgit v0.10.2 From ddec2c89f89b9f2d15ddff07f01570123e95f544 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Mar 2009 15:11:22 +0000 Subject: IRDA: convert donauboe to net_device_ops Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c index 6f3e7f7..6b6548b 100644 --- a/drivers/net/irda/donauboe.c +++ b/drivers/net/irda/donauboe.c @@ -1524,6 +1524,13 @@ toshoboe_close (struct pci_dev *pci_dev) free_netdev(self->netdev); } +static const struct net_device_ops toshoboe_netdev_ops = { + .ndo_open = toshoboe_net_open, + .ndo_stop = toshoboe_net_close, + .ndo_start_xmit = toshoboe_hard_xmit, + .ndo_do_ioctl = toshoboe_net_ioctl, +}; + static int toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid) { @@ -1657,10 +1664,7 @@ toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid) #endif SET_NETDEV_DEV(dev, &pci_dev->dev); - dev->hard_start_xmit = toshoboe_hard_xmit; - dev->open = toshoboe_net_open; - dev->stop = toshoboe_net_close; - dev->do_ioctl = toshoboe_net_ioctl; + dev->netdev_ops = &toshoboe_netdev_ops; err = register_netdev(dev); if (err) -- cgit v0.10.2 From 79f8ae3aa27f2f17a50ad1deee1e16ff02be01bb Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Mar 2009 15:11:23 +0000 Subject: tokenring: convert drivers to net_device_ops Convert madge and proteon drivers which are really just subclasses of tms380. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c index 1933081..456f8bf 100644 --- a/drivers/net/tokenring/madgemc.c +++ b/drivers/net/tokenring/madgemc.c @@ -142,7 +142,7 @@ static void madgemc_sifwritew(struct net_device *dev, unsigned short val, unsign return; } - +static struct net_device_ops madgemc_netdev_ops __read_mostly; static int __devinit madgemc_probe(struct device *device) { @@ -168,7 +168,7 @@ static int __devinit madgemc_probe(struct device *device) goto getout; } - dev->dma = 0; + dev->netdev_ops = &madgemc_netdev_ops; card = kmalloc(sizeof(struct card_info), GFP_KERNEL); if (card==NULL) { @@ -348,9 +348,6 @@ static int __devinit madgemc_probe(struct device *device) memcpy(tp->ProductID, "Madge MCA 16/4 ", PROD_ID_SIZE + 1); - dev->open = madgemc_open; - dev->stop = madgemc_close; - tp->tmspriv = card; dev_set_drvdata(device, dev); @@ -758,6 +755,10 @@ static struct mca_driver madgemc_driver = { static int __init madgemc_init (void) { + madgemc_netdev_ops = tms380tr_netdev_ops; + madgemc_netdev_ops.ndo_open = madgemc_open; + madgemc_netdev_ops.ndo_stop = madgemc_close; + return mca_register_driver (&madgemc_driver); } diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c index b8c955f..16e8783 100644 --- a/drivers/net/tokenring/proteon.c +++ b/drivers/net/tokenring/proteon.c @@ -116,6 +116,8 @@ nodev: return -ENODEV; } +static struct net_device_ops proteon_netdev_ops __read_mostly; + static int __init setup_card(struct net_device *dev, struct device *pdev) { struct net_local *tp; @@ -167,8 +169,7 @@ static int __init setup_card(struct net_device *dev, struct device *pdev) tp->tmspriv = NULL; - dev->open = proteon_open; - dev->stop = tms380tr_close; + dev->netdev_ops = 
&proteon_netdev_ops; if (dev->irq == 0) { @@ -352,6 +353,10 @@ static int __init proteon_init(void) struct platform_device *pdev; int i, num = 0, err = 0; + proteon_netdev_ops = tms380tr_netdev_ops; + proteon_netdev_ops.ndo_open = proteon_open; + proteon_netdev_ops.ndo_stop = tms380tr_close; + err = platform_driver_register(&proteon_driver); if (err) return err; diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c index c0f58f0..46db5c5 100644 --- a/drivers/net/tokenring/skisa.c +++ b/drivers/net/tokenring/skisa.c @@ -133,6 +133,8 @@ static int __init sk_isa_probe1(struct net_device *dev, int ioaddr) return 0; } +static struct net_device_ops sk_isa_netdev_ops __read_mostly; + static int __init setup_card(struct net_device *dev, struct device *pdev) { struct net_local *tp; @@ -184,8 +186,7 @@ static int __init setup_card(struct net_device *dev, struct device *pdev) tp->tmspriv = NULL; - dev->open = sk_isa_open; - dev->stop = tms380tr_close; + dev->netdev_ops = &sk_isa_netdev_ops; if (dev->irq == 0) { @@ -362,6 +363,10 @@ static int __init sk_isa_init(void) struct platform_device *pdev; int i, num = 0, err = 0; + sk_isa_netdev_ops = tms380tr_netdev_ops; + sk_isa_netdev_ops.ndo_open = sk_isa_open; + sk_isa_netdev_ops.ndo_stop = tms380tr_close; + err = platform_driver_register(&sk_isa_driver); if (err) return err; -- cgit v0.10.2 From f70d59492ed8bc1d74b364ebe2b97ef6705910b1 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Mar 2009 15:11:24 +0000 Subject: tokenring: convert smctr to net_device_ops Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c index 9d7db2c..a91d9c5 100644 --- a/drivers/net/tokenring/smctr.c +++ b/drivers/net/tokenring/smctr.c @@ -124,7 +124,6 @@ static unsigned int smctr_get_num_rx_bdbs(struct net_device *dev); static int smctr_get_physical_drop_number(struct net_device *dev); static __u8 *smctr_get_rx_pointer(struct net_device *dev, short queue); static int smctr_get_station_id(struct net_device *dev); -static struct net_device_stats *smctr_get_stats(struct net_device *dev); static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue, __u16 bytes_count); static int smctr_get_upstream_neighbor_addr(struct net_device *dev); @@ -3633,6 +3632,14 @@ out: return ERR_PTR(err); } +static const struct net_device_ops smctr_netdev_ops = { + .ndo_open = smctr_open, + .ndo_stop = smctr_close, + .ndo_start_xmit = smctr_send_packet, + .ndo_tx_timeout = smctr_timeout, + .ndo_get_stats = smctr_get_stats, + .ndo_set_multicast_list = smctr_set_multicast_list, +}; static int __init smctr_probe1(struct net_device *dev, int ioaddr) { @@ -3683,13 +3690,8 @@ static int __init smctr_probe1(struct net_device *dev, int ioaddr) (unsigned int)dev->base_addr, dev->irq, tp->rom_base, tp->ram_base); - dev->open = smctr_open; - dev->stop = smctr_close; - dev->hard_start_xmit = smctr_send_packet; - dev->tx_timeout = smctr_timeout; + dev->netdev_ops = &smctr_netdev_ops; dev->watchdog_timeo = HZ; - dev->get_stats = smctr_get_stats; - dev->set_multicast_list = &smctr_set_multicast_list; return (0); out: -- cgit v0.10.2 From ac99533fb716171db12798039671f19631cf3586 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Mar 2009 15:11:25 +0000 Subject: wan: convert sdla driver to net_device_ops Also use internal net_device_stats Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c index 6a07ba9..1d637f4 100644 --- a/drivers/net/wan/sdla.c +++ b/drivers/net/wan/sdla.c @@ -714,19 +714,19 @@ static int sdla_transmit(struct sk_buff *skb, struct net_device *dev) switch (ret) { case SDLA_RET_OK: - flp->stats.tx_packets++; + dev->stats.tx_packets++; ret = DLCI_RET_OK; break; case SDLA_RET_CIR_OVERFLOW: case SDLA_RET_BUF_OVERSIZE: case SDLA_RET_NO_BUFS: - flp->stats.tx_dropped++; + dev->stats.tx_dropped++; ret = DLCI_RET_DROP; break; default: - flp->stats.tx_errors++; + dev->stats.tx_errors++; ret = DLCI_RET_ERR; break; } @@ -807,7 +807,7 @@ static void sdla_receive(struct net_device *dev) if (i == CONFIG_DLCI_MAX) { printk(KERN_NOTICE "%s: Received packet from invalid DLCI %i, ignoring.", dev->name, dlci); - flp->stats.rx_errors++; + dev->stats.rx_errors++; success = 0; } } @@ -819,7 +819,7 @@ static void sdla_receive(struct net_device *dev) if (skb == NULL) { printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name); - flp->stats.rx_dropped++; + dev->stats.rx_dropped++; success = 0; } else @@ -859,7 +859,7 @@ static void sdla_receive(struct net_device *dev) if (success) { - flp->stats.rx_packets++; + dev->stats.rx_packets++; dlp = netdev_priv(master); (*dlp->receive)(skb, master); } @@ -1590,13 +1590,14 @@ fail: return err; } -static struct net_device_stats *sdla_stats(struct net_device *dev) -{ - struct frad_local *flp; - flp = netdev_priv(dev); - - return(&flp->stats); -} +static const struct net_device_ops sdla_netdev_ops = { + .ndo_open = sdla_open, + .ndo_stop = sdla_close, + .ndo_do_ioctl = sdla_ioctl, + .ndo_set_config = sdla_set_config, + .ndo_start_xmit = sdla_transmit, + .ndo_change_mtu = sdla_change_mtu, +}; static void setup_sdla(struct net_device *dev) { @@ -1604,20 +1605,13 @@ static void setup_sdla(struct net_device *dev) netdev_boot_setup_check(dev); + dev->netdev_ops = &sdla_netdev_ops; dev->flags = 0; dev->type = 0xFFFF; dev->hard_header_len = 0; dev->addr_len = 0; dev->mtu = SDLA_MAX_MTU; - dev->open = sdla_open; - dev->stop = sdla_close; - dev->do_ioctl = sdla_ioctl; - dev->set_config = sdla_set_config; - dev->get_stats = sdla_stats; - dev->hard_start_xmit = sdla_transmit; - dev->change_mtu = sdla_change_mtu; - flp->activate = sdla_activate; flp->deactivate = sdla_deactivate; flp->assoc = sdla_assoc; diff --git a/include/linux/if_frad.h b/include/linux/if_frad.h index 60e16a5..673f220 100644 --- a/include/linux/if_frad.h +++ b/include/linux/if_frad.h @@ -153,7 +153,6 @@ struct frhdr struct dlci_local { - struct net_device_stats stats; struct net_device *master; struct net_device *slave; struct dlci_conf config; -- cgit v0.10.2 From 8fdcf1aba33a61475d411d91ab47c2c9adb3e4c3 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Mar 2009 15:11:26 +0000 Subject: wireless: convert arlan to net_device_ops Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller diff --git a/drivers/net/wireless/arlan-main.c b/drivers/net/wireless/arlan-main.c index bfca15d..5b9f1e0 100644 --- a/drivers/net/wireless/arlan-main.c +++ b/drivers/net/wireless/arlan-main.c @@ -1030,7 +1030,17 @@ static int arlan_mac_addr(struct net_device *dev, void *p) return 0; } - +static const struct net_device_ops arlan_netdev_ops = { + .ndo_open = arlan_open, + .ndo_stop = arlan_close, + .ndo_start_xmit = arlan_tx, + .ndo_get_stats = arlan_statistics, + .ndo_set_multicast_list = arlan_set_multicast, + .ndo_change_mtu = arlan_change_mtu, + .ndo_set_mac_address = arlan_mac_addr, + .ndo_tx_timeout = arlan_tx_timeout, + .ndo_validate_addr = eth_validate_addr, +}; static int __init arlan_setup_device(struct net_device *dev, int num) { @@ -1042,14 +1052,7 @@ static int __init arlan_setup_device(struct net_device *dev, int num) ap->conf = (struct arlan_shmem *)(ap+1); dev->tx_queue_len = tx_queue_len; - dev->open = arlan_open; - dev->stop = arlan_close; - dev->hard_start_xmit = arlan_tx; - dev->get_stats = arlan_statistics; - dev->set_multicast_list = arlan_set_multicast; - dev->change_mtu = arlan_change_mtu; - dev->set_mac_address = arlan_mac_addr; - dev->tx_timeout = arlan_tx_timeout; + dev->netdev_ops = &arlan_netdev_ops; dev->watchdog_timeo = 3*HZ; ap->irq_test_done = 0; -- cgit v0.10.2 From 0687478a9977db0698d40f0024d4b06fd42e01a0 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Mar 2009 15:11:27 +0000 Subject: wireless: convert wavelan to net_device_ops Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller diff --git a/drivers/net/wireless/wavelan.c b/drivers/net/wireless/wavelan.c index b728541..3ab3eb9 100644 --- a/drivers/net/wireless/wavelan.c +++ b/drivers/net/wireless/wavelan.c @@ -735,9 +735,9 @@ if (lp->tx_n_in_use > 0) if (tx_status & AC_SFLD_OK) { int ncollisions; - lp->stats.tx_packets++; + dev->stats.tx_packets++; ncollisions = tx_status & AC_SFLD_MAXCOL; - lp->stats.collisions += ncollisions; + dev->stats.collisions += ncollisions; #ifdef DEBUG_TX_INFO if (ncollisions > 0) printk(KERN_DEBUG @@ -745,9 +745,9 @@ if (lp->tx_n_in_use > 0) dev->name, ncollisions); #endif } else { - lp->stats.tx_errors++; + dev->stats.tx_errors++; if (tx_status & AC_SFLD_S10) { - lp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; #ifdef DEBUG_TX_FAIL printk(KERN_DEBUG "%s: wv_complete(): tx error: no CS.\n", @@ -755,7 +755,7 @@ if (lp->tx_n_in_use > 0) #endif } if (tx_status & AC_SFLD_S9) { - lp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; #ifdef DEBUG_TX_FAIL printk(KERN_DEBUG "%s: wv_complete(): tx error: lost CTS.\n", @@ -763,7 +763,7 @@ if (lp->tx_n_in_use > 0) #endif } if (tx_status & AC_SFLD_S8) { - lp->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; #ifdef DEBUG_TX_FAIL printk(KERN_DEBUG "%s: wv_complete(): tx error: slow DMA.\n", @@ -771,7 +771,7 @@ if (lp->tx_n_in_use > 0) #endif } if (tx_status & AC_SFLD_S6) { - lp->stats.tx_heartbeat_errors++; + dev->stats.tx_heartbeat_errors++; #ifdef DEBUG_TX_FAIL printk(KERN_DEBUG "%s: wv_complete(): tx error: heart beat.\n", @@ -779,7 +779,7 @@ if (lp->tx_n_in_use > 0) #endif } if (tx_status & AC_SFLD_S5) { - lp->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; #ifdef DEBUG_TX_FAIL printk(KERN_DEBUG "%s: wv_complete(): tx error: too many collisions.\n", @@ -1346,20 +1346,6 @@ static void wv_init_info(struct net_device * dev) * or wireless extensions */ -/*------------------------------------------------------------------*/ -/* - * Get the current Ethernet 
statistics. This may be called with the - * card open or closed. - * Used when the user read /proc/net/dev - */ -static en_stats *wavelan_get_stats(struct net_device * dev) -{ -#ifdef DEBUG_IOCTL_TRACE - printk(KERN_DEBUG "%s: <>wavelan_get_stats()\n", dev->name); -#endif - - return &((net_local *)netdev_priv(dev))->stats; -} /*------------------------------------------------------------------*/ /* @@ -2466,7 +2452,7 @@ wv_packet_read(struct net_device * dev, u16 buf_off, int sksize) "%s: wv_packet_read(): could not alloc_skb(%d, GFP_ATOMIC).\n", dev->name, sksize); #endif - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; return; } @@ -2526,8 +2512,8 @@ wv_packet_read(struct net_device * dev, u16 buf_off, int sksize) netif_rx(skb); /* Keep statistics up to date */ - lp->stats.rx_packets++; - lp->stats.rx_bytes += sksize; + dev->stats.rx_packets++; + dev->stats.rx_bytes += sksize; #ifdef DEBUG_RX_TRACE printk(KERN_DEBUG "%s: <-wv_packet_read()\n", dev->name); @@ -2608,7 +2594,7 @@ static void wv_receive(struct net_device * dev) #endif } else { /* If reception was no successful */ - lp->stats.rx_errors++; + dev->stats.rx_errors++; #ifdef DEBUG_RX_INFO printk(KERN_DEBUG @@ -2624,7 +2610,7 @@ static void wv_receive(struct net_device * dev) #endif if ((fd.fd_status & FD_STATUS_S7) != 0) { - lp->stats.rx_length_errors++; + dev->stats.rx_length_errors++; #ifdef DEBUG_RX_FAIL printk(KERN_DEBUG "%s: wv_receive(): frame too short.\n", @@ -2633,7 +2619,7 @@ static void wv_receive(struct net_device * dev) } if ((fd.fd_status & FD_STATUS_S8) != 0) { - lp->stats.rx_over_errors++; + dev->stats.rx_over_errors++; #ifdef DEBUG_RX_FAIL printk(KERN_DEBUG "%s: wv_receive(): rx DMA overrun.\n", @@ -2642,7 +2628,7 @@ static void wv_receive(struct net_device * dev) } if ((fd.fd_status & FD_STATUS_S9) != 0) { - lp->stats.rx_fifo_errors++; + dev->stats.rx_fifo_errors++; #ifdef DEBUG_RX_FAIL printk(KERN_DEBUG "%s: wv_receive(): ran out of resources.\n", @@ -2651,7 +2637,7 @@ static void wv_receive(struct net_device * dev) } if ((fd.fd_status & FD_STATUS_S10) != 0) { - lp->stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; #ifdef DEBUG_RX_FAIL printk(KERN_DEBUG "%s: wv_receive(): alignment error.\n", @@ -2660,7 +2646,7 @@ static void wv_receive(struct net_device * dev) } if ((fd.fd_status & FD_STATUS_S11) != 0) { - lp->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; #ifdef DEBUG_RX_FAIL printk(KERN_DEBUG "%s: wv_receive(): CRC error.\n", @@ -2826,7 +2812,7 @@ static int wv_packet_write(struct net_device * dev, void *buf, short length) dev->trans_start = jiffies; /* Keep stats up to date. 
*/ - lp->stats.tx_bytes += length; + dev->stats.tx_bytes += length; if (lp->tx_first_in_use == I82586NULL) lp->tx_first_in_use = txblock; @@ -4038,6 +4024,22 @@ static int wavelan_close(struct net_device * dev) return 0; } +static const struct net_device_ops wavelan_netdev_ops = { + .ndo_open = wavelan_open, + .ndo_stop = wavelan_close, + .ndo_start_xmit = wavelan_packet_xmit, + .ndo_set_multicast_list = wavelan_set_multicast_list, + .ndo_tx_timeout = wavelan_watchdog, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, +#ifdef SET_MAC_ADDRESS + .ndo_set_mac_address = wavelan_set_mac_address +#else + .ndo_set_mac_address = eth_mac_addr, +#endif +}; + + /*------------------------------------------------------------------*/ /* * Probe an I/O address, and if the WaveLAN is there configure the @@ -4130,17 +4132,8 @@ static int __init wavelan_config(struct net_device *dev, unsigned short ioaddr) /* Init spinlock */ spin_lock_init(&lp->spinlock); - dev->open = wavelan_open; - dev->stop = wavelan_close; - dev->hard_start_xmit = wavelan_packet_xmit; - dev->get_stats = wavelan_get_stats; - dev->set_multicast_list = &wavelan_set_multicast_list; - dev->tx_timeout = &wavelan_watchdog; - dev->watchdog_timeo = WATCHDOG_JIFFIES; -#ifdef SET_MAC_ADDRESS - dev->set_mac_address = &wavelan_set_mac_address; -#endif /* SET_MAC_ADDRESS */ - + dev->netdev_ops = &wavelan_netdev_ops; + dev->watchdog_timeo = WATCHDOG_JIFFIES; dev->wireless_handlers = &wavelan_handler_def; lp->wireless_data.spy_data = &lp->spy_data; dev->wireless_data = &lp->wireless_data; diff --git a/drivers/net/wireless/wavelan.p.h b/drivers/net/wireless/wavelan.p.h index 44d31bb..2daa021 100644 --- a/drivers/net/wireless/wavelan.p.h +++ b/drivers/net/wireless/wavelan.p.h @@ -459,11 +459,9 @@ static const char *version = "wavelan.c : v24 (SMP + wireless extensions) 11/12/ /****************************** TYPES ******************************/ /* Shortcuts */ -typedef struct net_device_stats en_stats; typedef struct iw_statistics iw_stats; typedef struct iw_quality iw_qual; -typedef struct iw_freq iw_freq; -typedef struct net_local net_local; +typedef struct iw_freq iw_freq;typedef struct net_local net_local; typedef struct timer_list timer_list; /* Basic types */ @@ -475,15 +473,12 @@ typedef u_char mac_addr[WAVELAN_ADDR_SIZE]; /* Hardware address */ * For each network interface, Linux keeps data in two structures: "device" * keeps the generic data (same format for everybody) and "net_local" keeps * additional specific data. - * Note that some of this specific data is in fact generic (en_stats, for - * example). */ struct net_local { net_local * next; /* linked list of the devices */ struct net_device * dev; /* reverse link */ spinlock_t spinlock; /* Serialize access to the hardware (SMP) */ - en_stats stats; /* Ethernet interface statistics */ int nresets; /* number of hardware resets */ u_char reconfig_82586; /* We need to reconfigure the controller. 
*/ u_char promiscuous; /* promiscuous mode */ @@ -601,8 +596,6 @@ static void static inline void wv_init_info(struct net_device *); /* display startup info */ /* ------------------- IOCTL, STATS & RECONFIG ------------------- */ -static en_stats * - wavelan_get_stats(struct net_device *); /* Give stats /proc/net/dev */ static iw_stats * wavelan_get_wireless_stats(struct net_device *); static void -- cgit v0.10.2 From b20417db3149a9de87e4b4594bcf5f713031c507 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Mar 2009 15:11:28 +0000 Subject: netdev: seeq8005 convert to net_device_ops Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller diff --git a/drivers/net/seeq8005.c b/drivers/net/seeq8005.c index 12a8fff..ebbbe09 100644 --- a/drivers/net/seeq8005.c +++ b/drivers/net/seeq8005.c @@ -143,6 +143,17 @@ out: return ERR_PTR(err); } +static const struct net_device_ops seeq8005_netdev_ops = { + .ndo_open = seeq8005_open, + .ndo_stop = seeq8005_close, + .ndo_start_xmit = seeq8005_send_packet, + .ndo_tx_timeout = seeq8005_timeout, + .ndo_set_multicast_list = set_multicast_list, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + /* This is the real probe routine. Linux has a history of friendly device probes on the ISA bus. A good device probes avoids doing writes, and verifies that the correct device exists and functions. */ @@ -332,12 +343,8 @@ static int __init seeq8005_probe1(struct net_device *dev, int ioaddr) } } #endif - dev->open = seeq8005_open; - dev->stop = seeq8005_close; - dev->hard_start_xmit = seeq8005_send_packet; - dev->tx_timeout = seeq8005_timeout; + dev->netdev_ops = &seeq8005_netdev_ops; dev->watchdog_timeo = HZ/20; - dev->set_multicast_list = set_multicast_list; dev->flags &= ~IFF_MULTICAST; return 0; -- cgit v0.10.2 From 32670c36d0222e2fdfa9673bb878e0f347411cd4 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Mar 2009 15:11:29 +0000 Subject: netdev: smc9194 convert to net_device_ops Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller diff --git a/drivers/net/smc9194.c b/drivers/net/smc9194.c index 18d653b..9a7973a 100644 --- a/drivers/net/smc9194.c +++ b/drivers/net/smc9194.c @@ -831,6 +831,17 @@ static int __init smc_findirq(int ioaddr) #endif } +static const struct net_device_ops smc_netdev_ops = { + .ndo_open = smc_open, + .ndo_stop = smc_close, + .ndo_start_xmit = smc_wait_to_send_packet, + .ndo_tx_timeout = smc_timeout, + .ndo_set_multicast_list = smc_set_multicast_list, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + /*---------------------------------------------------------------------- . Function: smc_probe( int ioaddr ) . @@ -1044,12 +1055,8 @@ static int __init smc_probe(struct net_device *dev, int ioaddr) goto err_out; } - dev->open = smc_open; - dev->stop = smc_close; - dev->hard_start_xmit = smc_wait_to_send_packet; - dev->tx_timeout = smc_timeout; + dev->netdev_ops = &smc_netdev_ops; dev->watchdog_timeo = HZ/20; - dev->set_multicast_list = smc_set_multicast_list; return 0; -- cgit v0.10.2 From 5f352f9a1c8d53270970f4efcf5496cb9b01c4a8 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Mar 2009 15:11:30 +0000 Subject: netdev: smc-ultra32 convert to net_device_ops Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller diff --git a/drivers/net/smc-ultra32.c b/drivers/net/smc-ultra32.c index cb6c097..7a554ad 100644 --- a/drivers/net/smc-ultra32.c +++ b/drivers/net/smc-ultra32.c @@ -153,6 +153,22 @@ out: return ERR_PTR(err); } + +static const struct net_device_ops ultra32_netdev_ops = { + .ndo_open = ultra32_open, + .ndo_stop = ultra32_close, + .ndo_start_xmit = ei_start_xmit, + .ndo_tx_timeout = ei_tx_timeout, + .ndo_get_stats = ei_get_stats, + .ndo_set_multicast_list = ei_set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ei_poll, +#endif +}; + static int __init ultra32_probe1(struct net_device *dev, int ioaddr) { int i, edge, media, retval; @@ -273,11 +289,8 @@ static int __init ultra32_probe1(struct net_device *dev, int ioaddr) ei_status.block_output = &ultra32_block_output; ei_status.get_8390_hdr = &ultra32_get_8390_hdr; ei_status.reset_8390 = &ultra32_reset_8390; - dev->open = &ultra32_open; - dev->stop = &ultra32_close; -#ifdef CONFIG_NET_POLL_CONTROLLER - dev->poll_controller = ei_poll; -#endif + + dev->netdev_ops = &ultra32_netdev_ops; NS8390_init(dev, 0); return 0; -- cgit v0.10.2 From 06e884031702f06b32ce20750ab3c46c596dc776 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Mar 2009 15:11:31 +0000 Subject: netdev: smc-ultra fix netpoll net_device_ops conversion left the wrong poll_controller hook. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller diff --git a/drivers/net/smc-ultra.c b/drivers/net/smc-ultra.c index f56f5e4..0291ea0 100644 --- a/drivers/net/smc-ultra.c +++ b/drivers/net/smc-ultra.c @@ -196,7 +196,7 @@ static const struct net_device_ops ultra_netdev_ops = { .ndo_set_mac_address = eth_mac_addr, .ndo_change_mtu = eth_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ei_poll, + .ndo_poll_controller = ultra_poll, #endif }; -- cgit v0.10.2 From 462540bdb2f08d465a2416764fc628520f82939f Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Mar 2009 15:11:32 +0000 Subject: lance: convert to net_device_ops Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller diff --git a/drivers/net/lance.c b/drivers/net/lance.c index d83d401..633808d 100644 --- a/drivers/net/lance.c +++ b/drivers/net/lance.c @@ -454,6 +454,18 @@ out: } #endif +static const struct net_device_ops lance_netdev_ops = { + .ndo_open = lance_open, + .ndo_start_xmit = lance_start_xmit, + .ndo_stop = lance_close, + .ndo_get_stats = lance_get_stats, + .ndo_set_multicast_list = set_multicast_list, + .ndo_tx_timeout = lance_tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int options) { struct lance_private *lp; @@ -714,12 +726,7 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int printk(version); /* The LANCE-specific entries in the device structure. 
*/ - dev->open = lance_open; - dev->hard_start_xmit = lance_start_xmit; - dev->stop = lance_close; - dev->get_stats = lance_get_stats; - dev->set_multicast_list = set_multicast_list; - dev->tx_timeout = lance_tx_timeout; + dev->netdev_ops = &lance_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; err = register_netdev(dev); -- cgit v0.10.2 From d9c6d50d8dae755fe136e9cfdd137f856f60af4b Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Mar 2009 15:11:33 +0000 Subject: netdev: ibmlana convert to net_device_ops Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c index 5b5bf9f..c25bc0b 100644 --- a/drivers/net/ibmlana.c +++ b/drivers/net/ibmlana.c @@ -905,6 +905,17 @@ static char *ibmlana_adapter_names[] __devinitdata = { NULL }; + +static const struct net_device_ops ibmlana_netdev_ops = { + .ndo_open = ibmlana_open, + .ndo_stop = ibmlana_close, + .ndo_start_xmit = ibmlana_tx, + .ndo_set_multicast_list = ibmlana_set_multicast_list, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + static int __devinit ibmlana_init_one(struct device *kdev) { struct mca_device *mdev = to_mca_device(kdev); @@ -973,11 +984,7 @@ static int __devinit ibmlana_init_one(struct device *kdev) mca_device_set_claim(mdev, 1); /* set methods */ - - dev->open = ibmlana_open; - dev->stop = ibmlana_close; - dev->hard_start_xmit = ibmlana_tx; - dev->set_multicast_list = ibmlana_set_multicast_list; + dev->netdev_ops = &ibmlana_netdev_ops; dev->flags |= IFF_MULTICAST; /* copy out MAC address */ -- cgit v0.10.2 From 8a5f7dafbc92b6e87bd02b9d5b2b58da4f5bb4c3 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Mar 2009 15:11:34 +0000 Subject: netdev: convert eexpress to net_device_ops Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c index 9ff3f2f..1686dca 100644 --- a/drivers/net/eexpress.c +++ b/drivers/net/eexpress.c @@ -1043,6 +1043,17 @@ static void eexp_hw_tx_pio(struct net_device *dev, unsigned short *buf, lp->last_tx = jiffies; } +static const struct net_device_ops eexp_netdev_ops = { + .ndo_open = eexp_open, + .ndo_stop = eexp_close, + .ndo_start_xmit = eexp_xmit, + .ndo_set_multicast_list = eexp_set_multicast, + .ndo_tx_timeout = eexp_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + /* * Sanity check the suspected EtherExpress card * Read hardware address, reset card, size memory and initialize buffer @@ -1163,11 +1174,7 @@ static int __init eexp_hw_probe(struct net_device *dev, unsigned short ioaddr) lp->rx_buf_start = TX_BUF_START + (lp->num_tx_bufs*TX_BUF_SIZE); lp->width = buswidth; - dev->open = eexp_open; - dev->stop = eexp_close; - dev->hard_start_xmit = eexp_xmit; - dev->set_multicast_list = &eexp_set_multicast; - dev->tx_timeout = eexp_timeout; + dev->netdev_ops = &eexp_netdev_ops; dev->watchdog_timeo = 2*HZ; return register_netdev(dev); -- cgit v0.10.2 From 8afb1cebf5e7fde4a1bddacb559bda8526e64144 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Mar 2009 15:11:35 +0000 Subject: netdev: convert eexpro to net_device_ops Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c index e187c88..cc2ab64 100644 --- a/drivers/net/eepro.c +++ b/drivers/net/eepro.c @@ -739,6 +739,17 @@ static void __init eepro_print_info (struct net_device *dev) static const struct ethtool_ops eepro_ethtool_ops; +static const struct net_device_ops eepro_netdev_ops = { + .ndo_open = eepro_open, + .ndo_stop = eepro_close, + .ndo_start_xmit = eepro_send_packet, + .ndo_set_multicast_list = set_multicast_list, + .ndo_tx_timeout = eepro_tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + /* This is the real probe routine. Linux has a history of friendly device probes on the ISA bus. A good device probe avoids doing writes, and verifies that the correct device exists and functions. */ @@ -851,11 +862,7 @@ static int __init eepro_probe1(struct net_device *dev, int autoprobe) } } - dev->open = eepro_open; - dev->stop = eepro_close; - dev->hard_start_xmit = eepro_send_packet; - dev->set_multicast_list = &set_multicast_list; - dev->tx_timeout = eepro_tx_timeout; + dev->netdev_ops = &eepro_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; dev->ethtool_ops = &eepro_ethtool_ops; -- cgit v0.10.2 From cb0c7005d2dd20499c0855bfcb05272a4d206d23 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Mar 2009 15:11:36 +0000 Subject: netdev: convert at1700 to net_device_ops Remove unneeded memset (alloc_etherdev does it already). Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c index ced7079..18b566a 100644 --- a/drivers/net/at1700.c +++ b/drivers/net/at1700.c @@ -249,6 +249,17 @@ out: return ERR_PTR(err); } +static const struct net_device_ops at1700_netdev_ops = { + .ndo_open = net_open, + .ndo_stop = net_close, + .ndo_start_xmit = net_send_packet, + .ndo_set_multicast_list = set_rx_mode, + .ndo_tx_timeout = net_tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + /* The Fujitsu datasheet suggests that the NIC be probed for by checking its "signature", the default bit pattern after a reset. This *doesn't* work -- there is no way to reset the bus interface without a complete power-cycle! @@ -448,13 +459,7 @@ found: if (net_debug) printk(version); - memset(lp, 0, sizeof(struct net_local)); - - dev->open = net_open; - dev->stop = net_close; - dev->hard_start_xmit = net_send_packet; - dev->set_multicast_list = &set_rx_mode; - dev->tx_timeout = net_tx_timeout; + dev->netdev_ops = &at1700_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; spin_lock_init(&lp->lock); -- cgit v0.10.2 From 361bc03e180c4e27e8f21bc7ea3a8066886f78d5 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Mar 2009 15:11:37 +0000 Subject: netdev: convert depca to net_device_ops Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller diff --git a/drivers/net/depca.c b/drivers/net/depca.c index 55625db..357f565 100644 --- a/drivers/net/depca.c +++ b/drivers/net/depca.c @@ -566,6 +566,18 @@ MODULE_LICENSE("GPL"); outw(CSR0, DEPCA_ADDR);\ outw(STOP, DEPCA_DATA) +static const struct net_device_ops depca_netdev_ops = { + .ndo_open = depca_open, + .ndo_start_xmit = depca_start_xmit, + .ndo_stop = depca_close, + .ndo_set_multicast_list = set_multicast_list, + .ndo_do_ioctl = depca_ioctl, + .ndo_tx_timeout = depca_tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + static int __init depca_hw_init (struct net_device *dev, struct device *device) { struct depca_private *lp; @@ -793,12 +805,7 @@ static int __init depca_hw_init (struct net_device *dev, struct device *device) } /* The DEPCA-specific entries in the device structure. */ - dev->open = &depca_open; - dev->hard_start_xmit = &depca_start_xmit; - dev->stop = &depca_close; - dev->set_multicast_list = &set_multicast_list; - dev->do_ioctl = &depca_ioctl; - dev->tx_timeout = depca_tx_timeout; + dev->netdev_ops = &depca_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; dev->mem_start = 0; -- cgit v0.10.2 From 968804d9705a014f231eedf82ba9e33810898b68 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Mar 2009 15:11:38 +0000 Subject: netdev: convert ewrk3 to net_device_ops Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller diff --git a/drivers/net/ewrk3.c b/drivers/net/ewrk3.c index b852303..1a685a0 100644 --- a/drivers/net/ewrk3.c +++ b/drivers/net/ewrk3.c @@ -388,6 +388,18 @@ static int __init ewrk3_probe1(struct net_device *dev, u_long iobase, int irq) return err; } +static const struct net_device_ops ewrk3_netdev_ops = { + .ndo_open = ewrk3_open, + .ndo_start_xmit = ewrk3_queue_pkt, + .ndo_stop = ewrk3_close, + .ndo_set_multicast_list = set_multicast_list, + .ndo_do_ioctl = ewrk3_ioctl, + .ndo_tx_timeout = ewrk3_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + static int __init ewrk3_hw_init(struct net_device *dev, u_long iobase) { @@ -603,16 +615,11 @@ ewrk3_hw_init(struct net_device *dev, u_long iobase) printk(version); } /* The EWRK3-specific entries in the device structure. */ - dev->open = ewrk3_open; - dev->hard_start_xmit = ewrk3_queue_pkt; - dev->stop = ewrk3_close; - dev->set_multicast_list = set_multicast_list; - dev->do_ioctl = ewrk3_ioctl; + dev->netdev_ops = &ewrk3_netdev_ops; if (lp->adapter_name[4] == '3') SET_ETHTOOL_OPS(dev, ðtool_ops_203); else SET_ETHTOOL_OPS(dev, ðtool_ops); - dev->tx_timeout = ewrk3_timeout; dev->watchdog_timeo = QUEUE_PKT_TIMEOUT; dev->mem_start = 0; -- cgit v0.10.2 From 2c7669e3a9f748dd35743f06cbdaf4340263c3bf Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Mar 2009 15:11:39 +0000 Subject: netdev: convert ni52 to net_device_ops Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c index a8bcc00..77d44a0 100644 --- a/drivers/net/ni52.c +++ b/drivers/net/ni52.c @@ -441,6 +441,18 @@ out: return ERR_PTR(err); } +static const struct net_device_ops ni52_netdev_ops = { + .ndo_open = ni52_open, + .ndo_stop = ni52_close, + .ndo_get_stats = ni52_get_stats, + .ndo_tx_timeout = ni52_timeout, + .ndo_start_xmit = ni52_send_packet, + .ndo_set_multicast_list = set_multicast_list, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + static int __init ni52_probe1(struct net_device *dev, int ioaddr) { int i, size, retval; @@ -561,15 +573,8 @@ static int __init ni52_probe1(struct net_device *dev, int ioaddr) printk("IRQ %d (assigned and not checked!).\n", dev->irq); } - dev->open = ni52_open; - dev->stop = ni52_close; - dev->get_stats = ni52_get_stats; - dev->tx_timeout = ni52_timeout; + dev->netdev_ops = &ni52_netdev_ops; dev->watchdog_timeo = HZ/20; - dev->hard_start_xmit = ni52_send_packet; - dev->set_multicast_list = set_multicast_list; - - dev->if_port = 0; return 0; out: -- cgit v0.10.2 From c6bca821e66c5fec8ffc11ff24a82adf338bcf6b Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Mar 2009 15:11:40 +0000 Subject: netdev: convert ni65 to net_device_ops Also, use internal net_device_stats. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c index df5f869..6474f02 100644 --- a/drivers/net/ni65.c +++ b/drivers/net/ni65.c @@ -237,7 +237,7 @@ struct priv void *tmdbounce[TMDNUM]; int tmdbouncenum; int lock,xmit_queued; - struct net_device_stats stats; + void *self; int cmdr_addr; int cardno; @@ -257,7 +257,6 @@ static void ni65_timeout(struct net_device *dev); static int ni65_close(struct net_device *dev); static int ni65_alloc_buffer(struct net_device *dev); static void ni65_free_buffer(struct priv *p); -static struct net_device_stats *ni65_get_stats(struct net_device *); static void set_multicast_list(struct net_device *dev); static int irqtab[] __initdata = { 9,12,15,5 }; /* irq config-translate */ @@ -401,6 +400,17 @@ out: return ERR_PTR(err); } +static const struct net_device_ops ni65_netdev_ops = { + .ndo_open = ni65_open, + .ndo_stop = ni65_close, + .ndo_start_xmit = ni65_send_packet, + .ndo_tx_timeout = ni65_timeout, + .ndo_set_multicast_list = set_multicast_list, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + /* * this is the real card probe .. 
*/ @@ -549,13 +559,9 @@ static int __init ni65_probe1(struct net_device *dev,int ioaddr) } dev->base_addr = ioaddr; - dev->open = ni65_open; - dev->stop = ni65_close; - dev->hard_start_xmit = ni65_send_packet; - dev->tx_timeout = ni65_timeout; + dev->netdev_ops = &ni65_netdev_ops; dev->watchdog_timeo = HZ/2; - dev->get_stats = ni65_get_stats; - dev->set_multicast_list = set_multicast_list; + return 0; /* everything is OK */ } @@ -901,13 +907,13 @@ static irqreturn_t ni65_interrupt(int irq, void * dev_id) if(debuglevel > 1) printk(KERN_ERR "%s: general error: %04x.\n",dev->name,csr0); if(csr0 & CSR0_BABL) - p->stats.tx_errors++; + dev->stats.tx_errors++; if(csr0 & CSR0_MISS) { int i; for(i=0;irmdhead[i].u.s.status); printk("\n"); - p->stats.rx_errors++; + dev->stats.rx_errors++; } if(csr0 & CSR0_MERR) { if(debuglevel > 1) @@ -997,12 +1003,12 @@ static void ni65_xmit_intr(struct net_device *dev,int csr0) #endif /* checking some errors */ if(tmdp->status2 & XMIT_RTRY) - p->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; if(tmdp->status2 & XMIT_LCAR) - p->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; if(tmdp->status2 & (XMIT_BUFF | XMIT_UFLO )) { /* this stops the xmitter */ - p->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; if(debuglevel > 0) printk(KERN_ERR "%s: Xmit FIFO/BUFF error\n",dev->name); if(p->features & INIT_RING_BEFORE_START) { @@ -1016,12 +1022,12 @@ static void ni65_xmit_intr(struct net_device *dev,int csr0) if(debuglevel > 2) printk(KERN_ERR "%s: xmit-error: %04x %02x-%04x\n",dev->name,csr0,(int) tmdstat,(int) tmdp->status2); if(!(csr0 & CSR0_BABL)) /* don't count errors twice */ - p->stats.tx_errors++; + dev->stats.tx_errors++; tmdp->status2 = 0; } else { - p->stats.tx_bytes -= (short)(tmdp->blen); - p->stats.tx_packets++; + dev->stats.tx_bytes -= (short)(tmdp->blen); + dev->stats.tx_packets++; } #ifdef XMT_VIA_SKB @@ -1057,7 +1063,7 @@ static void ni65_recv_intr(struct net_device *dev,int csr0) if(!(rmdstat & RCV_ERR)) { if(rmdstat & RCV_START) { - p->stats.rx_length_errors++; + dev->stats.rx_length_errors++; printk(KERN_ERR "%s: recv, packet too long: %d\n",dev->name,rmdp->mlen & 0x0fff); } } @@ -1066,16 +1072,16 @@ static void ni65_recv_intr(struct net_device *dev,int csr0) printk(KERN_ERR "%s: receive-error: %04x, lance-status: %04x/%04x\n", dev->name,(int) rmdstat,csr0,(int) inw(PORT+L_DATAREG) ); if(rmdstat & RCV_FRAM) - p->stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; if(rmdstat & RCV_OFLO) - p->stats.rx_over_errors++; + dev->stats.rx_over_errors++; if(rmdstat & RCV_CRC) - p->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; if(rmdstat & RCV_BUF_ERR) - p->stats.rx_fifo_errors++; + dev->stats.rx_fifo_errors++; } if(!(csr0 & CSR0_MISS)) /* don't count errors twice */ - p->stats.rx_errors++; + dev->stats.rx_errors++; } else if( (len = (rmdp->mlen & 0x0fff) - 4) >= 60) { @@ -1106,20 +1112,20 @@ static void ni65_recv_intr(struct net_device *dev,int csr0) skb_put(skb,len); skb_copy_to_linear_data(skb, (unsigned char *) p->recvbounce[p->rmdnum],len); #endif - p->stats.rx_packets++; - p->stats.rx_bytes += len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; skb->protocol=eth_type_trans(skb,dev); netif_rx(skb); } else { printk(KERN_ERR "%s: can't alloc new sk_buff\n",dev->name); - p->stats.rx_dropped++; + dev->stats.rx_dropped++; } } else { printk(KERN_INFO "%s: received runt packet\n",dev->name); - p->stats.rx_errors++; + dev->stats.rx_errors++; } rmdp->blen = -(R_BUF_SIZE-8); rmdp->mlen = 0; @@ -1213,23 +1219,6 @@ 
static int ni65_send_packet(struct sk_buff *skb, struct net_device *dev) return 0; } -static struct net_device_stats *ni65_get_stats(struct net_device *dev) -{ - -#if 0 - int i; - struct priv *p = dev->ml_priv; - for(i=0;irmdhead + ((p->rmdnum + i) & (RMDNUM-1)); - printk("%02x ",rmdp->u.s.status); - } - printk("\n"); -#endif - - return &((struct priv *)dev->ml_priv)->stats; -} - static void set_multicast_list(struct net_device *dev) { if(!ni65_lance_reinit(dev)) -- cgit v0.10.2 From 1494f2f5601b3220d55f1fdd8a6f990d41446c59 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Mar 2009 15:11:41 +0000 Subject: netdev: convert ac3200 to net_device_ops Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller diff --git a/drivers/net/ac3200.c b/drivers/net/ac3200.c index 071a851..eac7338 100644 --- a/drivers/net/ac3200.c +++ b/drivers/net/ac3200.c @@ -143,6 +143,22 @@ out: } #endif +static const struct net_device_ops ac_netdev_ops = { + .ndo_open = ac_open, + .ndo_stop = ac_close_card, + + .ndo_start_xmit = ei_start_xmit, + .ndo_tx_timeout = ei_tx_timeout, + .ndo_get_stats = ei_get_stats, + .ndo_set_multicast_list = ei_set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ei_poll, +#endif +}; + static int __init ac_probe1(int ioaddr, struct net_device *dev) { int i, retval; @@ -253,11 +269,7 @@ static int __init ac_probe1(int ioaddr, struct net_device *dev) ei_status.block_output = &ac_block_output; ei_status.get_8390_hdr = &ac_get_8390_hdr; - dev->open = &ac_open; - dev->stop = &ac_close_card; -#ifdef CONFIG_NET_POLL_CONTROLLER - dev->poll_controller = ei_poll; -#endif + dev->netdev_ops = &ac_netdev_ops; NS8390_init(dev, 0); retval = register_netdev(dev); -- cgit v0.10.2 From 635d8ba2ecb614b88ab16cb82c12cb344cab4427 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Mar 2009 15:11:42 +0000 Subject: netdev: convert lp486e to net_device_ops Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c index 4d1a059..d44bddb 100644 --- a/drivers/net/lp486e.c +++ b/drivers/net/lp486e.c @@ -952,6 +952,17 @@ static void print_eth(char *add) (unsigned char) add[12], (unsigned char) add[13]); } +static const struct net_device_ops i596_netdev_ops = { + .ndo_open = i596_open, + .ndo_stop = i596_close, + .ndo_start_xmit = i596_start_xmit, + .ndo_set_multicast_list = set_multicast_list, + .ndo_tx_timeout = i596_tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + static int __init lp486e_probe(struct net_device *dev) { struct i596_private *lp; unsigned char eth_addr[6] = { 0, 0xaa, 0, 0, 0, 0 }; @@ -1014,12 +1025,8 @@ static int __init lp486e_probe(struct net_device *dev) { printk("\n"); /* The LP486E-specific entries in the device structure. 
*/ - dev->open = &i596_open; - dev->stop = &i596_close; - dev->hard_start_xmit = &i596_start_xmit; - dev->set_multicast_list = &set_multicast_list; + dev->netdev_ops = &i596_netdev_ops; dev->watchdog_timeo = 5*HZ; - dev->tx_timeout = i596_tx_timeout; #if 0 /* selftest reports 0x320925ae - don't know what that means */ -- cgit v0.10.2 From 15d23e7a9e02c8ecbbf9a855e626707ba839135a Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Mar 2009 15:11:43 +0000 Subject: netdev: convert cs89x0 to net_device_ops Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c index ff64976..7433b88 100644 --- a/drivers/net/cs89x0.c +++ b/drivers/net/cs89x0.c @@ -501,6 +501,21 @@ static void net_poll_controller(struct net_device *dev) } #endif +static const struct net_device_ops net_ops = { + .ndo_open = net_open, + .ndo_stop = net_close, + .ndo_tx_timeout = net_timeout, + .ndo_start_xmit = net_send_packet, + .ndo_get_stats = net_get_stats, + .ndo_set_multicast_list = set_multicast_list, + .ndo_set_mac_address = set_mac_address, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = net_poll_controller, +#endif + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, +}; + /* This is the real probe routine. Linux has a history of friendly device probes on the ISA bus. A good device probes avoids doing writes, and verifies that the correct device exists and functions. @@ -843,17 +858,8 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular) /* print the ethernet address. */ printk(", MAC %pM", dev->dev_addr); - dev->open = net_open; - dev->stop = net_close; - dev->tx_timeout = net_timeout; - dev->watchdog_timeo = HZ; - dev->hard_start_xmit = net_send_packet; - dev->get_stats = net_get_stats; - dev->set_multicast_list = set_multicast_list; - dev->set_mac_address = set_mac_address; -#ifdef CONFIG_NET_POLL_CONTROLLER - dev->poll_controller = net_poll_controller; -#endif + dev->netdev_ops = &net_ops; + dev->watchdog_timeo = HZ; printk("\n"); if (net_debug) -- cgit v0.10.2 From 6d1ec7812d6350409ecfe7f6dded3a6c801b89d3 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Mar 2009 15:11:44 +0000 Subject: netdev: convert eth16i to net_device_ops Also, get rid of unnecessary memset. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c index 5c048f2..0d8b6da 100644 --- a/drivers/net/eth16i.c +++ b/drivers/net/eth16i.c @@ -475,6 +475,17 @@ out: } #endif +static const struct net_device_ops eth16i_netdev_ops = { + .ndo_open = eth16i_open, + .ndo_stop = eth16i_close, + .ndo_start_xmit = eth16i_tx, + .ndo_set_multicast_list = eth16i_multicast, + .ndo_tx_timeout = eth16i_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + static int __init eth16i_probe1(struct net_device *dev, int ioaddr) { struct eth16i_local *lp = netdev_priv(dev); @@ -549,12 +560,7 @@ static int __init eth16i_probe1(struct net_device *dev, int ioaddr) BITCLR(ioaddr + CONFIG_REG_1, POWERUP); /* Initialize the device structure */ - memset(lp, 0, sizeof(struct eth16i_local)); - dev->open = eth16i_open; - dev->stop = eth16i_close; - dev->hard_start_xmit = eth16i_tx; - dev->set_multicast_list = eth16i_multicast; - dev->tx_timeout = eth16i_timeout; + dev->netdev_ops = ð16i_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; spin_lock_init(&lp->lock); -- cgit v0.10.2 From 3156378993b0fc0f9f12f5f297f0a9b4c4fe0fc8 Mon Sep 17 00:00:00 2001 From: Divy Le Ray Date: Thu, 26 Mar 2009 16:39:09 +0000 Subject: cxgb3: start qset timers when setup succeeded Start queue set reclaim timers after the queue sets have been allocated successfully. Signed-off-by: Divy Le Ray Signed-off-by: David S. Miller diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h index 71eaa43..2cf6c92 100644 --- a/drivers/net/cxgb3/adapter.h +++ b/drivers/net/cxgb3/adapter.h @@ -291,6 +291,7 @@ void t3_os_link_fault_handler(struct adapter *adapter, int port_id); void t3_sge_start(struct adapter *adap); void t3_sge_stop(struct adapter *adap); +void t3_start_sge_timers(struct adapter *adap); void t3_stop_sge_timers(struct adapter *adap); void t3_free_sge_resources(struct adapter *adap); void t3_sge_err_intr_handler(struct adapter *adapter); diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c old mode 100644 new mode 100755 index d8be896..8ad5f32 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c @@ -602,7 +602,6 @@ static int setup_sge_qsets(struct adapter *adap) &adap->params.sge.qset[qset_idx], ntxq, dev, netdev_get_tx_queue(dev, j)); if (err) { - t3_stop_sge_timers(adap); t3_free_sge_resources(adap); return err; } @@ -1046,6 +1045,8 @@ static int cxgb_up(struct adapter *adap) setup_rss(adap); if (!(adap->flags & NAPI_INIT)) init_napi(adap); + + t3_start_sge_timers(adap); adap->flags |= FULL_INIT_DONE; } diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c old mode 100644 new mode 100755 index a7555cb..fcd1a4f --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c @@ -3044,9 +3044,6 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) | V_NEWTIMER(q->rspq.holdoff_tmr)); - mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); - mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD); - return 0; err_unlock: @@ -3057,6 +3054,27 @@ err: } /** + * t3_start_sge_timers - start SGE timer call backs + * @adap: the adapter + * + * Starts each SGE queue set's timer call back + */ +void t3_start_sge_timers(struct adapter *adap) +{ + int i; + + for (i = 0; i < SGE_QSETS; ++i) { + struct sge_qset *q = &adap->sge.qs[i]; + + if (q->tx_reclaim_timer.function) + mod_timer(&q->tx_reclaim_timer, jiffies + 
TX_RECLAIM_PERIOD); + + if (q->rx_reclaim_timer.function) + mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD); + } +} + +/** * t3_stop_sge_timers - stop SGE timer call backs * @adap: the adapter * -- cgit v0.10.2 From 3fa58c883d44c50b48f2d57a0bc626a7812b0cae Mon Sep 17 00:00:00 2001 From: Divy Le Ray Date: Thu, 26 Mar 2009 16:39:14 +0000 Subject: cxgb3: sge setup fixes Enable timestamps, update delayed ack threshold for iSCSI/iWARP traffic Remove the len flag in Tx requests. It might corrupt offload trace packets. Update SGE context setup to avoid potential H/W misprogrammation. Signed-off-by: Divy Le Ray Signed-off-by: David S. Miller diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c index fcd1a4f..54667f0 100755 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c @@ -1089,7 +1089,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, struct tx_desc *d = &q->desc[pidx]; struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d; - cpl->len = htonl(skb->len | 0x80000000); + cpl->len = htonl(skb->len); cntrl = V_TXPKT_INTF(pi->port_id); if (vlan_tx_tag_present(skb) && pi->vlan_grp) diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c old mode 100644 new mode 100755 index ff262a0..7d8fbae --- a/drivers/net/cxgb3/t3_hw.c +++ b/drivers/net/cxgb3/t3_hw.c @@ -2128,16 +2128,40 @@ void t3_port_intr_clear(struct adapter *adapter, int idx) static int t3_sge_write_context(struct adapter *adapter, unsigned int id, unsigned int type) { - t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff); - t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff); - t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff); - t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff); + if (type == F_RESPONSEQ) { + /* + * Can't write the Response Queue Context bits for + * Interrupt Armed or the Reserve bits after the chip + * has been initialized out of reset. Writing to these + * bits can confuse the hardware. + */ + t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff); + t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff); + t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff); + t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff); + } else { + t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff); + t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff); + t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff); + t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff); + } t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id)); return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0, SG_CONTEXT_CMD_ATTEMPTS, 1); } +/** + * clear_sge_ctxt - completely clear an SGE context + * @adapter: the adapter + * @id: the context id + * @type: the context type + * + * Completely clear an SGE context. Used predominantly at post-reset + * initialization. Note in particular that we don't skip writing to any + * "sensitive bits" in the contexts the way that t3_sge_write_context() + * does ... 
+ */ static int clear_sge_ctxt(struct adapter *adap, unsigned int id, unsigned int type) { @@ -2145,7 +2169,14 @@ static int clear_sge_ctxt(struct adapter *adap, unsigned int id, t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0); t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0); t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0); - return t3_sge_write_context(adap, id, type); + t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff); + t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff); + t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff); + t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff); + t3_write_reg(adap, A_SG_CONTEXT_CMD, + V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id)); + return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, + 0, SG_CONTEXT_CMD_ATTEMPTS, 1); } /** @@ -2729,10 +2760,10 @@ static void tp_config(struct adapter *adap, const struct tp_params *p) F_TCPCHECKSUMOFFLOAD | V_IPTTL(64)); t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) | F_MTUENABLE | V_WINDOWSCALEMODE(1) | - V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1)); + V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1)); t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) | V_AUTOSTATE2(1) | V_AUTOSTATE1(0) | - V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) | + V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) | F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1)); t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO, F_IPV6ENABLE | F_NICMODE); -- cgit v0.10.2 From 68f40c10292a94762956896d4d320a2620945adc Mon Sep 17 00:00:00 2001 From: Divy Le Ray Date: Thu, 26 Mar 2009 16:39:19 +0000 Subject: cxgb3: use resource_size_t for mmio declarations Use resource_size_t to declare mmio start and len variables. Print PEX error register after EEH resumed. Signed-off-by: Divy Le Ray Signed-off-by: David S. Miller diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c index 8ad5f32..e2b1193 100755 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c @@ -2871,6 +2871,9 @@ static void t3_io_resume(struct pci_dev *pdev) { struct adapter *adapter = pci_get_drvdata(pdev); + CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n", + t3_read_reg(adapter, A_PCIE_PEX_ERR)); + t3_resume_ports(adapter); } @@ -3003,7 +3006,7 @@ static int __devinit init_one(struct pci_dev *pdev, static int version_printed; int i, err, pci_using_dac = 0; - unsigned long mmio_start, mmio_len; + resource_size_t mmio_start, mmio_len; const struct adapter_info *ai; struct adapter *adapter = NULL; struct port_info *pi; -- cgit v0.10.2 From 952cdf333f9d1b0b71f1b9a3c5e421a2673ed7de Mon Sep 17 00:00:00 2001 From: Divy Le Ray Date: Thu, 26 Mar 2009 16:39:24 +0000 Subject: cxgb3: differentiate portx and Tx channels Separate ports from H/W Tx channels. Signed-off-by: Divy Le Ray Signed-off-by: David S. 
Miller diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h index 9ee021e..e508dc3 100644 --- a/drivers/net/cxgb3/common.h +++ b/drivers/net/cxgb3/common.h @@ -191,7 +191,8 @@ struct mdio_ops { }; struct adapter_info { - unsigned char nports; /* # of ports */ + unsigned char nports0; /* # of ports on channel 0 */ + unsigned char nports1; /* # of ports on channel 1 */ unsigned char phy_base_addr; /* MDIO PHY base address */ unsigned int gpio_out; /* GPIO output settings */ unsigned char gpio_intr[MAX_NPORTS]; /* GPIO PHY IRQ pins */ @@ -422,6 +423,7 @@ struct adapter_params { unsigned short b_wnd[NCCTRL_WIN]; unsigned int nports; /* # of ethernet ports */ + unsigned int chan_map; /* bitmap of in-use Tx channels */ unsigned int stats_update_period; /* MAC stats accumulation period */ unsigned int linkpoll_period; /* link poll period in 0.1s */ unsigned int rev; /* chip revision */ diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c old mode 100755 new mode 100644 index e2b1193..2c2aaa7 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c @@ -3086,7 +3086,7 @@ static int __devinit init_one(struct pci_dev *pdev, INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task); INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task); - for (i = 0; i < ai->nports; ++i) { + for (i = 0; i < ai->nports0 + ai->nports1; ++i) { struct net_device *netdev; netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS); @@ -3176,7 +3176,7 @@ static int __devinit init_one(struct pci_dev *pdev, out_free_dev: iounmap(adapter->regs); - for (i = ai->nports - 1; i >= 0; --i) + for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i) if (adapter->port[i]) free_netdev(adapter->port[i]); diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c old mode 100755 new mode 100644 index 7d8fbae..31ed31a --- a/drivers/net/cxgb3/t3_hw.c +++ b/drivers/net/cxgb3/t3_hw.c @@ -493,20 +493,20 @@ int t3_phy_lasi_intr_handler(struct cphy *phy) } static const struct adapter_info t3_adap_info[] = { - {2, 0, + {1, 1, 0, F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0, &mi1_mdio_ops, "Chelsio PE9000"}, - {2, 0, + {1, 1, 0, F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0, &mi1_mdio_ops, "Chelsio T302"}, - {1, 0, + {1, 0, 0, F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI, &mi1_mdio_ext_ops, "Chelsio T310"}, - {2, 0, + {1, 1, 0, F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, @@ -514,7 +514,7 @@ static const struct adapter_info t3_adap_info[] = { &mi1_mdio_ext_ops, "Chelsio T320"}, {}, {}, - {1, 0, + {1, 0, 0, F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI, @@ -3227,20 +3227,22 @@ int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask) } /* - * Perform the bits of HW initialization that are dependent on the number - * of available ports. + * Perform the bits of HW initialization that are dependent on the Tx + * channels being used. 
*/ -static void init_hw_for_avail_ports(struct adapter *adap, int nports) +static void chan_init_hw(struct adapter *adap, unsigned int chan_map) { int i; - if (nports == 1) { + if (chan_map != 3) { /* one channel */ t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0); t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0); - t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN | - F_PORT0ACTIVE | F_ENFORCEPKT); - t3_write_reg(adap, A_PM1_TX_CFG, 0xffffffff); - } else { + t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT | + (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE : + F_TPTXPORT1EN | F_PORT1ACTIVE)); + t3_write_reg(adap, A_PM1_TX_CFG, + chan_map == 1 ? 0xffffffff : 0); + } else { /* two channels */ t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN); t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB); t3_write_reg(adap, A_ULPTX_DMA_WEIGHT, @@ -3548,7 +3550,7 @@ int t3_init_hw(struct adapter *adapter, u32 fw_params) t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff); t3_write_reg(adapter, A_PM1_RX_MODE, 0); t3_write_reg(adapter, A_PM1_TX_MODE, 0); - init_hw_for_avail_ports(adapter, adapter->params.nports); + chan_init_hw(adapter, adapter->params.chan_map); t3_sge_init(adapter, &adapter->params.sge); t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter)); @@ -3785,7 +3787,8 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai, get_pci_mode(adapter, &adapter->params.pci); adapter->params.info = ai; - adapter->params.nports = ai->nports; + adapter->params.nports = ai->nports0 + ai->nports1; + adapter->params.chan_map = !!ai->nports0 | (!!ai->nports1 << 1); adapter->params.rev = t3_read_reg(adapter, A_PL_REV); /* * We used to only run the "adapter check task" once a second if @@ -3816,7 +3819,7 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai, mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX"); mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM"); - p->nchan = ai->nports; + p->nchan = adapter->params.chan_map == 3 ? 2 : 1; p->pmrx_size = t3_mc7_size(&adapter->pmrx); p->pmtx_size = t3_mc7_size(&adapter->pmtx); p->cm_size = t3_mc7_size(&adapter->cm); -- cgit v0.10.2 From 5e68b772e6efd189d6aca76f6872fb75d51ace60 Mon Sep 17 00:00:00 2001 From: Divy Le Ray Date: Thu, 26 Mar 2009 16:39:29 +0000 Subject: cxgb3: map entire Rx page, feed map+offset to Rx ring. DMA mapping can be expensive in the presence of iommus. Reduce the Rx iommu activity by mapping an entire page, and provide the H/W the mapped address + offset of the current page chunk. Reserve bits at the end of the page to track mapping references, so the page can be unmapped. Signed-off-by: Divy Le Ray Signed-off-by: David S. 
Miller diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h index 2cf6c92..714df2b 100644 --- a/drivers/net/cxgb3/adapter.h +++ b/drivers/net/cxgb3/adapter.h @@ -85,6 +85,8 @@ struct fl_pg_chunk { struct page *page; void *va; unsigned int offset; + u64 *p_cnt; + DECLARE_PCI_UNMAP_ADDR(mapping); }; struct rx_desc; @@ -101,6 +103,7 @@ struct sge_fl { /* SGE per free-buffer list state */ struct fl_pg_chunk pg_chunk;/* page chunk cache */ unsigned int use_pages; /* whether FL uses pages or sk_buffs */ unsigned int order; /* order of page allocations */ + unsigned int alloc_size; /* size of allocated buffer */ struct rx_desc *desc; /* address of HW Rx descriptor ring */ struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */ dma_addr_t phys_addr; /* physical address of HW ring start */ diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c old mode 100755 new mode 100644 index 54667f0..26d3587 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c @@ -50,6 +50,7 @@ #define SGE_RX_COPY_THRES 256 #define SGE_RX_PULL_LEN 128 +#define SGE_PG_RSVD SMP_CACHE_BYTES /* * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks. * It must be a divisor of PAGE_SIZE. If set to 0 FL0 will use sk_buffs @@ -57,8 +58,10 @@ */ #define FL0_PG_CHUNK_SIZE 2048 #define FL0_PG_ORDER 0 +#define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER) #define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192) #define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1) +#define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER) #define SGE_RX_DROP_THRES 16 #define RX_RECLAIM_PERIOD (HZ/4) @@ -345,13 +348,21 @@ static inline int should_restart_tx(const struct sge_txq *q) return q->in_use - r < (q->size >> 1); } -static void clear_rx_desc(const struct sge_fl *q, struct rx_sw_desc *d) +static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q, + struct rx_sw_desc *d) { - if (q->use_pages) { - if (d->pg_chunk.page) - put_page(d->pg_chunk.page); + if (q->use_pages && d->pg_chunk.page) { + (*d->pg_chunk.p_cnt)--; + if (!*d->pg_chunk.p_cnt) + pci_unmap_page(pdev, + pci_unmap_addr(&d->pg_chunk, mapping), + q->alloc_size, PCI_DMA_FROMDEVICE); + + put_page(d->pg_chunk.page); d->pg_chunk.page = NULL; } else { + pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr), + q->buf_size, PCI_DMA_FROMDEVICE); kfree_skb(d->skb); d->skb = NULL; } @@ -372,9 +383,8 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q) while (q->credits--) { struct rx_sw_desc *d = &q->sdesc[cidx]; - pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr), - q->buf_size, PCI_DMA_FROMDEVICE); - clear_rx_desc(q, d); + + clear_rx_desc(pdev, q, d); if (++cidx == q->size) cidx = 0; } @@ -417,18 +427,39 @@ static inline int add_one_rx_buf(void *va, unsigned int len, return 0; } -static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp, +static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d, + unsigned int gen) +{ + d->addr_lo = cpu_to_be32(mapping); + d->addr_hi = cpu_to_be32((u64) mapping >> 32); + wmb(); + d->len_gen = cpu_to_be32(V_FLD_GEN1(gen)); + d->gen2 = cpu_to_be32(V_FLD_GEN2(gen)); + return 0; +} + +static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q, + struct rx_sw_desc *sd, gfp_t gfp, unsigned int order) { if (!q->pg_chunk.page) { + dma_addr_t mapping; + q->pg_chunk.page = alloc_pages(gfp, order); if (unlikely(!q->pg_chunk.page)) return -ENOMEM; q->pg_chunk.va = page_address(q->pg_chunk.page); + q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << 
order) - + SGE_PG_RSVD; q->pg_chunk.offset = 0; + mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, + 0, q->alloc_size, PCI_DMA_FROMDEVICE); + pci_unmap_addr_set(&q->pg_chunk, mapping, mapping); } sd->pg_chunk = q->pg_chunk; + prefetch(sd->pg_chunk.p_cnt); + q->pg_chunk.offset += q->buf_size; if (q->pg_chunk.offset == (PAGE_SIZE << order)) q->pg_chunk.page = NULL; @@ -436,6 +467,12 @@ static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp, q->pg_chunk.va += q->buf_size; get_page(q->pg_chunk.page); } + + if (sd->pg_chunk.offset == 0) + *sd->pg_chunk.p_cnt = 1; + else + *sd->pg_chunk.p_cnt += 1; + return 0; } @@ -460,35 +497,43 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) */ static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp) { - void *buf_start; struct rx_sw_desc *sd = &q->sdesc[q->pidx]; struct rx_desc *d = &q->desc[q->pidx]; unsigned int count = 0; while (n--) { + dma_addr_t mapping; int err; if (q->use_pages) { - if (unlikely(alloc_pg_chunk(q, sd, gfp, q->order))) { + if (unlikely(alloc_pg_chunk(adap, q, sd, gfp, + q->order))) { nomem: q->alloc_failed++; break; } - buf_start = sd->pg_chunk.va; + mapping = pci_unmap_addr(&sd->pg_chunk, mapping) + + sd->pg_chunk.offset; + pci_unmap_addr_set(sd, dma_addr, mapping); + + add_one_rx_chunk(mapping, d, q->gen); + pci_dma_sync_single_for_device(adap->pdev, mapping, + q->buf_size - SGE_PG_RSVD, + PCI_DMA_FROMDEVICE); } else { - struct sk_buff *skb = alloc_skb(q->buf_size, gfp); + void *buf_start; + struct sk_buff *skb = alloc_skb(q->buf_size, gfp); if (!skb) goto nomem; sd->skb = skb; buf_start = skb->data; - } - - err = add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen, - adap->pdev); - if (unlikely(err)) { - clear_rx_desc(q, sd); - break; + err = add_one_rx_buf(buf_start, q->buf_size, d, sd, + q->gen, adap->pdev); + if (unlikely(err)) { + clear_rx_desc(adap->pdev, q, sd); + break; + } } d++; @@ -795,19 +840,19 @@ static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl, struct sk_buff *newskb, *skb; struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; - newskb = skb = q->pg_skb; + dma_addr_t dma_addr = pci_unmap_addr(sd, dma_addr); + newskb = skb = q->pg_skb; if (!skb && (len <= SGE_RX_COPY_THRES)) { newskb = alloc_skb(len, GFP_ATOMIC); if (likely(newskb != NULL)) { __skb_put(newskb, len); - pci_dma_sync_single_for_cpu(adap->pdev, - pci_unmap_addr(sd, dma_addr), len, + pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); memcpy(newskb->data, sd->pg_chunk.va, len); - pci_dma_sync_single_for_device(adap->pdev, - pci_unmap_addr(sd, dma_addr), len, - PCI_DMA_FROMDEVICE); + pci_dma_sync_single_for_device(adap->pdev, dma_addr, + len, + PCI_DMA_FROMDEVICE); } else if (!drop_thres) return NULL; recycle: @@ -820,16 +865,25 @@ recycle: if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres))) goto recycle; + prefetch(sd->pg_chunk.p_cnt); + if (!skb) newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC); + if (unlikely(!newskb)) { if (!drop_thres) return NULL; goto recycle; } - pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr), - fl->buf_size, PCI_DMA_FROMDEVICE); + pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len, + PCI_DMA_FROMDEVICE); + (*sd->pg_chunk.p_cnt)--; + if (!*sd->pg_chunk.p_cnt) + pci_unmap_page(adap->pdev, + pci_unmap_addr(&sd->pg_chunk, mapping), + fl->alloc_size, + PCI_DMA_FROMDEVICE); if (!skb) { __skb_put(newskb, SGE_RX_PULL_LEN); memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN); @@ -1958,8 
+2012,8 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq, skb_pull(skb, sizeof(*p) + pad); skb->protocol = eth_type_trans(skb, adap->port[p->iff]); pi = netdev_priv(skb->dev); - if ((pi->rx_offload & T3_RX_CSUM) && p->csum_valid && p->csum == htons(0xffff) && - !p->fragment) { + if ((pi->rx_offload & T3_RX_CSUM) && p->csum_valid && + p->csum == htons(0xffff) && !p->fragment) { qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++; skb->ip_summed = CHECKSUM_UNNECESSARY; } else @@ -2034,10 +2088,19 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs, fl->credits--; len -= offset; - pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr), - fl->buf_size, PCI_DMA_FROMDEVICE); + pci_dma_sync_single_for_cpu(adap->pdev, + pci_unmap_addr(sd, dma_addr), + fl->buf_size - SGE_PG_RSVD, + PCI_DMA_FROMDEVICE); + + (*sd->pg_chunk.p_cnt)--; + if (!*sd->pg_chunk.p_cnt) + pci_unmap_page(adap->pdev, + pci_unmap_addr(&sd->pg_chunk, mapping), + fl->alloc_size, + PCI_DMA_FROMDEVICE); - prefetch(&qs->lro_frag_tbl); + prefetch(qs->lro_va); rx_frag += nr_frags; rx_frag->page = sd->pg_chunk.page; @@ -2047,6 +2110,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs, qs->lro_frag_tbl.nr_frags++; qs->lro_frag_tbl.len = frag_len; + if (!complete) return; @@ -2236,6 +2300,8 @@ no_mem: if (fl->use_pages) { void *addr = fl->sdesc[fl->cidx].pg_chunk.va; + prefetch(&qs->lro_frag_tbl); + prefetch(addr); #if L1_CACHE_BYTES < 128 prefetch(addr + L1_CACHE_BYTES); @@ -2972,21 +3038,23 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0; q->fl[0].order = FL0_PG_ORDER; q->fl[1].order = FL1_PG_ORDER; + q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE; + q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE; spin_lock_irq(&adapter->sge.reg_lock); /* FL threshold comparison uses < */ ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx, q->rspq.phys_addr, q->rspq.size, - q->fl[0].buf_size, 1, 0); + q->fl[0].buf_size - SGE_PG_RSVD, 1, 0); if (ret) goto err_unlock; for (i = 0; i < SGE_RXQ_PER_SET; ++i) { ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0, q->fl[i].phys_addr, q->fl[i].size, - q->fl[i].buf_size, p->cong_thres, 1, - 0); + q->fl[i].buf_size - SGE_PG_RSVD, + p->cong_thres, 1, 0); if (ret) goto err_unlock; } -- cgit v0.10.2 From f9384d41c02408dd404aa64d66d0ef38adcf6479 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 27 Mar 2009 01:09:17 -0700 Subject: sparc64: Fix MM refcount check in smp_flush_tlb_pending(). As explained by Benjamin Herrenschmidt: > CPU 0 is running the context, task->mm == task->active_mm == your > context. The CPU is in userspace happily churning things. > > CPU 1 used to run it, not anymore, it's now running fancyfsd which > is a kernel thread, but current->active_mm still points to that > same context. > > Because there's only one "real" user, mm_users is 1 (but mm_count is > elevated, it's just that the presence on CPU 1 as active_mm has no > effect on mm_count(). > > At this point, fancyfsd decides to invalidate a mapping currently mapped > by that context, for example because a networked file has changed > remotely or something like that, using unmap_mapping_ranges(). > > So CPU 1 goes into the zapping code, which eventually ends up calling > flush_tlb_pending(). Your test will succeed, as current->active_mm is > indeed the target mm for the flush, and mm_users is indeed 1. 
So you
> will -not- send an IPI to the other CPU, and CPU 0 will continue happily
> accessing the pages that should have been unmapped.

To fix this problem, check ->mm instead of ->active_mm, and this means:

> So if you test current->mm, you effectively account for mm_users == 1,
> so the only way the mm can be active on another processor is as a lazy
> mm for a kernel thread. So your test should work properly as long
> as you don't have a HW that will do speculative TLB reloads into the
> TLB on that other CPU (and even if you do, you flush-on-switch-in should
> get rid of any crap here).

And therefore we should be OK.

Signed-off-by: David S. Miller

diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 6cd1a5b..79457f6 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1031,7 +1031,7 @@ void smp_fetch_global_regs(void)
  * If the address space is non-shared (ie. mm->count == 1) we avoid
  * cross calls when we want to flush the currently running process's
  * tlb state. This is done by clearing all cpu bits except the current
- * processor's in current->active_mm->cpu_vm_mask and performing the
+ * processor's in current->mm->cpu_vm_mask and performing the
  * flush locally only. This will force any subsequent cpus which run
  * this task to flush the context from the local tlb if the process
  * migrates to another cpu (again).
@@ -1074,7 +1074,7 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long
 	u32 ctx = CTX_HWBITS(mm->context);
 	int cpu = get_cpu();
 
-	if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
+	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
 		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
 	else
 		smp_cross_call_masked(&xcall_flush_tlb_pending,
-- cgit v0.10.2


From bd14ba842cd29edbbd40e566194bd33cf0c92f6c Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Fri, 27 Mar 2009 01:10:58 -0700
Subject: gianfar: Fix kfree(skb)

Noticed by Li Yang.

Signed-off-by: David S. Miller

diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index a7a6737..44cbf26 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1317,7 +1317,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN);
 		if (!skb_new) {
 			dev->stats.tx_errors++;
-			kfree(skb);
+			kfree_skb(skb);
 			return NETDEV_TX_OK;
 		}
 		kfree_skb(skb);
-- cgit v0.10.2


From ec1ab0abde0af586a59541ad71841f022dcac3e7 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Fri, 9 May 2008 12:35:29 +0200
Subject: affs: fix missing unlocks in affs_remove_link

In two error cases affs_remove_link doesn't call affs_unlock_dir to
release the i_hash_lock semaphore.

Signed-off-by: Christoph Hellwig
Signed-off-by: Al Viro

diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index 8055730..7d0f0a3 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -179,14 +179,18 @@ affs_remove_link(struct dentry *dentry)
 	affs_lock_dir(dir);
 	affs_fix_dcache(dentry, link_ino);
 	retval = affs_remove_hash(dir, link_bh);
-	if (retval)
+	if (retval) {
+		affs_unlock_dir(dir);
 		goto done;
+	}
 	mark_buffer_dirty_inode(link_bh, inode);
 
 	memcpy(AFFS_TAIL(sb, bh)->name, AFFS_TAIL(sb, link_bh)->name, 32);
 	retval = affs_insert_hash(dir, bh);
-	if (retval)
+	if (retval) {
+		affs_unlock_dir(dir);
 		goto done;
+	}
 	mark_buffer_dirty_inode(bh, inode);
 
 	affs_unlock_dir(dir);
-- cgit v0.10.2


From 2b1c6bd77d4e6a727ffac8630cd154b2144b751a Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Fri, 28 Nov 2008 10:09:09 +0100
Subject: generic compat_sys_ustat

Due to a different size of ino_t ustat needs a compat handler, but
currently only x86 and mips provide one. Add a generic compat_sys_ustat
and switch all architectures over to it. Instead of doing various user
copy hacks compat_sys_ustat just reimplements sys_ustat as it's
trivial. This was suggested by Arnd Bergmann.

Found by Eric Sandeen when running xfstests/017 on ppc64, which causes
stack smashing warnings on RHEL/Fedora due to the too large amount of
data written by the syscall.

Signed-off-by: Christoph Hellwig
Signed-off-by: Al Viro

diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S
index a46f839..af9405c 100644
--- a/arch/ia64/ia32/ia32_entry.S
+++ b/arch/ia64/ia32/ia32_entry.S
@@ -240,7 +240,7 @@ ia32_syscall_table:
 	data8 sys_ni_syscall
 	data8 sys_umask		/* 60 */
 	data8 sys_chroot
-	data8 sys_ustat
+	data8 compat_sys_ustat
 	data8 sys_dup2
 	data8 sys_getppid
 	data8 sys_getpgrp	/* 65 */
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 1a86f84..784859c 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -356,40 +356,6 @@ SYSCALL_DEFINE1(32_personality, unsigned long, personality)
 	return ret;
 }
 
-/* ustat compatibility */
-struct ustat32 {
-	compat_daddr_t	f_tfree;
-	compat_ino_t	f_tinode;
-	char		f_fname[6];
-	char		f_fpack[6];
-};
-
-extern asmlinkage long sys_ustat(dev_t dev, struct ustat __user * ubuf);
-
-SYSCALL_DEFINE2(32_ustat, dev_t, dev, struct ustat32 __user *, ubuf32)
-{
-	int err;
-	struct ustat tmp;
-	struct ustat32 tmp32;
-	mm_segment_t old_fs = get_fs();
-
-	set_fs(KERNEL_DS);
-	err = sys_ustat(dev, (struct ustat __user *)&tmp);
-	set_fs(old_fs);
-
-	if (err)
-		goto out;
-
-	memset(&tmp32, 0, sizeof(struct ustat32));
-	tmp32.f_tfree = tmp.f_tfree;
-	tmp32.f_tinode = tmp.f_tinode;
-
-	err = copy_to_user(ubuf32, &tmp32, sizeof(struct ustat32)) ?
-EFAULT : 0; - -out: - return err; -} - SYSCALL_DEFINE4(32_sendfile, long, out_fd, long, in_fd, compat_off_t __user *, offset, s32, count) { diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 7438e92..f61d6b0 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -253,7 +253,7 @@ EXPORT(sysn32_call_table) PTR compat_sys_utime /* 6130 */ PTR sys_mknod PTR sys_32_personality - PTR sys_32_ustat + PTR compat_sys_ustat PTR compat_sys_statfs PTR compat_sys_fstatfs /* 6135 */ PTR sys_sysfs diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index b0fef4f..60997f1 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -265,7 +265,7 @@ sys_call_table: PTR sys_olduname PTR sys_umask /* 4060 */ PTR sys_chroot - PTR sys_32_ustat + PTR compat_sys_ustat PTR sys_dup2 PTR sys_getppid PTR sys_getpgrp /* 4065 */ diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index 303d2b6..03b9a01 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S @@ -130,7 +130,7 @@ ENTRY_OURS(newuname) ENTRY_SAME(umask) /* 60 */ ENTRY_SAME(chroot) - ENTRY_SAME(ustat) + ENTRY_COMP(ustat) ENTRY_SAME(dup2) ENTRY_SAME(getppid) ENTRY_SAME(getpgrp) /* 65 */ diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index 72353f6..fe16649 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h @@ -65,7 +65,7 @@ SYSCALL(ni_syscall) SYSX(sys_ni_syscall,sys_olduname, sys_olduname) COMPAT_SYS_SPU(umask) SYSCALL_SPU(chroot) -SYSCALL(ustat) +COMPAT_SYS(ustat) SYSCALL_SPU(dup2) SYSCALL_SPU(getppid) SYSCALL_SPU(getpgrp) diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S index 62c706e..87cf5a7 100644 --- a/arch/s390/kernel/compat_wrapper.S +++ b/arch/s390/kernel/compat_wrapper.S @@ -252,7 +252,7 @@ sys32_chroot_wrapper: sys32_ustat_wrapper: llgfr %r2,%r2 # dev_t llgtr %r3,%r3 # struct ustat * - jg sys_ustat + jg compat_sys_ustat .globl sys32_dup2_wrapper sys32_dup2_wrapper: diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index 5a0d76d..8ef8876 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S @@ -557,7 +557,7 @@ ia32_sys_call_table: .quad sys32_olduname .quad sys_umask /* 60 */ .quad sys_chroot - .quad sys32_ustat + .quad compat_sys_ustat .quad sys_dup2 .quad sys_getppid .quad sys_getpgrp /* 65 */ diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c index 6c0d7f6..efac92f 100644 --- a/arch/x86/ia32/sys_ia32.c +++ b/arch/x86/ia32/sys_ia32.c @@ -638,28 +638,6 @@ long sys32_uname(struct old_utsname __user *name) return err ? 
-EFAULT : 0; } -long sys32_ustat(unsigned dev, struct ustat32 __user *u32p) -{ - struct ustat u; - mm_segment_t seg; - int ret; - - seg = get_fs(); - set_fs(KERNEL_DS); - ret = sys_ustat(dev, (struct ustat __user *)&u); - set_fs(seg); - if (ret < 0) - return ret; - - if (!access_ok(VERIFY_WRITE, u32p, sizeof(struct ustat32)) || - __put_user((__u32) u.f_tfree, &u32p->f_tfree) || - __put_user((__u32) u.f_tinode, &u32p->f_tfree) || - __copy_to_user(&u32p->f_fname, u.f_fname, sizeof(u.f_fname)) || - __copy_to_user(&u32p->f_fpack, u.f_fpack, sizeof(u.f_fpack))) - ret = -EFAULT; - return ret; -} - asmlinkage long sys32_execve(char __user *name, compat_uptr_t __user *argv, compat_uptr_t __user *envp, struct pt_regs *regs) { diff --git a/arch/x86/include/asm/ia32.h b/arch/x86/include/asm/ia32.h index 50ca486..1f7e625 100644 --- a/arch/x86/include/asm/ia32.h +++ b/arch/x86/include/asm/ia32.h @@ -129,13 +129,6 @@ typedef struct compat_siginfo { } _sifields; } compat_siginfo_t; -struct ustat32 { - __u32 f_tfree; - compat_ino_t f_tinode; - char f_fname[6]; - char f_fpack[6]; -}; - #define IA32_STACK_TOP IA32_PAGE_OFFSET #ifdef __KERNEL__ diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h index ffb08be..72a6dcd 100644 --- a/arch/x86/include/asm/sys_ia32.h +++ b/arch/x86/include/asm/sys_ia32.h @@ -70,8 +70,6 @@ struct old_utsname; asmlinkage long sys32_olduname(struct oldold_utsname __user *); long sys32_uname(struct old_utsname __user *); -long sys32_ustat(unsigned, struct ustat32 __user *); - asmlinkage long sys32_execve(char __user *, compat_uptr_t __user *, compat_uptr_t __user *, struct pt_regs *); asmlinkage long sys32_clone(unsigned int, unsigned int, struct pt_regs *); diff --git a/fs/compat.c b/fs/compat.c index d0145ca..4e0db94 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -378,6 +378,34 @@ out: return error; } +/* + * This is a copy of sys_ustat, just dealing with a structure layout. + * Given how simple this syscall is that apporach is more maintainable + * than the various conversion hacks. 
+ */ +asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u) +{ + struct super_block *sb; + struct compat_ustat tmp; + struct kstatfs sbuf; + int err; + + sb = user_get_super(new_decode_dev(dev)); + if (!sb) + return -EINVAL; + err = vfs_statfs(sb->s_root, &sbuf); + drop_super(sb); + if (err) + return err; + + memset(&tmp, 0, sizeof(struct compat_ustat)); + tmp.f_tfree = sbuf.f_bfree; + tmp.f_tinode = sbuf.f_ffree; + if (copy_to_user(u, &tmp, sizeof(struct compat_ustat))) + return -EFAULT; + return 0; +} + static int get_compat_flock(struct flock *kfl, struct compat_flock __user *ufl) { if (!access_ok(VERIFY_READ, ufl, sizeof(*ufl)) || diff --git a/include/linux/compat.h b/include/linux/compat.h index 3fd2194..b880864 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -125,6 +125,13 @@ struct compat_dirent { char d_name[256]; }; +struct compat_ustat { + compat_daddr_t f_tfree; + compat_ino_t f_tinode; + char f_fname[6]; + char f_fpack[6]; +}; + typedef union compat_sigval { compat_int_t sival_int; compat_uptr_t sival_ptr; @@ -178,6 +185,7 @@ long compat_sys_semtimedop(int semid, struct sembuf __user *tsems, unsigned nsems, const struct compat_timespec __user *timeout); asmlinkage long compat_sys_keyctl(u32 option, u32 arg2, u32 arg3, u32 arg4, u32 arg5); +asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32); asmlinkage ssize_t compat_sys_readv(unsigned long fd, const struct compat_iovec __user *vec, unsigned long vlen); -- cgit v0.10.2 From b6520c81934848cef126d93951f7ce242e0f656d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 5 Jan 2009 19:10:37 +0100 Subject: cleanup d_add_ci Make sure that comments describe what's going on and not how, and always use __d_instantiate instead of two separate branches, one with d_instantiate and one with __d_instantiate. Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro diff --git a/fs/dcache.c b/fs/dcache.c index 07e2d4a..90bbd7e 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1247,15 +1247,18 @@ struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode, struct dentry *found; struct dentry *new; - /* Does a dentry matching the name exist already? */ + /* + * First check if a dentry matching the name already exists, + * if not go ahead and create it now. + */ found = d_hash_and_lookup(dentry->d_parent, name); - /* If not, create it now and return */ if (!found) { new = d_alloc(dentry->d_parent, name); if (!new) { error = -ENOMEM; goto err_out; } + found = d_splice_alias(inode, new); if (found) { dput(new); @@ -1263,61 +1266,46 @@ struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode, } return new; } - /* Matching dentry exists, check if it is negative. */ + + /* + * If a matching dentry exists, and it's not negative use it. + * + * Decrement the reference count to balance the iget() done + * earlier on. + */ if (found->d_inode) { if (unlikely(found->d_inode != inode)) { /* This can't happen because bad inodes are unhashed. */ BUG_ON(!is_bad_inode(inode)); BUG_ON(!is_bad_inode(found->d_inode)); } - /* - * Already have the inode and the dentry attached, decrement - * the reference count to balance the iget() done - * earlier on. We found the dentry using d_lookup() so it - * cannot be disconnected and thus we do not need to worry - * about any NFS/disconnectedness issues here. - */ iput(inode); return found; } + /* * Negative dentry: instantiate it unless the inode is a directory and - * has a 'disconnected' dentry (i.e. 
IS_ROOT and DCACHE_DISCONNECTED), - * in which case d_move() that in place of the found dentry. + * already has a dentry. */ - if (!S_ISDIR(inode->i_mode)) { - /* Not a directory; everything is easy. */ - d_instantiate(found, inode); - return found; - } spin_lock(&dcache_lock); - if (list_empty(&inode->i_dentry)) { - /* - * Directory without a 'disconnected' dentry; we need to do - * d_instantiate() by hand because it takes dcache_lock which - * we already hold. - */ + if (!S_ISDIR(inode->i_mode) || list_empty(&inode->i_dentry)) { __d_instantiate(found, inode); spin_unlock(&dcache_lock); security_d_instantiate(found, inode); return found; } + /* - * Directory with a 'disconnected' dentry; get a reference to the - * 'disconnected' dentry. + * In case a directory already has a (disconnected) entry grab a + * reference to it, move it in place and use it. */ new = list_entry(inode->i_dentry.next, struct dentry, d_alias); dget_locked(new); spin_unlock(&dcache_lock); - /* Do security vodoo. */ security_d_instantiate(found, inode); - /* Move new in place of found. */ d_move(new, found); - /* Balance the iget() we did above. */ iput(inode); - /* Throw away found. */ dput(found); - /* Use new as the actual dentry. */ return new; err_out: -- cgit v0.10.2 From c8fe8f30c7fe6ce6fc44a1db7d5bfa5144cd9211 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 5 Jan 2009 19:27:23 +0100 Subject: cleanup may_open Add a switch for the various i_mode fmt cases, and remove the comment about writeability of devices nodes - that part is handled in inode_permission and comment on (briefly) there. Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro diff --git a/fs/namei.c b/fs/namei.c index bbc15c2..2a40409 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1486,24 +1486,22 @@ int may_open(struct path *path, int acc_mode, int flag) if (!inode) return -ENOENT; - if (S_ISLNK(inode->i_mode)) + switch (inode->i_mode & S_IFMT) { + case S_IFLNK: return -ELOOP; - - if (S_ISDIR(inode->i_mode) && (acc_mode & MAY_WRITE)) - return -EISDIR; - - /* - * FIFO's, sockets and device files are special: they don't - * actually live on the filesystem itself, and as such you - * can write to them even if the filesystem is read-only. - */ - if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { - flag &= ~O_TRUNC; - } else if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) { + case S_IFDIR: + if (acc_mode & MAY_WRITE) + return -EISDIR; + break; + case S_IFBLK: + case S_IFCHR: if (path->mnt->mnt_flags & MNT_NODEV) return -EACCES; - + /*FALLTHRU*/ + case S_IFIFO: + case S_IFSOCK: flag &= ~O_TRUNC; + break; } error = inode_permission(inode, acc_mode); -- cgit v0.10.2 From 9e6766cc8c125cf406960a5bfdf1455473f4835c Mon Sep 17 00:00:00 2001 From: Duane Griffin Date: Thu, 8 Jan 2009 22:43:48 +0000 Subject: ufs: validate maximum fast symlink size from superblock The maximum fast symlink size is set in the superblock of certain types of UFS filesystem. Before using it we need to check that it isn't longer than the available space we have in the inode. 
Signed-off-by: Duane Griffin
Signed-off-by: Al Viro

diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 261a1c2..e1c1fc5 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -636,6 +636,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
 	unsigned block_size, super_block_size;
 	unsigned flags;
 	unsigned super_block_offset;
+	unsigned maxsymlen;
 	int ret = -EINVAL;
 
 	uspi = NULL;
@@ -1069,6 +1070,16 @@ magic_found:
 	uspi->s_maxsymlinklen =
 	    fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_maxsymlinklen);
 
+	if (uspi->fs_magic == UFS2_MAGIC)
+		maxsymlen = 2 * 4 * (UFS_NDADDR + UFS_NINDIR);
+	else
+		maxsymlen = 4 * (UFS_NDADDR + UFS_NINDIR);
+	if (uspi->s_maxsymlinklen > maxsymlen) {
+		ufs_warning(sb, __func__, "ufs_read_super: excessive maximum "
+			    "fast symlink size (%u)\n", uspi->s_maxsymlinklen);
+		uspi->s_maxsymlinklen = maxsymlen;
+	}
+
 	inode = ufs_iget(sb, UFS_ROOTINO);
 	if (IS_ERR(inode)) {
 		ret = PTR_ERR(inode);
-- cgit v0.10.2


From f33219b7a90c4779a0b59e11fb35ebc4542db328 Mon Sep 17 00:00:00 2001
From: Duane Griffin
Date: Thu, 8 Jan 2009 22:43:49 +0000
Subject: ufs: don't truncate longer ufs2 fast symlinks

ufs2 fast symlinks can be twice as long as ufs ones, however the code
was using the ufs size in various places. Fix that so ufs2 symlinks
over 60 characters aren't truncated.

Note that we copy the entire area instead of using the maxsymlinklen
field from the superblock. This way we will be more robust against
corruption (of the superblock).

While we are at it, use memcpy instead of open-coding it with for loops.

Signed-off-by: Duane Griffin
Signed-off-by: Al Viro

diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 39f8778..ac8b324 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -622,7 +622,6 @@ static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
 	struct ufs_inode_info *ufsi = UFS_I(inode);
 	struct super_block *sb = inode->i_sb;
 	mode_t mode;
-	unsigned i;
 
 	/*
 	 * Copy data to the in-core inode.
@@ -655,11 +654,11 @@ static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode) if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) { - for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++) - ufsi->i_u1.i_data[i] = ufs_inode->ui_u2.ui_addr.ui_db[i]; + memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr, + sizeof(ufs_inode->ui_u2.ui_addr)); } else { - for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++) - ufsi->i_u1.i_symlink[i] = ufs_inode->ui_u2.ui_symlink[i]; + memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink, + sizeof(ufs_inode->ui_u2.ui_symlink)); } return 0; } @@ -669,7 +668,6 @@ static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode) struct ufs_inode_info *ufsi = UFS_I(inode); struct super_block *sb = inode->i_sb; mode_t mode; - unsigned i; UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino); /* @@ -704,12 +702,11 @@ static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode) */ if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) { - for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++) - ufsi->i_u1.u2_i_data[i] = - ufs2_inode->ui_u2.ui_addr.ui_db[i]; + memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr, + sizeof(ufs2_inode->ui_u2.ui_addr)); } else { - for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++) - ufsi->i_u1.i_symlink[i] = ufs2_inode->ui_u2.ui_symlink[i]; + memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink, + sizeof(ufs2_inode->ui_u2.ui_symlink)); } return 0; } @@ -781,7 +778,6 @@ static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode) { struct super_block *sb = inode->i_sb; struct ufs_inode_info *ufsi = UFS_I(inode); - unsigned i; ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode); ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink); @@ -809,12 +805,12 @@ static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode) /* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */ ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0]; } else if (inode->i_blocks) { - for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++) - ufs_inode->ui_u2.ui_addr.ui_db[i] = ufsi->i_u1.i_data[i]; + memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data, + sizeof(ufs_inode->ui_u2.ui_addr)); } else { - for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++) - ufs_inode->ui_u2.ui_symlink[i] = ufsi->i_u1.i_symlink[i]; + memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink, + sizeof(ufs_inode->ui_u2.ui_symlink)); } if (!inode->i_nlink) @@ -825,7 +821,6 @@ static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode) { struct super_block *sb = inode->i_sb; struct ufs_inode_info *ufsi = UFS_I(inode); - unsigned i; UFSD("ENTER\n"); ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode); @@ -850,11 +845,11 @@ static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode) /* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */ ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0]; } else if (inode->i_blocks) { - for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++) - ufs_inode->ui_u2.ui_addr.ui_db[i] = ufsi->i_u1.u2_i_data[i]; + memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data, + sizeof(ufs_inode->ui_u2.ui_addr)); } else { - for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++) - ufs_inode->ui_u2.ui_symlink[i] = ufsi->i_u1.i_symlink[i]; + memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink, + sizeof(ufs_inode->ui_u2.ui_symlink)); } if (!inode->i_nlink) diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h index 11c0351..69b3427 100644 
--- a/fs/ufs/ufs.h +++ b/fs/ufs/ufs.h @@ -23,7 +23,7 @@ struct ufs_sb_info { struct ufs_inode_info { union { __fs32 i_data[15]; - __u8 i_symlink[4*15]; + __u8 i_symlink[2 * 4 * 15]; __fs64 u2_i_data[15]; } i_u1; __u32 i_flags; -- cgit v0.10.2 From b12903f1384cd176a3994a6bf6caf5a482169cc8 Mon Sep 17 00:00:00 2001 From: Duane Griffin Date: Thu, 8 Jan 2009 22:43:50 +0000 Subject: ufs: ensure fast symlinks are NUL-terminated Ensure fast symlink targets are NUL-terminated, even if corrupted on-disk. Signed-off-by: Duane Griffin Signed-off-by: Al Viro diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index ac8b324..3d2512c 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c @@ -658,7 +658,8 @@ static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode) sizeof(ufs_inode->ui_u2.ui_addr)); } else { memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink, - sizeof(ufs_inode->ui_u2.ui_symlink)); + sizeof(ufs_inode->ui_u2.ui_symlink) - 1); + ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0; } return 0; } @@ -706,7 +707,8 @@ static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode) sizeof(ufs2_inode->ui_u2.ui_addr)); } else { memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink, - sizeof(ufs2_inode->ui_u2.ui_symlink)); + sizeof(ufs2_inode->ui_u2.ui_symlink) - 1); + ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0; } return 0; } -- cgit v0.10.2 From 723be1f30046a46471b00106ebef9d8c832f12e9 Mon Sep 17 00:00:00 2001 From: Duane Griffin Date: Thu, 8 Jan 2009 22:43:51 +0000 Subject: ufs: copy symlink data into the correct union member Copy symlink data into the union member it is accessed through. Although this shouldn't make a difference to behaviour it makes the code easier to follow and grep through. It may also prevent problems if the struct/union definitions change in the future. Signed-off-by: Duane Griffin Signed-off-by: Al Viro diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c index e3a9b1f..23119fe 100644 --- a/fs/ufs/namei.c +++ b/fs/ufs/namei.c @@ -147,7 +147,7 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry, } else { /* fast symlink */ inode->i_op = &ufs_fast_symlink_inode_operations; - memcpy((char*)&UFS_I(inode)->i_u1.i_data,symname,l); + memcpy(UFS_I(inode)->i_u1.i_symlink, symname, l); inode->i_size = l-1; } mark_inode_dirty(inode); -- cgit v0.10.2 From 10f303ae1e5e77a9f7cb053e6329906afb132c67 Mon Sep 17 00:00:00 2001 From: Cheng Renquan Date: Wed, 14 Jan 2009 17:01:33 +0800 Subject: do_pipe cleanup: drop its last user in arch/alpha/ The last user of do_pipe is in arch/alpha/, after replacing it with do_pipe_flags, the do_pipe can be totally dropped. Signed-off-by: Cheng Renquan Acked-by: Richard Henderson Signed-off-by: Al Viro diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S index e4a54b615..b45d913 100644 --- a/arch/alpha/kernel/entry.S +++ b/arch/alpha/kernel/entry.S @@ -903,8 +903,9 @@ sys_alpha_pipe: stq $26, 0($sp) .prologue 0 + mov $31, $17 lda $16, 8($sp) - jsr $26, do_pipe + jsr $26, do_pipe_flags ldq $26, 0($sp) bne $0, 1f diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index ae41f09..42ee059 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -46,8 +46,6 @@ #include #include -extern int do_pipe(int *); - /* * Brk needs to return an error. Still support Linux's brk(0) query idiom, * which OSF programs just shouldn't be doing. 
We're still not quite diff --git a/fs/pipe.c b/fs/pipe.c index 14f502b..df37195 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -1034,11 +1034,6 @@ int do_pipe_flags(int *fd, int flags) return error; } -int do_pipe(int *fd) -{ - return do_pipe_flags(fd, 0); -} - /* * sys_pipe() is the normal C calling standard for creating * a pipe. It's not the way Unix traditionally does this, though. diff --git a/include/linux/fs.h b/include/linux/fs.h index 92734c0..51de83b 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1881,7 +1881,6 @@ static inline void allow_write_access(struct file *file) if (file) atomic_inc(&file->f_path.dentry->d_inode->i_writecount); } -extern int do_pipe(int *); extern int do_pipe_flags(int *, int); extern struct file *create_read_pipe(struct file *f, int flags); extern struct file *create_write_pipe(int flags); -- cgit v0.10.2 From c2aca5e529a2499d454c41e01f59f1d5fe4a1364 Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Tue, 20 Jan 2009 10:29:45 +0000 Subject: vfs: Update fs.h to use inline functions when no file locking set This avoids various issues which might give rise to compiler warnings about missing functions and/or unused variable with the previous macros. This also fixes a bug where one of the macros was returning 0, but it should have been void. Reported-by: Randy Dunlap Signed-off-by: Steven Whitehouse Tested-by: Randy Dunlap Signed-off-by: Al Viro diff --git a/include/linux/fs.h b/include/linux/fs.h index 51de83b..d84020b 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1063,34 +1063,147 @@ extern int lease_modify(struct file_lock **, int); extern int lock_may_read(struct inode *, loff_t start, unsigned long count); extern int lock_may_write(struct inode *, loff_t start, unsigned long count); #else /* !CONFIG_FILE_LOCKING */ -#define fcntl_getlk(a, b) ({ -EINVAL; }) -#define fcntl_setlk(a, b, c, d) ({ -EACCES; }) +static inline int fcntl_getlk(struct file *file, struct flock __user *user) +{ + return -EINVAL; +} + +static inline int fcntl_setlk(unsigned int fd, struct file *file, + unsigned int cmd, struct flock __user *user) +{ + return -EACCES; +} + #if BITS_PER_LONG == 32 -#define fcntl_getlk64(a, b) ({ -EINVAL; }) -#define fcntl_setlk64(a, b, c, d) ({ -EACCES; }) +static inline int fcntl_getlk64(struct file *file, struct flock64 __user *user) +{ + return -EINVAL; +} + +static inline int fcntl_setlk64(unsigned int fd, struct file *file, + unsigned int cmd, struct flock64 __user *user) +{ + return -EACCES; +} #endif -#define fcntl_setlease(a, b, c) ({ 0; }) -#define fcntl_getlease(a) ({ 0; }) -#define locks_init_lock(a) ({ }) -#define __locks_copy_lock(a, b) ({ }) -#define locks_copy_lock(a, b) ({ }) -#define locks_remove_posix(a, b) ({ }) -#define locks_remove_flock(a) ({ }) -#define posix_test_lock(a, b) ({ 0; }) -#define posix_lock_file(a, b, c) ({ -ENOLCK; }) -#define posix_lock_file_wait(a, b) ({ -ENOLCK; }) -#define posix_unblock_lock(a, b) (-ENOENT) -#define vfs_test_lock(a, b) ({ 0; }) -#define vfs_lock_file(a, b, c, d) (-ENOLCK) -#define vfs_cancel_lock(a, b) ({ 0; }) -#define flock_lock_file_wait(a, b) ({ -ENOLCK; }) -#define __break_lease(a, b) ({ 0; }) -#define lease_get_mtime(a, b) ({ }) -#define generic_setlease(a, b, c) ({ -EINVAL; }) -#define vfs_setlease(a, b, c) ({ -EINVAL; }) -#define lease_modify(a, b) ({ -EINVAL; }) -#define lock_may_read(a, b, c) ({ 1; }) -#define lock_may_write(a, b, c) ({ 1; }) +static inline int fcntl_setlease(unsigned int fd, struct file *filp, long arg) +{ + return 0; +} + +static inline int 
fcntl_getlease(struct file *filp) +{ + return 0; +} + +static inline void locks_init_lock(struct file_lock *fl) +{ + return; +} + +static inline void __locks_copy_lock(struct file_lock *new, struct file_lock *fl) +{ + return; +} + +static inline void locks_copy_lock(struct file_lock *new, struct file_lock *fl) +{ + return; +} + +static inline void locks_remove_posix(struct file *filp, fl_owner_t owner) +{ + return; +} + +static inline void locks_remove_flock(struct file *filp) +{ + return; +} + +static inline void posix_test_lock(struct file *filp, struct file_lock *fl) +{ + return; +} + +static inline int posix_lock_file(struct file *filp, struct file_lock *fl, + struct file_lock *conflock) +{ + return -ENOLCK; +} + +static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl) +{ + return -ENOLCK; +} + +static inline int posix_unblock_lock(struct file *filp, + struct file_lock *waiter) +{ + return -ENOENT; +} + +static inline int vfs_test_lock(struct file *filp, struct file_lock *fl) +{ + return 0; +} + +static inline int vfs_lock_file(struct file *filp, unsigned int cmd, + struct file_lock *fl, struct file_lock *conf) +{ + return -ENOLCK; +} + +static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl) +{ + return 0; +} + +static inline int flock_lock_file_wait(struct file *filp, + struct file_lock *request) +{ + return -ENOLCK; +} + +static inline int __break_lease(struct inode *inode, unsigned int mode) +{ + return 0; +} + +static inline void lease_get_mtime(struct inode *inode, struct timespec *time) +{ + return; +} + +static inline int generic_setlease(struct file *filp, long arg, + struct file_lock **flp) +{ + return -EINVAL; +} + +static inline int vfs_setlease(struct file *filp, long arg, + struct file_lock **lease) +{ + return -EINVAL; +} + +static inline int lease_modify(struct file_lock **before, int arg) +{ + return -EINVAL; +} + +static inline int lock_may_read(struct inode *inode, loff_t start, + unsigned long len) +{ + return 1; +} + +static inline int lock_may_write(struct inode *inode, loff_t start, + unsigned long len) +{ + return 1; +} + #endif /* !CONFIG_FILE_LOCKING */ -- cgit v0.10.2 From af5df56688acfb75c1b15b4e000ec5e82a9cdc29 Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Tue, 20 Jan 2009 10:29:46 +0000 Subject: vfs: Further changes from macro to inline function in fs.h There is a second set of macros for when CONFIG_FILE_LOCKING is not set. This patch updates those to become inline functions as well. 
Signed-off-by: Steven Whitehouse Signed-off-by: Al Viro diff --git a/include/linux/fs.h b/include/linux/fs.h index d84020b..5f74d61 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1800,13 +1800,44 @@ static inline int break_lease(struct inode *inode, unsigned int mode) return 0; } #else /* !CONFIG_FILE_LOCKING */ -#define locks_mandatory_locked(a) ({ 0; }) -#define locks_mandatory_area(a, b, c, d, e) ({ 0; }) -#define __mandatory_lock(a) ({ 0; }) -#define mandatory_lock(a) ({ 0; }) -#define locks_verify_locked(a) ({ 0; }) -#define locks_verify_truncate(a, b, c) ({ 0; }) -#define break_lease(a, b) ({ 0; }) +static inline int locks_mandatory_locked(struct inode *inode) +{ + return 0; +} + +static inline int locks_mandatory_area(int rw, struct inode *inode, + struct file *filp, loff_t offset, + size_t count) +{ + return 0; +} + +static inline int __mandatory_lock(struct inode *inode) +{ + return 0; +} + +static inline int mandatory_lock(struct inode *inode) +{ + return 0; +} + +static inline int locks_verify_locked(struct inode *inode) +{ + return 0; +} + +static inline int locks_verify_truncate(struct inode *inode, struct file *filp, + size_t size) +{ + return 0; +} + +static inline int break_lease(struct inode *inode, unsigned int mode) +{ + return 0; +} + #endif /* CONFIG_FILE_LOCKING */ /* fs/open.c */ -- cgit v0.10.2 From a9f184f02aa49d46c4c35311d93cbcd1c61149df Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Wed, 28 Jan 2009 16:57:12 -0800 Subject: devpts: Must release s_umount on error We should drop the ->s_umount mutex if an error occurs after the sget()/grab_super() call. This was introduced when adding support for multiple instances of devpts and noticed during a code review/reorg. Signed-off-by: Sukadev Bhattiprolu Signed-off-by: Al Viro diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index bff4052..140b431 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -385,6 +385,7 @@ static int new_pts_mount(struct file_system_type *fs_type, int flags, fail: dput(mnt->mnt_sb->s_root); + up_write(&mnt->mnt_sb->s_umount); deactivate_super(mnt->mnt_sb); return err; } @@ -473,6 +474,7 @@ static int init_pts_mount(struct file_system_type *fs_type, int flags, err = mknod_ptmx(mnt->mnt_sb); if (err) { dput(mnt->mnt_sb->s_root); + up_write(&mnt->mnt_sb->s_umount); deactivate_super(mnt->mnt_sb); } -- cgit v0.10.2 From e56980d451904b623573ef4966cbab768e433c79 Mon Sep 17 00:00:00 2001 From: Jan Engelhardt Date: Wed, 11 Feb 2009 13:14:54 -0800 Subject: fs: make struct dentry->d_op const This change will allow for tagging many dentry_operations const in the source tree. 
Signed-off-by: Jan Engelhardt Cc: Al Viro Signed-off-by: Andrew Morton Signed-off-by: Al Viro diff --git a/include/linux/dcache.h b/include/linux/dcache.h index c66d224..1515636 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -112,7 +112,7 @@ struct dentry { struct list_head d_subdirs; /* our children */ struct list_head d_alias; /* inode alias list */ unsigned long d_time; /* used by d_revalidate */ - struct dentry_operations *d_op; + const struct dentry_operations *d_op; struct super_block *d_sb; /* The root of the dentry tree */ void *d_fsdata; /* fs-specific data */ -- cgit v0.10.2 From f786aa90e026f2174bb0c26d49f338c5c46ede55 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 20 Feb 2009 05:51:22 +0000 Subject: constify dentry_operations: NFS Signed-off-by: Al Viro diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 672368f..78bf72f 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -899,7 +899,7 @@ static void nfs_dentry_iput(struct dentry *dentry, struct inode *inode) iput(inode); } -struct dentry_operations nfs_dentry_operations = { +const struct dentry_operations nfs_dentry_operations = { .d_revalidate = nfs_lookup_revalidate, .d_delete = nfs_dentry_delete, .d_iput = nfs_dentry_iput, @@ -967,7 +967,7 @@ out: #ifdef CONFIG_NFS_V4 static int nfs_open_revalidate(struct dentry *, struct nameidata *); -struct dentry_operations nfs4_dentry_operations = { +const struct dentry_operations nfs4_dentry_operations = { .d_revalidate = nfs_open_revalidate, .d_delete = nfs_dentry_delete, .d_iput = nfs_dentry_iput, diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 4e4d332..84345de 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -179,7 +179,7 @@ struct nfs4_state_recovery_ops { int (*recover_lock)(struct nfs4_state *, struct file_lock *); }; -extern struct dentry_operations nfs4_dentry_operations; +extern const struct dentry_operations nfs4_dentry_operations; extern const struct inode_operations nfs4_dir_inode_operations; /* inode.c */ diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index db867b0..8cc8807 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -415,7 +415,7 @@ extern const struct inode_operations nfs_dir_inode_operations; extern const struct inode_operations nfs3_dir_inode_operations; #endif /* CONFIG_NFS_V3 */ extern const struct file_operations nfs_dir_operations; -extern struct dentry_operations nfs_dentry_operations; +extern const struct dentry_operations nfs_dentry_operations; extern void nfs_force_lookup_revalidate(struct inode *dir); extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, struct nfs_fattr *fattr); diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 2e5f000..43a713f 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -785,7 +785,7 @@ struct nfs_access_entry; */ struct nfs_rpc_ops { u32 version; /* Protocol version */ - struct dentry_operations *dentry_ops; + const struct dentry_operations *dentry_ops; const struct inode_operations *dir_inode_ops; const struct inode_operations *file_inode_ops; -- cgit v0.10.2 From e16404ed0f3f330dc3e99b95cef69bb60bcd27f7 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 20 Feb 2009 05:55:13 +0000 Subject: constify dentry_operations: misc filesystems Signed-off-by: Al Viro diff --git a/fs/adfs/adfs.h b/fs/adfs/adfs.h index 8311575..e0a85db 100644 --- a/fs/adfs/adfs.h +++ b/fs/adfs/adfs.h @@ -86,7 +86,7 @@ void __adfs_error(struct super_block *sb, const char *function, /* dir_*.c */ extern const struct inode_operations 
adfs_dir_inode_operations; extern const struct file_operations adfs_dir_operations; -extern struct dentry_operations adfs_dentry_operations; +extern const struct dentry_operations adfs_dentry_operations; extern struct adfs_dir_ops adfs_f_dir_ops; extern struct adfs_dir_ops adfs_fplus_dir_ops; diff --git a/fs/adfs/dir.c b/fs/adfs/dir.c index 85a30e9..e867ccf 100644 --- a/fs/adfs/dir.c +++ b/fs/adfs/dir.c @@ -263,7 +263,7 @@ adfs_compare(struct dentry *parent, struct qstr *entry, struct qstr *name) return 0; } -struct dentry_operations adfs_dentry_operations = { +const struct dentry_operations adfs_dentry_operations = { .d_hash = adfs_hash, .d_compare = adfs_compare, }; diff --git a/fs/affs/affs.h b/fs/affs/affs.h index e9ec915..1a2d5e3 100644 --- a/fs/affs/affs.h +++ b/fs/affs/affs.h @@ -199,8 +199,7 @@ extern const struct address_space_operations affs_symlink_aops; extern const struct address_space_operations affs_aops; extern const struct address_space_operations affs_aops_ofs; -extern struct dentry_operations affs_dentry_operations; -extern struct dentry_operations affs_dentry_operations_intl; +extern const struct dentry_operations affs_dentry_operations; static inline void affs_set_blocksize(struct super_block *sb, int size) diff --git a/fs/affs/namei.c b/fs/affs/namei.c index cfcf1b6..960d336 100644 --- a/fs/affs/namei.c +++ b/fs/affs/namei.c @@ -19,12 +19,12 @@ static int affs_intl_toupper(int ch); static int affs_intl_hash_dentry(struct dentry *, struct qstr *); static int affs_intl_compare_dentry(struct dentry *, struct qstr *, struct qstr *); -struct dentry_operations affs_dentry_operations = { +const struct dentry_operations affs_dentry_operations = { .d_hash = affs_hash_dentry, .d_compare = affs_compare_dentry, }; -static struct dentry_operations affs_intl_dentry_operations = { +static const struct dentry_operations affs_intl_dentry_operations = { .d_hash = affs_intl_hash_dentry, .d_compare = affs_intl_compare_dentry, }; diff --git a/fs/coda/dir.c b/fs/coda/dir.c index 75b1fa9..4bb9d0a 100644 --- a/fs/coda/dir.c +++ b/fs/coda/dir.c @@ -59,7 +59,7 @@ static int coda_return_EIO(void) } #define CODA_EIO_ERROR ((void *) (coda_return_EIO)) -static struct dentry_operations coda_dentry_operations = +static const struct dentry_operations coda_dentry_operations = { .d_revalidate = coda_dentry_revalidate, .d_delete = coda_dentry_delete, diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h index 9955232..052387e 100644 --- a/fs/hfs/hfs_fs.h +++ b/fs/hfs/hfs_fs.h @@ -213,7 +213,7 @@ extern void hfs_mdb_put(struct super_block *); extern int hfs_part_find(struct super_block *, sector_t *, sector_t *); /* string.c */ -extern struct dentry_operations hfs_dentry_operations; +extern const struct dentry_operations hfs_dentry_operations; extern int hfs_hash_dentry(struct dentry *, struct qstr *); extern int hfs_strcmp(const unsigned char *, unsigned int, diff --git a/fs/hfs/sysdep.c b/fs/hfs/sysdep.c index 5bf89ec..7478f5c 100644 --- a/fs/hfs/sysdep.c +++ b/fs/hfs/sysdep.c @@ -31,7 +31,7 @@ static int hfs_revalidate_dentry(struct dentry *dentry, struct nameidata *nd) return 1; } -struct dentry_operations hfs_dentry_operations = +const struct dentry_operations hfs_dentry_operations = { .d_revalidate = hfs_revalidate_dentry, .d_hash = hfs_hash_dentry, diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h index f027a90..5c10d80 100644 --- a/fs/hfsplus/hfsplus_fs.h +++ b/fs/hfsplus/hfsplus_fs.h @@ -327,7 +327,7 @@ void hfsplus_file_truncate(struct inode *); /* inode.c */ extern const struct 
address_space_operations hfsplus_aops; extern const struct address_space_operations hfsplus_btree_aops; -extern struct dentry_operations hfsplus_dentry_operations; +extern const struct dentry_operations hfsplus_dentry_operations; void hfsplus_inode_read_fork(struct inode *, struct hfsplus_fork_raw *); void hfsplus_inode_write_fork(struct inode *, struct hfsplus_fork_raw *); diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index f105ee9..1bcf597 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c @@ -137,7 +137,7 @@ const struct address_space_operations hfsplus_aops = { .writepages = hfsplus_writepages, }; -struct dentry_operations hfsplus_dentry_operations = { +const struct dentry_operations hfsplus_dentry_operations = { .d_hash = hfsplus_hash_dentry, .d_compare = hfsplus_compare_dentry, }; diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index 5c538e0..fe02ad4 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c @@ -31,12 +31,12 @@ static inline struct hostfs_inode_info *HOSTFS_I(struct inode *inode) #define FILE_HOSTFS_I(file) HOSTFS_I((file)->f_path.dentry->d_inode) -int hostfs_d_delete(struct dentry *dentry) +static int hostfs_d_delete(struct dentry *dentry) { return 1; } -struct dentry_operations hostfs_dentry_ops = { +static const struct dentry_operations hostfs_dentry_ops = { .d_delete = hostfs_d_delete, }; diff --git a/fs/hpfs/dentry.c b/fs/hpfs/dentry.c index 0831912..940d6d1 100644 --- a/fs/hpfs/dentry.c +++ b/fs/hpfs/dentry.c @@ -49,7 +49,7 @@ static int hpfs_compare_dentry(struct dentry *dentry, struct qstr *a, struct qst return 0; } -static struct dentry_operations hpfs_dentry_operations = { +static const struct dentry_operations hpfs_dentry_operations = { .d_hash = hpfs_hash_dentry, .d_compare = hpfs_compare_dentry, }; diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index 6147ec3..13d2edd 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -114,7 +114,7 @@ static const struct super_operations isofs_sops = { }; -static struct dentry_operations isofs_dentry_ops[] = { +static const struct dentry_operations isofs_dentry_ops[] = { { .d_hash = isofs_hash, .d_compare = isofs_dentry_cmp, diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c index 07e9715..9c59072 100644 --- a/fs/ncpfs/dir.c +++ b/fs/ncpfs/dir.c @@ -79,7 +79,7 @@ static int ncp_hash_dentry(struct dentry *, struct qstr *); static int ncp_compare_dentry (struct dentry *, struct qstr *, struct qstr *); static int ncp_delete_dentry(struct dentry *); -static struct dentry_operations ncp_dentry_operations = +static const struct dentry_operations ncp_dentry_operations = { .d_revalidate = ncp_lookup_validate, .d_hash = ncp_hash_dentry, @@ -87,7 +87,7 @@ static struct dentry_operations ncp_dentry_operations = .d_delete = ncp_delete_dentry, }; -struct dentry_operations ncp_root_dentry_operations = +const struct dentry_operations ncp_root_dentry_operations = { .d_hash = ncp_hash_dentry, .d_compare = ncp_compare_dentry, diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index ad92461..ae881cc 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -1136,7 +1136,7 @@ xattr_lookup_poison(struct dentry *dentry, struct qstr *q1, struct qstr *name) return 1; } -static struct dentry_operations xattr_lookup_poison_ops = { +static const struct dentry_operations xattr_lookup_poison_ops = { .d_compare = xattr_lookup_poison, }; diff --git a/fs/smbfs/dir.c b/fs/smbfs/dir.c index e7ddd03..3e4803b 100644 --- a/fs/smbfs/dir.c +++ b/fs/smbfs/dir.c @@ -277,7 +277,7 @@ static int smb_hash_dentry(struct 
dentry *, struct qstr *); static int smb_compare_dentry(struct dentry *, struct qstr *, struct qstr *); static int smb_delete_dentry(struct dentry *); -static struct dentry_operations smbfs_dentry_operations = +static const struct dentry_operations smbfs_dentry_operations = { .d_revalidate = smb_lookup_validate, .d_hash = smb_hash_dentry, @@ -285,7 +285,7 @@ static struct dentry_operations smbfs_dentry_operations = .d_delete = smb_delete_dentry, }; -static struct dentry_operations smbfs_dentry_operations_case = +static const struct dentry_operations smbfs_dentry_operations_case = { .d_revalidate = smb_lookup_validate, .d_delete = smb_delete_dentry, diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c index a1f1ef3..33e047b 100644 --- a/fs/sysv/namei.c +++ b/fs/sysv/namei.c @@ -38,7 +38,7 @@ static int sysv_hash(struct dentry *dentry, struct qstr *qstr) return 0; } -struct dentry_operations sysv_dentry_operations = { +const struct dentry_operations sysv_dentry_operations = { .d_hash = sysv_hash, }; diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h index 38ebe3f..5784a31 100644 --- a/fs/sysv/sysv.h +++ b/fs/sysv/sysv.h @@ -170,7 +170,7 @@ extern const struct file_operations sysv_file_operations; extern const struct file_operations sysv_dir_operations; extern const struct address_space_operations sysv_aops; extern const struct super_operations sysv_sops; -extern struct dentry_operations sysv_dentry_operations; +extern const struct dentry_operations sysv_dentry_operations; enum { diff --git a/include/linux/ncp_fs.h b/include/linux/ncp_fs.h index f69e66d..30b06c8 100644 --- a/include/linux/ncp_fs.h +++ b/include/linux/ncp_fs.h @@ -204,7 +204,7 @@ void ncp_update_inode2(struct inode *, struct ncp_entry_info *); /* linux/fs/ncpfs/dir.c */ extern const struct inode_operations ncp_dir_inode_operations; extern const struct file_operations ncp_dir_operations; -extern struct dentry_operations ncp_root_dentry_operations; +extern const struct dentry_operations ncp_root_dentry_operations; int ncp_conn_logged_in(struct super_block *); int ncp_date_dos2unix(__le16 time, __le16 date); void ncp_date_unix2dos(int unix_date, __le16 * time, __le16 * date); -- cgit v0.10.2 From a488257ce5a55c53973671218791296463698d07 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 20 Feb 2009 05:55:46 +0000 Subject: constify dentry_operations: 9p Signed-off-by: Al Viro diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h index c295ba7..f0c7de7 100644 --- a/fs/9p/v9fs_vfs.h +++ b/fs/9p/v9fs_vfs.h @@ -41,8 +41,8 @@ extern struct file_system_type v9fs_fs_type; extern const struct address_space_operations v9fs_addr_operations; extern const struct file_operations v9fs_file_operations; extern const struct file_operations v9fs_dir_operations; -extern struct dentry_operations v9fs_dentry_operations; -extern struct dentry_operations v9fs_cached_dentry_operations; +extern const struct dentry_operations v9fs_dentry_operations; +extern const struct dentry_operations v9fs_cached_dentry_operations; struct inode *v9fs_get_inode(struct super_block *sb, int mode); ino_t v9fs_qid2ino(struct p9_qid *qid); diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c index 06dcc7c..d743252 100644 --- a/fs/9p/vfs_dentry.c +++ b/fs/9p/vfs_dentry.c @@ -104,12 +104,12 @@ void v9fs_dentry_release(struct dentry *dentry) } } -struct dentry_operations v9fs_cached_dentry_operations = { +const struct dentry_operations v9fs_cached_dentry_operations = { .d_delete = v9fs_cached_dentry_delete, .d_release = v9fs_dentry_release, }; -struct dentry_operations v9fs_dentry_operations = { 
+const struct dentry_operations v9fs_dentry_operations = { .d_delete = v9fs_dentry_delete, .d_release = v9fs_dentry_release, }; -- cgit v0.10.2 From 08f11513fa6f712506edb99327f7d051da9d860f Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 20 Feb 2009 05:56:19 +0000 Subject: constify dentry_operations: autofs, autofs4 Signed-off-by: Al Viro diff --git a/fs/autofs/root.c b/fs/autofs/root.c index 8aacade..4a1401c 100644 --- a/fs/autofs/root.c +++ b/fs/autofs/root.c @@ -192,7 +192,7 @@ static int autofs_revalidate(struct dentry * dentry, struct nameidata *nd) return 1; } -static struct dentry_operations autofs_dentry_operations = { +static const struct dentry_operations autofs_dentry_operations = { .d_revalidate = autofs_revalidate, }; diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c index 716e12b..69c8142 100644 --- a/fs/autofs4/inode.c +++ b/fs/autofs4/inode.c @@ -310,7 +310,7 @@ static struct autofs_info *autofs4_mkroot(struct autofs_sb_info *sbi) return ino; } -static struct dentry_operations autofs4_sb_dentry_operations = { +static const struct dentry_operations autofs4_sb_dentry_operations = { .d_release = autofs4_dentry_release, }; diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index 2a41c2a..74b1469 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -349,13 +349,13 @@ void autofs4_dentry_release(struct dentry *de) } /* For dentries of directories in the root dir */ -static struct dentry_operations autofs4_root_dentry_operations = { +static const struct dentry_operations autofs4_root_dentry_operations = { .d_revalidate = autofs4_revalidate, .d_release = autofs4_dentry_release, }; /* For other dentries */ -static struct dentry_operations autofs4_dentry_operations = { +static const struct dentry_operations autofs4_dentry_operations = { .d_revalidate = autofs4_revalidate, .d_release = autofs4_dentry_release, }; -- cgit v0.10.2 From 79be57cc7fd25563c73ab26b0c28ff6ad0d618fc Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 20 Feb 2009 05:56:47 +0000 Subject: constify dentry_operations: AFS Signed-off-by: Al Viro diff --git a/fs/afs/dir.c b/fs/afs/dir.c index 99cf390..9bd7577 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -62,7 +62,7 @@ const struct inode_operations afs_dir_inode_operations = { .setattr = afs_setattr, }; -static struct dentry_operations afs_fs_dentry_operations = { +static const struct dentry_operations afs_fs_dentry_operations = { .d_revalidate = afs_d_revalidate, .d_delete = afs_d_delete, .d_release = afs_d_release, -- cgit v0.10.2 From 4fd03e84d8f4e6304cef19698a24dee84039ef01 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 20 Feb 2009 05:57:07 +0000 Subject: constify dentry_operations: CIFS Signed-off-by: Al Viro diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 2b1d28a..77e190d 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -78,8 +78,8 @@ extern int cifs_dir_open(struct inode *inode, struct file *file); extern int cifs_readdir(struct file *file, void *direntry, filldir_t filldir); /* Functions related to dir entries */ -extern struct dentry_operations cifs_dentry_ops; -extern struct dentry_operations cifs_ci_dentry_ops; +extern const struct dentry_operations cifs_dentry_ops; +extern const struct dentry_operations cifs_ci_dentry_ops; /* Functions related to symlinks */ extern void *cifs_follow_link(struct dentry *direntry, struct nameidata *nd); diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 89fb728..43cc0fd 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -699,7 +699,7 @@ cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd) 
return rc; } */ -struct dentry_operations cifs_dentry_ops = { +const struct dentry_operations cifs_dentry_ops = { .d_revalidate = cifs_d_revalidate, /* d_delete: cifs_d_delete, */ /* not needed except for debugging */ }; @@ -737,7 +737,7 @@ static int cifs_ci_compare(struct dentry *dentry, struct qstr *a, return 1; } -struct dentry_operations cifs_ci_dentry_ops = { +const struct dentry_operations cifs_ci_dentry_ops = { .d_revalidate = cifs_d_revalidate, .d_hash = cifs_ci_hash, .d_compare = cifs_ci_compare, -- cgit v0.10.2 From 5a3fd05a9bb2f104020fbfc4551ad4aaed4660a4 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 20 Feb 2009 05:57:52 +0000 Subject: constify dentry_operations: ecryptfs Signed-off-by: Al Viro diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c index 5e59658..2dda5ad 100644 --- a/fs/ecryptfs/dentry.c +++ b/fs/ecryptfs/dentry.c @@ -89,7 +89,7 @@ static void ecryptfs_d_release(struct dentry *dentry) return; } -struct dentry_operations ecryptfs_dops = { +const struct dentry_operations ecryptfs_dops = { .d_revalidate = ecryptfs_d_revalidate, .d_release = ecryptfs_d_release, }; diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h index ac749d4..064c582 100644 --- a/fs/ecryptfs/ecryptfs_kernel.h +++ b/fs/ecryptfs/ecryptfs_kernel.h @@ -580,7 +580,7 @@ extern const struct inode_operations ecryptfs_main_iops; extern const struct inode_operations ecryptfs_dir_iops; extern const struct inode_operations ecryptfs_symlink_iops; extern const struct super_operations ecryptfs_sops; -extern struct dentry_operations ecryptfs_dops; +extern const struct dentry_operations ecryptfs_dops; extern struct address_space_operations ecryptfs_aops; extern int ecryptfs_verbosity; extern unsigned int ecryptfs_message_buf_len; -- cgit v0.10.2 From d72f71eb0edd629c95715aa7305b0259d3581e34 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 20 Feb 2009 05:58:47 +0000 Subject: constify dentry_operations: procfs Signed-off-by: Al Viro diff --git a/fs/proc/base.c b/fs/proc/base.c index beaa0ce..aef6d55 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -1545,7 +1545,7 @@ static int pid_delete_dentry(struct dentry * dentry) return !proc_pid(dentry->d_inode)->tasks[PIDTYPE_PID].first; } -static struct dentry_operations pid_dentry_operations = +static const struct dentry_operations pid_dentry_operations = { .d_revalidate = pid_revalidate, .d_delete = pid_delete_dentry, @@ -1717,7 +1717,7 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd) return 0; } -static struct dentry_operations tid_fd_dentry_operations = +static const struct dentry_operations tid_fd_dentry_operations = { .d_revalidate = tid_fd_revalidate, .d_delete = pid_delete_dentry, @@ -2339,7 +2339,7 @@ static int proc_base_revalidate(struct dentry *dentry, struct nameidata *nd) return 0; } -static struct dentry_operations proc_base_dentry_operations = +static const struct dentry_operations proc_base_dentry_operations = { .d_revalidate = proc_base_revalidate, .d_delete = pid_delete_dentry, diff --git a/fs/proc/generic.c b/fs/proc/generic.c index db7fa5c..5d2989e 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -363,7 +363,7 @@ static int proc_delete_dentry(struct dentry * dentry) return 1; } -static struct dentry_operations proc_dentry_operations = +static const struct dentry_operations proc_dentry_operations = { .d_delete = proc_delete_dentry, }; diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 94fcfff..9b1e4e9 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ 
-7,7 +7,7 @@ #include #include "internal.h" -static struct dentry_operations proc_sys_dentry_operations; +static const struct dentry_operations proc_sys_dentry_operations; static const struct file_operations proc_sys_file_operations; static const struct inode_operations proc_sys_inode_operations; static const struct file_operations proc_sys_dir_file_operations; @@ -396,7 +396,7 @@ static int proc_sys_compare(struct dentry *dir, struct qstr *qstr, return !sysctl_is_seen(PROC_I(dentry->d_inode)->sysctl); } -static struct dentry_operations proc_sys_dentry_operations = { +static const struct dentry_operations proc_sys_dentry_operations = { .d_revalidate = proc_sys_revalidate, .d_delete = proc_sys_delete, .d_compare = proc_sys_compare, -- cgit v0.10.2 From 4269590a72934bb901b33b686e20605bf66653c4 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 20 Feb 2009 05:59:13 +0000 Subject: constify dentry_operations: FUSE Signed-off-by: Al Viro diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index fdff346..06da052 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -224,7 +224,7 @@ static int invalid_nodeid(u64 nodeid) return !nodeid || nodeid == FUSE_ROOT_ID; } -struct dentry_operations fuse_dentry_operations = { +const struct dentry_operations fuse_dentry_operations = { .d_revalidate = fuse_dentry_revalidate, }; diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 5e64b81..6fc5aed 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -493,7 +493,7 @@ static inline u64 get_node_id(struct inode *inode) /** Device operations */ extern const struct file_operations fuse_dev_operations; -extern struct dentry_operations fuse_dentry_operations; +extern const struct dentry_operations fuse_dentry_operations; /** * Get a filled in inode -- cgit v0.10.2 From ce6cdc474aa5bf4c1a7dc698d83bb0622846a81d Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 20 Feb 2009 05:59:46 +0000 Subject: constify dentry_operations: FAT Signed-off-by: Al Viro diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c index 7ba03a4..da3f361 100644 --- a/fs/fat/namei_msdos.c +++ b/fs/fat/namei_msdos.c @@ -188,7 +188,7 @@ old_compare: goto out; } -static struct dentry_operations msdos_dentry_operations = { +static const struct dentry_operations msdos_dentry_operations = { .d_hash = msdos_hash, .d_compare = msdos_cmp, }; diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c index 8ae32e3..a0e00e3 100644 --- a/fs/fat/namei_vfat.c +++ b/fs/fat/namei_vfat.c @@ -166,13 +166,13 @@ static int vfat_cmp(struct dentry *dentry, struct qstr *a, struct qstr *b) return 1; } -static struct dentry_operations vfat_ci_dentry_ops = { +static const struct dentry_operations vfat_ci_dentry_ops = { .d_revalidate = vfat_revalidate_ci, .d_hash = vfat_hashi, .d_compare = vfat_cmpi, }; -static struct dentry_operations vfat_dentry_ops = { +static const struct dentry_operations vfat_dentry_ops = { .d_revalidate = vfat_revalidate, .d_hash = vfat_hash, .d_compare = vfat_cmp, -- cgit v0.10.2 From 92cecbbfa3785a944c733a284b77faee5b82b70c Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 20 Feb 2009 06:00:05 +0000 Subject: constify dentry_operations: GFS2 Signed-off-by: Al Viro diff --git a/fs/gfs2/ops_dentry.c b/fs/gfs2/ops_dentry.c index c2ad363..aea4d50 100644 --- a/fs/gfs2/ops_dentry.c +++ b/fs/gfs2/ops_dentry.c @@ -108,7 +108,7 @@ static int gfs2_dhash(struct dentry *dentry, struct qstr *str) return 0; } -struct dentry_operations gfs2_dops = { +const struct dentry_operations gfs2_dops = { .d_revalidate = gfs2_drevalidate, .d_hash = gfs2_dhash, }; diff --git 
a/fs/gfs2/super.h b/fs/gfs2/super.h index f6b8b00..a931e44 100644 --- a/fs/gfs2/super.h +++ b/fs/gfs2/super.h @@ -47,7 +47,7 @@ extern struct file_system_type gfs2_fs_type; extern struct file_system_type gfs2meta_fs_type; extern const struct export_operations gfs2_export_ops; extern const struct super_operations gfs2_super_ops; -extern struct dentry_operations gfs2_dops; +extern const struct dentry_operations gfs2_dops; #endif /* __SUPER_DOT_H__ */ -- cgit v0.10.2 From d8fba0ffe5e7442fc2560873ec901be6e56602a1 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 20 Feb 2009 06:00:26 +0000 Subject: constify dentry_operations: OCFS2 Signed-off-by: Al Viro diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c index e9d7c20..7d60448 100644 --- a/fs/ocfs2/dcache.c +++ b/fs/ocfs2/dcache.c @@ -455,7 +455,7 @@ out_move: d_move(dentry, target); } -struct dentry_operations ocfs2_dentry_ops = { +const struct dentry_operations ocfs2_dentry_ops = { .d_revalidate = ocfs2_dentry_revalidate, .d_iput = ocfs2_dentry_iput, }; diff --git a/fs/ocfs2/dcache.h b/fs/ocfs2/dcache.h index d06e16c..faa12e7 100644 --- a/fs/ocfs2/dcache.h +++ b/fs/ocfs2/dcache.h @@ -26,7 +26,7 @@ #ifndef OCFS2_DCACHE_H #define OCFS2_DCACHE_H -extern struct dentry_operations ocfs2_dentry_ops; +extern const struct dentry_operations ocfs2_dentry_ops; struct ocfs2_dentry_lock { /* Use count of dentry lock */ -- cgit v0.10.2 From ad28b4ef1937b11432cd64fe1ebad714f8e253bd Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 20 Feb 2009 06:00:49 +0000 Subject: constify dentry_operations: JFS Signed-off-by: Al Viro diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h index adb2faf..1eff7db 100644 --- a/fs/jfs/jfs_inode.h +++ b/fs/jfs/jfs_inode.h @@ -47,5 +47,5 @@ extern const struct file_operations jfs_dir_operations; extern const struct inode_operations jfs_file_inode_operations; extern const struct file_operations jfs_file_operations; extern const struct inode_operations jfs_symlink_inode_operations; -extern struct dentry_operations jfs_ci_dentry_operations; +extern const struct dentry_operations jfs_ci_dentry_operations; #endif /* _H_JFS_INODE */ diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c index b4de56b..5f80aef 100644 --- a/fs/jfs/namei.c +++ b/fs/jfs/namei.c @@ -35,7 +35,7 @@ /* * forward references */ -struct dentry_operations jfs_ci_dentry_operations; +const struct dentry_operations jfs_ci_dentry_operations; static s64 commitZeroLink(tid_t, struct inode *); @@ -1595,7 +1595,7 @@ out: return result; } -struct dentry_operations jfs_ci_dentry_operations = +const struct dentry_operations jfs_ci_dentry_operations = { .d_hash = jfs_ci_hash, .d_compare = jfs_ci_compare, -- cgit v0.10.2 From ee1ec32903fc3139af00ebc7ee483dabca3f4fa5 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 20 Feb 2009 06:01:46 +0000 Subject: constify dentry_operations: sysfs Signed-off-by: Al Viro diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c index 82d3b79..6b8fe71 100644 --- a/fs/sysfs/dir.c +++ b/fs/sysfs/dir.c @@ -302,7 +302,7 @@ static void sysfs_d_iput(struct dentry * dentry, struct inode * inode) iput(inode); } -static struct dentry_operations sysfs_dentry_ops = { +static const struct dentry_operations sysfs_dentry_ops = { .d_iput = sysfs_d_iput, }; -- cgit v0.10.2 From 296c2d86635bd6ecd8f282dfff18bb68fb4fc512 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 20 Feb 2009 06:02:01 +0000 Subject: constify dentry_operations: configfs Signed-off-by: Al Viro diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index 8e93341..05373db 100644 --- a/fs/configfs/dir.c +++ 
b/fs/configfs/dir.c @@ -72,7 +72,7 @@ static int configfs_d_delete(struct dentry *dentry) return 1; } -static struct dentry_operations configfs_dentry_ops = { +static const struct dentry_operations configfs_dentry_ops = { .d_iput = configfs_d_iput, /* simple_delete_dentry() isn't exported */ .d_delete = configfs_d_delete, -- cgit v0.10.2 From 3ba13d179e8c24c68eac32b93593a6b10fcd1572 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 20 Feb 2009 06:02:22 +0000 Subject: constify dentry_operations: rest Signed-off-by: Al Viro diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 0e49975..5c0f408 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -2196,7 +2196,7 @@ pfmfs_delete_dentry(struct dentry *dentry) return 1; } -static struct dentry_operations pfmfs_dentry_operations = { +static const struct dentry_operations pfmfs_dentry_operations = { .d_delete = pfmfs_delete_dentry, }; diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c index 3bbdb9d..1dd96d4 100644 --- a/fs/anon_inodes.c +++ b/fs/anon_inodes.c @@ -48,7 +48,7 @@ static struct file_system_type anon_inode_fs_type = { .get_sb = anon_inodefs_get_sb, .kill_sb = kill_anon_super, }; -static struct dentry_operations anon_inodefs_dentry_operations = { +static const struct dentry_operations anon_inodefs_dentry_operations = { .d_delete = anon_inodefs_delete_dentry, }; diff --git a/fs/libfs.c b/fs/libfs.c index 49b4409..ec600bd 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -44,7 +44,7 @@ static int simple_delete_dentry(struct dentry *dentry) */ struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { - static struct dentry_operations simple_dentry_operations = { + static const struct dentry_operations simple_dentry_operations = { .d_delete = simple_delete_dentry, }; diff --git a/fs/pipe.c b/fs/pipe.c index df37195..6ddf052 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -870,7 +870,7 @@ static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen) dentry->d_inode->i_ino); } -static struct dentry_operations pipefs_dentry_operations = { +static const struct dentry_operations pipefs_dentry_operations = { .d_delete = pipefs_delete_dentry, .d_dname = pipefs_dname, }; diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 9edb5c4..b01100e 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1627,7 +1627,7 @@ static struct inode_operations cgroup_dir_inode_operations = { static int cgroup_create_file(struct dentry *dentry, int mode, struct super_block *sb) { - static struct dentry_operations cgroup_dops = { + static const struct dentry_operations cgroup_dops = { .d_iput = cgroup_diput, }; diff --git a/net/socket.c b/net/socket.c index 35dd737..2f895f6 100644 --- a/net/socket.c +++ b/net/socket.c @@ -328,7 +328,7 @@ static char *sockfs_dname(struct dentry *dentry, char *buffer, int buflen) dentry->d_inode->i_ino); } -static struct dentry_operations sockfs_dentry_operations = { +static const struct dentry_operations sockfs_dentry_operations = { .d_delete = sockfs_delete_dentry, .d_dname = sockfs_dname, }; diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 577385a..9ced062 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -480,7 +480,7 @@ static int rpc_delete_dentry(struct dentry *dentry) return 1; } -static struct dentry_operations rpc_dentry_operations = { +static const struct dentry_operations rpc_dentry_operations = { .d_delete = rpc_delete_dentry, }; -- cgit v0.10.2 From 585d3bc06f4ca57f975a5a1f698f65a45ea66225 Mon Sep 17 00:00:00 2001 
From: Nick Piggin Date: Wed, 25 Feb 2009 10:44:19 +0100 Subject: fs: move bdev code out of buffer.c Move some block device related code out from buffer.c and put it in block_dev.c. I'm trying to move non-buffer_head code out of buffer.c Signed-off-by: Al Viro diff --git a/fs/block_dev.c b/fs/block_dev.c index b3c1eff..8c3c689 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -174,6 +175,151 @@ blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, iov, offset, nr_segs, blkdev_get_blocks, NULL); } +/* + * Write out and wait upon all the dirty data associated with a block + * device via its mapping. Does not take the superblock lock. + */ +int sync_blockdev(struct block_device *bdev) +{ + int ret = 0; + + if (bdev) + ret = filemap_write_and_wait(bdev->bd_inode->i_mapping); + return ret; +} +EXPORT_SYMBOL(sync_blockdev); + +/* + * Write out and wait upon all dirty data associated with this + * device. Filesystem data as well as the underlying block + * device. Takes the superblock lock. + */ +int fsync_bdev(struct block_device *bdev) +{ + struct super_block *sb = get_super(bdev); + if (sb) { + int res = fsync_super(sb); + drop_super(sb); + return res; + } + return sync_blockdev(bdev); +} + +/** + * freeze_bdev -- lock a filesystem and force it into a consistent state + * @bdev: blockdevice to lock + * + * This takes the block device bd_mount_sem to make sure no new mounts + * happen on bdev until thaw_bdev() is called. + * If a superblock is found on this device, we take the s_umount semaphore + * on it to make sure nobody unmounts until the snapshot creation is done. + * The reference counter (bd_fsfreeze_count) guarantees that only the last + * unfreeze process can unfreeze the frozen filesystem actually when multiple + * freeze requests arrive simultaneously. It counts up in freeze_bdev() and + * count down in thaw_bdev(). When it becomes 0, thaw_bdev() will unfreeze + * actually. + */ +struct super_block *freeze_bdev(struct block_device *bdev) +{ + struct super_block *sb; + int error = 0; + + mutex_lock(&bdev->bd_fsfreeze_mutex); + if (bdev->bd_fsfreeze_count > 0) { + bdev->bd_fsfreeze_count++; + sb = get_super(bdev); + mutex_unlock(&bdev->bd_fsfreeze_mutex); + return sb; + } + bdev->bd_fsfreeze_count++; + + down(&bdev->bd_mount_sem); + sb = get_super(bdev); + if (sb && !(sb->s_flags & MS_RDONLY)) { + sb->s_frozen = SB_FREEZE_WRITE; + smp_wmb(); + + __fsync_super(sb); + + sb->s_frozen = SB_FREEZE_TRANS; + smp_wmb(); + + sync_blockdev(sb->s_bdev); + + if (sb->s_op->freeze_fs) { + error = sb->s_op->freeze_fs(sb); + if (error) { + printk(KERN_ERR + "VFS:Filesystem freeze failed\n"); + sb->s_frozen = SB_UNFROZEN; + drop_super(sb); + up(&bdev->bd_mount_sem); + bdev->bd_fsfreeze_count--; + mutex_unlock(&bdev->bd_fsfreeze_mutex); + return ERR_PTR(error); + } + } + } + + sync_blockdev(bdev); + mutex_unlock(&bdev->bd_fsfreeze_mutex); + + return sb; /* thaw_bdev releases s->s_umount and bd_mount_sem */ +} +EXPORT_SYMBOL(freeze_bdev); + +/** + * thaw_bdev -- unlock filesystem + * @bdev: blockdevice to unlock + * @sb: associated superblock + * + * Unlocks the filesystem and marks it writeable again after freeze_bdev(). 
+ */ +int thaw_bdev(struct block_device *bdev, struct super_block *sb) +{ + int error = 0; + + mutex_lock(&bdev->bd_fsfreeze_mutex); + if (!bdev->bd_fsfreeze_count) { + mutex_unlock(&bdev->bd_fsfreeze_mutex); + return -EINVAL; + } + + bdev->bd_fsfreeze_count--; + if (bdev->bd_fsfreeze_count > 0) { + if (sb) + drop_super(sb); + mutex_unlock(&bdev->bd_fsfreeze_mutex); + return 0; + } + + if (sb) { + BUG_ON(sb->s_bdev != bdev); + if (!(sb->s_flags & MS_RDONLY)) { + if (sb->s_op->unfreeze_fs) { + error = sb->s_op->unfreeze_fs(sb); + if (error) { + printk(KERN_ERR + "VFS:Filesystem thaw failed\n"); + sb->s_frozen = SB_FREEZE_TRANS; + bdev->bd_fsfreeze_count++; + mutex_unlock(&bdev->bd_fsfreeze_mutex); + return error; + } + } + sb->s_frozen = SB_UNFROZEN; + smp_wmb(); + wake_up(&sb->s_wait_unfrozen); + } + drop_super(sb); + } + + up(&bdev->bd_mount_sem); + mutex_unlock(&bdev->bd_fsfreeze_mutex); + return 0; +} +EXPORT_SYMBOL(thaw_bdev); + static int blkdev_writepage(struct page *page, struct writeback_control *wbc) { return block_write_full_page(page, blkdev_get_block, wbc); diff --git a/fs/buffer.c b/fs/buffer.c index 891e1c7..a2fd743 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -166,151 +166,6 @@ void end_buffer_write_sync(struct buffer_head *bh, int uptodate) } /* - * Write out and wait upon all the dirty data associated with a block - * device via its mapping. Does not take the superblock lock. - */ -int sync_blockdev(struct block_device *bdev) -{ - int ret = 0; - - if (bdev) - ret = filemap_write_and_wait(bdev->bd_inode->i_mapping); - return ret; -} -EXPORT_SYMBOL(sync_blockdev); - -/* - * Write out and wait upon all dirty data associated with this - * device. Filesystem data as well as the underlying block - * device. Takes the superblock lock. - */ -int fsync_bdev(struct block_device *bdev) -{ - struct super_block *sb = get_super(bdev); - if (sb) { - int res = fsync_super(sb); - drop_super(sb); - return res; - } - return sync_blockdev(bdev); -} - -/** - * freeze_bdev -- lock a filesystem and force it into a consistent state - * @bdev: blockdevice to lock - * - * This takes the block device bd_mount_sem to make sure no new mounts - * happen on bdev until thaw_bdev() is called. - * If a superblock is found on this device, we take the s_umount semaphore - * on it to make sure nobody unmounts until the snapshot creation is done. - * The reference counter (bd_fsfreeze_count) guarantees that only the last - * unfreeze process can unfreeze the frozen filesystem actually when multiple - * freeze requests arrive simultaneously. It counts up in freeze_bdev() and - * count down in thaw_bdev(). When it becomes 0, thaw_bdev() will unfreeze - * actually. 
- */ -struct super_block *freeze_bdev(struct block_device *bdev) -{ - struct super_block *sb; - int error = 0; - - mutex_lock(&bdev->bd_fsfreeze_mutex); - if (bdev->bd_fsfreeze_count > 0) { - bdev->bd_fsfreeze_count++; - sb = get_super(bdev); - mutex_unlock(&bdev->bd_fsfreeze_mutex); - return sb; - } - bdev->bd_fsfreeze_count++; - - down(&bdev->bd_mount_sem); - sb = get_super(bdev); - if (sb && !(sb->s_flags & MS_RDONLY)) { - sb->s_frozen = SB_FREEZE_WRITE; - smp_wmb(); - - __fsync_super(sb); - - sb->s_frozen = SB_FREEZE_TRANS; - smp_wmb(); - - sync_blockdev(sb->s_bdev); - - if (sb->s_op->freeze_fs) { - error = sb->s_op->freeze_fs(sb); - if (error) { - printk(KERN_ERR - "VFS:Filesystem freeze failed\n"); - sb->s_frozen = SB_UNFROZEN; - drop_super(sb); - up(&bdev->bd_mount_sem); - bdev->bd_fsfreeze_count--; - mutex_unlock(&bdev->bd_fsfreeze_mutex); - return ERR_PTR(error); - } - } - } - - sync_blockdev(bdev); - mutex_unlock(&bdev->bd_fsfreeze_mutex); - - return sb; /* thaw_bdev releases s->s_umount and bd_mount_sem */ -} -EXPORT_SYMBOL(freeze_bdev); - -/** - * thaw_bdev -- unlock filesystem - * @bdev: blockdevice to unlock - * @sb: associated superblock - * - * Unlocks the filesystem and marks it writeable again after freeze_bdev(). - */ -int thaw_bdev(struct block_device *bdev, struct super_block *sb) -{ - int error = 0; - - mutex_lock(&bdev->bd_fsfreeze_mutex); - if (!bdev->bd_fsfreeze_count) { - mutex_unlock(&bdev->bd_fsfreeze_mutex); - return -EINVAL; - } - - bdev->bd_fsfreeze_count--; - if (bdev->bd_fsfreeze_count > 0) { - if (sb) - drop_super(sb); - mutex_unlock(&bdev->bd_fsfreeze_mutex); - return 0; - } - - if (sb) { - BUG_ON(sb->s_bdev != bdev); - if (!(sb->s_flags & MS_RDONLY)) { - if (sb->s_op->unfreeze_fs) { - error = sb->s_op->unfreeze_fs(sb); - if (error) { - printk(KERN_ERR - "VFS:Filesystem thaw failed\n"); - sb->s_frozen = SB_FREEZE_TRANS; - bdev->bd_fsfreeze_count++; - mutex_unlock(&bdev->bd_fsfreeze_mutex); - return error; - } - } - sb->s_frozen = SB_UNFROZEN; - smp_wmb(); - wake_up(&sb->s_wait_unfrozen); - } - drop_super(sb); - } - - up(&bdev->bd_mount_sem); - mutex_unlock(&bdev->bd_fsfreeze_mutex); - return 0; -} -EXPORT_SYMBOL(thaw_bdev); - -/* * Various filesystems appear to want __find_get_block to be non-blocking. * But it's the page lock which protects the buffers. 
To get around this, * we get exclusion from try_to_free_buffers with the blockdev mapping's diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index bd7ac79..f19fd90 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -165,15 +165,8 @@ int sync_mapping_buffers(struct address_space *mapping); void unmap_underlying_metadata(struct block_device *bdev, sector_t block); void mark_buffer_async_write(struct buffer_head *bh); -void invalidate_bdev(struct block_device *); -int sync_blockdev(struct block_device *bdev); void __wait_on_buffer(struct buffer_head *); wait_queue_head_t *bh_waitq_head(struct buffer_head *bh); -int fsync_bdev(struct block_device *); -struct super_block *freeze_bdev(struct block_device *); -int thaw_bdev(struct block_device *, struct super_block *); -int fsync_super(struct super_block *); -int fsync_no_super(struct block_device *); struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block, unsigned size); struct buffer_head *__getblk(struct block_device *bdev, sector_t block, diff --git a/include/linux/fs.h b/include/linux/fs.h index 5f74d61..c2c4454 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1874,6 +1874,13 @@ extern void bd_set_size(struct block_device *, loff_t size); extern void bd_forget(struct inode *inode); extern void bdput(struct block_device *); extern struct block_device *open_by_devnum(dev_t, fmode_t); +extern void invalidate_bdev(struct block_device *); +extern int sync_blockdev(struct block_device *bdev); +extern struct super_block *freeze_bdev(struct block_device *); +extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); +extern int fsync_bdev(struct block_device *); +extern int fsync_super(struct super_block *); +extern int fsync_no_super(struct block_device *); #else static inline void bd_forget(struct inode *inode) {} #endif -- cgit v0.10.2 From a3ec947c85ec339884b30ef6a08133e9311fdae1 Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Wed, 4 Mar 2009 12:06:34 -0800 Subject: vfs: simple_set_mnt() should return void simple_set_mnt() is defined as returning 'int' but always returns 0. Callers assume simple_set_mnt() never fails and don't properly cleanup if it were to _ever_ fail. For instance, get_sb_single() and get_sb_nodev() should: up_write(sb->s_unmount); deactivate_super(sb); if simple_set_mnt() fails. Since simple_set_mnt() never fails, would be cleaner if it did not return anything. 
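For instance, the conversion applied at each call site follows this shape (illustrative only; the surrounding function is omitted):

	/* new prototype: the return value is gone */
	void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb);

	/* before: callers propagated a value that was always 0 */
	sb->s_flags |= MS_ACTIVE;
	return simple_set_mnt(mnt, sb);

	/* after: success is returned explicitly */
	sb->s_flags |= MS_ACTIVE;
	simple_set_mnt(mnt, sb);
	return 0;
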
[akpm@linux-foundation.org: fix build] Signed-off-by: Sukadev Bhattiprolu Acked-by: Serge Hallyn Cc: Al Viro Cc: Christoph Hellwig Signed-off-by: Andrew Morton Signed-off-by: Al Viro diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c index 00d46e1..92285d0 100644 --- a/drivers/mtd/mtdsuper.c +++ b/drivers/mtd/mtdsuper.c @@ -81,13 +81,16 @@ static int get_sb_mtd_aux(struct file_system_type *fs_type, int flags, /* go */ sb->s_flags |= MS_ACTIVE; - return simple_set_mnt(mnt, sb); + simple_set_mnt(mnt, sb); + + return 0; /* new mountpoint for an already mounted superblock */ already_mounted: DEBUG(1, "MTDSB: Device %d (\"%s\") is already mounted\n", mtd->index, mtd->name); - ret = simple_set_mnt(mnt, sb); + simple_set_mnt(mnt, sb); + ret = 0; goto out_put; out_error: diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c index 93212e4..5f8ab8a 100644 --- a/fs/9p/vfs_super.c +++ b/fs/9p/vfs_super.c @@ -168,8 +168,9 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags, p9stat_free(st); kfree(st); -P9_DPRINTK(P9_DEBUG_VFS, " return simple set mount\n"); - return simple_set_mnt(mnt, sb); +P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n"); + simple_set_mnt(mnt, sb); + return 0; release_sb: if (sb) { diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 13ea532..38491fd 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -606,7 +606,8 @@ cifs_get_sb(struct file_system_type *fs_type, return rc; } sb->s_flags |= MS_ACTIVE; - return simple_set_mnt(mnt, sb); + simple_set_mnt(mnt, sb); + return 0; } static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov, diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index 140b431..b0a7634 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -454,7 +454,8 @@ static int get_init_pts_sb(struct file_system_type *fs_type, int flags, s->s_flags |= MS_ACTIVE; } do_remount_sb(s, flags, data, 0); - return simple_set_mnt(mnt, s); + simple_set_mnt(mnt, s); + return 0; } /* diff --git a/fs/libfs.c b/fs/libfs.c index ec600bd..4910a36 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -242,7 +242,8 @@ int get_sb_pseudo(struct file_system_type *fs_type, char *name, d_instantiate(dentry, root); s->s_root = dentry; s->s_flags |= MS_ACTIVE; - return simple_set_mnt(mnt, s); + simple_set_mnt(mnt, s); + return 0; Enomem: up_write(&s->s_umount); diff --git a/fs/namespace.c b/fs/namespace.c index 06f8e63..2432ca6 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -397,11 +397,10 @@ static void __mnt_unmake_readonly(struct vfsmount *mnt) spin_unlock(&vfsmount_lock); } -int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb) +void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb) { mnt->mnt_sb = sb; mnt->mnt_root = dget(sb->s_root); - return 0; } EXPORT_SYMBOL(simple_set_mnt); diff --git a/fs/proc/root.c b/fs/proc/root.c index f6299a2..1e15a2b 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c @@ -83,7 +83,8 @@ static int proc_get_sb(struct file_system_type *fs_type, ns->proc_mnt = mnt; } - return simple_set_mnt(mnt, sb); + simple_set_mnt(mnt, sb); + return 0; } static void proc_kill_sb(struct super_block *sb) diff --git a/fs/super.c b/fs/super.c index 6ce5014..e512fab 100644 --- a/fs/super.c +++ b/fs/super.c @@ -831,7 +831,8 @@ int get_sb_bdev(struct file_system_type *fs_type, bdev->bd_super = s; } - return simple_set_mnt(mnt, s); + simple_set_mnt(mnt, s); + return 0; error_s: error = PTR_ERR(s); @@ -877,7 +878,8 @@ int get_sb_nodev(struct file_system_type *fs_type, return error; } s->s_flags |= MS_ACTIVE; - 
return simple_set_mnt(mnt, s); + simple_set_mnt(mnt, s); + return 0; } EXPORT_SYMBOL(get_sb_nodev); @@ -909,7 +911,8 @@ int get_sb_single(struct file_system_type *fs_type, s->s_flags |= MS_ACTIVE; } do_remount_sb(s, flags, data, 0); - return simple_set_mnt(mnt, s); + simple_set_mnt(mnt, s); + return 0; } EXPORT_SYMBOL(get_sb_single); diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 1182b66..c5c9835 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -2034,7 +2034,8 @@ static int ubifs_get_sb(struct file_system_type *fs_type, int flags, /* 'fill_super()' opens ubi again so we must close it here */ ubi_close_volume(ubi); - return simple_set_mnt(mnt, sb); + simple_set_mnt(mnt, sb); + return 0; out_deact: up_write(&sb->s_umount); diff --git a/include/linux/fs.h b/include/linux/fs.h index c2c4454..a7d7391 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1719,7 +1719,7 @@ struct super_block *sget(struct file_system_type *type, extern int get_sb_pseudo(struct file_system_type *, char *, const struct super_operations *ops, unsigned long, struct vfsmount *mnt); -extern int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb); +extern void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb); int __put_super_and_need_restart(struct super_block *sb); /* Alas, no aliases. Too much hassle with bringing module.h everywhere */ diff --git a/kernel/cgroup.c b/kernel/cgroup.c index b01100e..c500ca7 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1071,7 +1071,8 @@ static int cgroup_get_sb(struct file_system_type *fs_type, mutex_unlock(&cgroup_mutex); } - return simple_set_mnt(mnt, sb); + simple_set_mnt(mnt, sb); + return 0; free_cg_links: free_cg_links(&tmp_cg_links); -- cgit v0.10.2 From fdbf5348661ac9d519164d1489f30cc0384fda58 Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Sat, 7 Mar 2009 10:16:20 -0800 Subject: Unroll essentials of do_remount_sb() into devpts On remount, devpts fs only needs to parse the mount options. Users cannot directly create/dirty files in /dev/pts so the MS_RDONLY flag and shrinking the dcache does not really apply to devpts. So effectively on remount, devpts only parses the mount options and updates these options in its super block. As such, we could replace do_remount_sb() call with a direct parse_mount_options(). Doing so enables subsequent patches to avoid parsing the mount options twice and simplify the code. 
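In condensed form, the remount path of get_init_pts_sb() goes from the generic helper to a direct option parse (full hunk below):

	/* before */
	do_remount_sb(s, flags, data, 0);

	/* after: only the devpts options need updating on remount */
	fsi = DEVPTS_SB(s);
	opts = &fsi->mount_opts;
	parse_mount_options(data, PARSE_REMOUNT, opts);
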
Signed-off-by: Sukadev Bhattiprolu Acked-by: Serge Hallyn Signed-off-by: Al Viro diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index b0a7634..fb4da9d 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -437,6 +437,8 @@ static int get_init_pts_sb(struct file_system_type *fs_type, int flags, void *data, struct vfsmount *mnt) { struct super_block *s; + struct pts_mount_opts *opts; + struct pts_fs_info *fsi; int error; s = sget(fs_type, compare_init_pts_sb, set_anon_super, NULL); @@ -453,7 +455,10 @@ static int get_init_pts_sb(struct file_system_type *fs_type, int flags, } s->s_flags |= MS_ACTIVE; } - do_remount_sb(s, flags, data, 0); + fsi = DEVPTS_SB(s); + opts = &fsi->mount_opts; + parse_mount_options(data, PARSE_REMOUNT, opts); + simple_set_mnt(mnt, s); return 0; } -- cgit v0.10.2 From 482984f06df54d886995a4383d2f5bb85e3de945 Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Sat, 7 Mar 2009 10:14:41 -0800 Subject: Parse mount options just once and copy them to super block Since all the mount option parsing is done in devpts, we could do it just once and pass it around in devpts functions and eventually store it in the super block. Signed-off-by: Sukadev Bhattiprolu Signed-off-by: Al Viro diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index fb4da9d..70013dd 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -322,60 +322,21 @@ static int compare_init_pts_sb(struct super_block *s, void *p) } /* - * Safely parse the mount options in @data and update @opts. - * - * devpts ends up parsing options two times during mount, due to the - * two modes of operation it supports. The first parse occurs in - * devpts_get_sb() when determining the mode (single-instance or - * multi-instance mode). The second parse happens in devpts_remount() - * or new_pts_mount() depending on the mode. - * - * Parsing of options modifies the @data making subsequent parsing - * incorrect. So make a local copy of @data and parse it. - * - * Return: 0 On success, -errno on error - */ -static int safe_parse_mount_options(void *data, struct pts_mount_opts *opts) -{ - int rc; - void *datacp; - - if (!data) - return 0; - - /* Use kstrdup() ? */ - datacp = kmalloc(PAGE_SIZE, GFP_KERNEL); - if (!datacp) - return -ENOMEM; - - memcpy(datacp, data, PAGE_SIZE); - rc = parse_mount_options((char *)datacp, PARSE_MOUNT, opts); - kfree(datacp); - - return rc; -} - -/* * Mount a new (private) instance of devpts. PTYs created in this * instance are independent of the PTYs in other devpts instances. */ static int new_pts_mount(struct file_system_type *fs_type, int flags, - void *data, struct vfsmount *mnt) + void *data, struct pts_mount_opts *opts, struct vfsmount *mnt) { int err; struct pts_fs_info *fsi; - struct pts_mount_opts *opts; err = get_sb_nodev(fs_type, flags, data, devpts_fill_super, mnt); if (err) return err; fsi = DEVPTS_SB(mnt->mnt_sb); - opts = &fsi->mount_opts; - - err = parse_mount_options(data, PARSE_MOUNT, opts); - if (err) - goto fail; + memcpy(&fsi->mount_opts, opts, sizeof(opts)); err = mknod_ptmx(mnt->mnt_sb); if (err) @@ -391,28 +352,6 @@ fail: } /* - * Check if 'newinstance' mount option was specified in @data. 
- * - * Return: -errno on error (eg: invalid mount options specified) - * : 1 if 'newinstance' mount option was specified - * : 0 if 'newinstance' mount option was NOT specified - */ -static int is_new_instance_mount(void *data) -{ - int rc; - struct pts_mount_opts opts; - - if (!data) - return 0; - - rc = safe_parse_mount_options(data, &opts); - if (!rc) - rc = opts.newinstance; - - return rc; -} - -/* * get_init_pts_sb() * * This interface is needed to support multiple namespace semantics in @@ -434,10 +373,9 @@ static int is_new_instance_mount(void *data) * presence of the private namespace (i.e 'newinstance') super-blocks. */ static int get_init_pts_sb(struct file_system_type *fs_type, int flags, - void *data, struct vfsmount *mnt) + void *data, struct pts_mount_opts *opts, struct vfsmount *mnt) { struct super_block *s; - struct pts_mount_opts *opts; struct pts_fs_info *fsi; int error; @@ -455,11 +393,12 @@ static int get_init_pts_sb(struct file_system_type *fs_type, int flags, } s->s_flags |= MS_ACTIVE; } - fsi = DEVPTS_SB(s); - opts = &fsi->mount_opts; - parse_mount_options(data, PARSE_REMOUNT, opts); simple_set_mnt(mnt, s); + + fsi = DEVPTS_SB(mnt->mnt_sb); + memcpy(&fsi->mount_opts, opts, sizeof(opts)); + return 0; } @@ -469,11 +408,11 @@ static int get_init_pts_sb(struct file_system_type *fs_type, int flags, * kernel still allows multiple-instances. */ static int init_pts_mount(struct file_system_type *fs_type, int flags, - void *data, struct vfsmount *mnt) + void *data, struct pts_mount_opts *opts, struct vfsmount *mnt) { int err; - err = get_init_pts_sb(fs_type, flags, data, mnt); + err = get_init_pts_sb(fs_type, flags, data, opts, mnt); if (err) return err; @@ -490,17 +429,22 @@ static int init_pts_mount(struct file_system_type *fs_type, int flags, static int devpts_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, struct vfsmount *mnt) { - int new; - - new = is_new_instance_mount(data); - if (new < 0) - return new; + int error; + struct pts_mount_opts opts; - if (new) - return new_pts_mount(fs_type, flags, data, mnt); + memset(&opts, 0, sizeof(opts)); + if (data) { + error = parse_mount_options(data, PARSE_MOUNT, &opts); + if (error) + return error; + } - return init_pts_mount(fs_type, flags, data, mnt); + if (opts.newinstance) + return new_pts_mount(fs_type, flags, data, &opts, mnt); + else + return init_pts_mount(fs_type, flags, data, &opts, mnt); } + #else /* * This supports only the legacy single-instance semantics (no -- cgit v0.10.2 From 945cf2c79f6fbb1b74e3b0ca08f48b6af56ad412 Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Sat, 7 Mar 2009 10:11:41 -0800 Subject: Move common mknod_ptmx() calls into caller We create 'ptmx' node in both single-instance and multiple-instance mounts. So devpts_get_sb() can call mknod_ptmx() once rather than have both modes calling mknod_ptmx() separately. 
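The resulting shape of devpts_get_sb(), condensed from the hunk below:

	if (opts.newinstance)
		error = new_pts_mount(fs_type, flags, data, &opts, mnt);
	else
		error = init_pts_mount(fs_type, flags, data, &opts, mnt);
	if (error)
		return error;

	/* ptmx node is created once here, for both mount modes */
	error = mknod_ptmx(mnt->mnt_sb);
	if (error)
		goto out_dput;	/* dput + up_write + deactivate_super */

	return 0;
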
Signed-off-by: Sukadev Bhattiprolu Acked-by: Serge Hallyn Signed-off-by: Al Viro diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index 70013dd..58b7190 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -338,17 +338,7 @@ static int new_pts_mount(struct file_system_type *fs_type, int flags, fsi = DEVPTS_SB(mnt->mnt_sb); memcpy(&fsi->mount_opts, opts, sizeof(opts)); - err = mknod_ptmx(mnt->mnt_sb); - if (err) - goto fail; - return 0; - -fail: - dput(mnt->mnt_sb->s_root); - up_write(&mnt->mnt_sb->s_umount); - deactivate_super(mnt->mnt_sb); - return err; } /* @@ -416,13 +406,6 @@ static int init_pts_mount(struct file_system_type *fs_type, int flags, if (err) return err; - err = mknod_ptmx(mnt->mnt_sb); - if (err) { - dput(mnt->mnt_sb->s_root); - up_write(&mnt->mnt_sb->s_umount); - deactivate_super(mnt->mnt_sb); - } - return err; } @@ -440,9 +423,24 @@ static int devpts_get_sb(struct file_system_type *fs_type, } if (opts.newinstance) - return new_pts_mount(fs_type, flags, data, &opts, mnt); + error = new_pts_mount(fs_type, flags, data, &opts, mnt); else - return init_pts_mount(fs_type, flags, data, &opts, mnt); + error = init_pts_mount(fs_type, flags, data, &opts, mnt); + + if (error) + return error; + + error = mknod_ptmx(mnt->mnt_sb); + if (error) + goto out_dput; + + return 0; + +out_dput: + dput(mnt->mnt_sb->s_root); + up_write(&mnt->mnt_sb->s_umount); + deactivate_super(mnt->mnt_sb); + return error; } #else -- cgit v0.10.2 From 289f00e225a6f60056644e0fd7e4081cb140c631 Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Sat, 7 Mar 2009 10:12:06 -0800 Subject: Remove get_init_pts_sb() With mknod_ptmx() moved to devpts_get_sb(), init_pts_mount() becomes a wrapper around get_init_pts_sb(). Remove get_init_pts_sb() and fold code into init_pts_mount(). Signed-off-by: Sukadev Bhattiprolu Acked-by: Serge Hallyn Signed-off-by: Al Viro diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index 58b7190..9c775fa 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -342,7 +342,11 @@ static int new_pts_mount(struct file_system_type *fs_type, int flags, } /* - * get_init_pts_sb() + * init_pts_mount() + * + * Mount or remount the initial kernel mount of devpts. This type of + * mount maintains the legacy, single-instance semantics, while the + * kernel still allows multiple-instances. * * This interface is needed to support multiple namespace semantics in * devpts while preserving backward compatibility of the current 'single- @@ -362,7 +366,7 @@ static int new_pts_mount(struct file_system_type *fs_type, int flags, * consistently selects the 'single-namespace' superblock even in the * presence of the private namespace (i.e 'newinstance') super-blocks. */ -static int get_init_pts_sb(struct file_system_type *fs_type, int flags, +static int init_pts_mount(struct file_system_type *fs_type, int flags, void *data, struct pts_mount_opts *opts, struct vfsmount *mnt) { struct super_block *s; @@ -392,23 +396,6 @@ static int get_init_pts_sb(struct file_system_type *fs_type, int flags, return 0; } -/* - * Mount or remount the initial kernel mount of devpts. This type of - * mount maintains the legacy, single-instance semantics, while the - * kernel still allows multiple-instances. 
- */ -static int init_pts_mount(struct file_system_type *fs_type, int flags, - void *data, struct pts_mount_opts *opts, struct vfsmount *mnt) -{ - int err; - - err = get_init_pts_sb(fs_type, flags, data, opts, mnt); - if (err) - return err; - - return err; -} - static int devpts_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, struct vfsmount *mnt) { -- cgit v0.10.2 From 1bd7903560f1f713e85188a5aaf4d2428b6c8b50 Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Sat, 7 Mar 2009 10:12:32 -0800 Subject: Merge code for single and multiple-instance mounts new_pts_mount() (including the get_sb_nodev()), shares a lot of code with init_pts_mount(). The only difference between them is the 'test-super' function passed into sget(). Move all common code into devpts_get_sb() and remove the new_pts_mount() and init_pts_mount() functions, Changelog[v3]: [Serge Hallyn]: Remove unnecessary printk()s Changelog[v2]: (Christoph Hellwig): Merge code in 'do_pts_mount()' into devpts_get_sb() Signed-off-by: Sukadev Bhattiprolu Acked-by: Serge Hallyn Tested-by: Serge Hallyn Signed-off-by: Al Viro diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index 9c775fa..63a4a59 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -322,85 +322,38 @@ static int compare_init_pts_sb(struct super_block *s, void *p) } /* - * Mount a new (private) instance of devpts. PTYs created in this - * instance are independent of the PTYs in other devpts instances. - */ -static int new_pts_mount(struct file_system_type *fs_type, int flags, - void *data, struct pts_mount_opts *opts, struct vfsmount *mnt) -{ - int err; - struct pts_fs_info *fsi; - - err = get_sb_nodev(fs_type, flags, data, devpts_fill_super, mnt); - if (err) - return err; - - fsi = DEVPTS_SB(mnt->mnt_sb); - memcpy(&fsi->mount_opts, opts, sizeof(opts)); - - return 0; -} - -/* - * init_pts_mount() + * devpts_get_sb() + * + * If the '-o newinstance' mount option was specified, mount a new + * (private) instance of devpts. PTYs created in this instance are + * independent of the PTYs in other devpts instances. + * + * If the '-o newinstance' option was not specified, mount/remount the + * initial kernel mount of devpts. This type of mount gives the + * legacy, single-instance semantics. * - * Mount or remount the initial kernel mount of devpts. This type of - * mount maintains the legacy, single-instance semantics, while the - * kernel still allows multiple-instances. + * The 'newinstance' option is needed to support multiple namespace + * semantics in devpts while preserving backward compatibility of the + * current 'single-namespace' semantics. i.e all mounts of devpts + * without the 'newinstance' mount option should bind to the initial + * kernel mount, like get_sb_single(). * - * This interface is needed to support multiple namespace semantics in - * devpts while preserving backward compatibility of the current 'single- - * namespace' semantics. i.e all mounts of devpts without the 'newinstance' - * mount option should bind to the initial kernel mount, like - * get_sb_single(). + * Mounts with 'newinstance' option create a new, private namespace. * - * Mounts with 'newinstance' option create a new private namespace. + * NOTE: * - * But for single-mount semantics, devpts cannot use get_sb_single(), + * For single-mount semantics, devpts cannot use get_sb_single(), * because get_sb_single()/sget() find and use the super-block from * the most recent mount of devpts. 
But that recent mount may be a * 'newinstance' mount and get_sb_single() would pick the newinstance * super-block instead of the initial super-block. - * - * This interface is identical to get_sb_single() except that it - * consistently selects the 'single-namespace' superblock even in the - * presence of the private namespace (i.e 'newinstance') super-blocks. */ -static int init_pts_mount(struct file_system_type *fs_type, int flags, - void *data, struct pts_mount_opts *opts, struct vfsmount *mnt) -{ - struct super_block *s; - struct pts_fs_info *fsi; - int error; - - s = sget(fs_type, compare_init_pts_sb, set_anon_super, NULL); - if (IS_ERR(s)) - return PTR_ERR(s); - - if (!s->s_root) { - s->s_flags = flags; - error = devpts_fill_super(s, data, flags & MS_SILENT ? 1 : 0); - if (error) { - up_write(&s->s_umount); - deactivate_super(s); - return error; - } - s->s_flags |= MS_ACTIVE; - } - - simple_set_mnt(mnt, s); - - fsi = DEVPTS_SB(mnt->mnt_sb); - memcpy(&fsi->mount_opts, opts, sizeof(opts)); - - return 0; -} - static int devpts_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, struct vfsmount *mnt) { int error; struct pts_mount_opts opts; + struct super_block *s; memset(&opts, 0, sizeof(opts)); if (data) { @@ -410,23 +363,37 @@ static int devpts_get_sb(struct file_system_type *fs_type, } if (opts.newinstance) - error = new_pts_mount(fs_type, flags, data, &opts, mnt); + s = sget(fs_type, NULL, set_anon_super, NULL); else - error = init_pts_mount(fs_type, flags, data, &opts, mnt); + s = sget(fs_type, compare_init_pts_sb, set_anon_super, NULL); - if (error) - return error; + if (IS_ERR(s)) + return PTR_ERR(s); + + if (!s->s_root) { + s->s_flags = flags; + error = devpts_fill_super(s, data, flags & MS_SILENT ? 1 : 0); + if (error) + goto out_undo_sget; + s->s_flags |= MS_ACTIVE; + } + + simple_set_mnt(mnt, s); + + memcpy(&(DEVPTS_SB(s))->mount_opts, &opts, sizeof(opts)); - error = mknod_ptmx(mnt->mnt_sb); + error = mknod_ptmx(s); if (error) goto out_dput; return 0; out_dput: - dput(mnt->mnt_sb->s_root); - up_write(&mnt->mnt_sb->s_umount); - deactivate_super(mnt->mnt_sb); + dput(s->s_root); + +out_undo_sget: + up_write(&s->s_umount); + deactivate_super(s); return error; } -- cgit v0.10.2 From aabb8fdb41128705fd1627f56fdd571e45fdbcdb Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Wed, 11 Mar 2009 13:17:36 -0700 Subject: fs: avoid I_NEW inodes To be on the safe side, it should be less fragile to exclude I_NEW inodes from inode list scans by default (unless there is an important reason to have them). Normally they will get excluded (eg. by zero refcount or writecount etc), however it is a bit fragile for list walkers to know exactly what parts of the inode state is set up and valid to test when in I_NEW. So along these lines, move I_NEW checks upward as well (sometimes taking I_FREEING etc checks with them too -- this shouldn't be a problem should it?) 
Signed-off-by: Nick Piggin Acked-by: Jan Kara Cc: Al Viro Signed-off-by: Andrew Morton Signed-off-by: Al Viro diff --git a/fs/dquot.c b/fs/dquot.c index bca3cac..cb1c3bc 100644 --- a/fs/dquot.c +++ b/fs/dquot.c @@ -789,12 +789,12 @@ static void add_dquot_ref(struct super_block *sb, int type) spin_lock(&inode_lock); list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { + if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) + continue; if (!atomic_read(&inode->i_writecount)) continue; if (!dqinit_needed(inode, type)) continue; - if (inode->i_state & (I_FREEING|I_WILL_FREE)) - continue; __iget(inode); spin_unlock(&inode_lock); @@ -870,6 +870,12 @@ static void remove_dquot_ref(struct super_block *sb, int type, spin_lock(&inode_lock); list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { + /* + * We have to scan also I_NEW inodes because they can already + * have quota pointer initialized. Luckily, we need to touch + * only quota pointers and these have separate locking + * (dqptr_sem). + */ if (!IS_NOQUOTA(inode)) remove_inode_dquot_ref(inode, type, tofree_head); } diff --git a/fs/drop_caches.c b/fs/drop_caches.c index 3e5637f..44d725f 100644 --- a/fs/drop_caches.c +++ b/fs/drop_caches.c @@ -18,7 +18,7 @@ static void drop_pagecache_sb(struct super_block *sb) spin_lock(&inode_lock); list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { - if (inode->i_state & (I_FREEING|I_WILL_FREE)) + if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) continue; if (inode->i_mapping->nrpages == 0) continue; diff --git a/fs/inode.c b/fs/inode.c index 826fb0b..06aa5a1 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -356,6 +356,8 @@ static int invalidate_list(struct list_head *head, struct list_head *dispose) if (tmp == head) break; inode = list_entry(tmp, struct inode, i_sb_list); + if (inode->i_state & I_NEW) + continue; invalidate_inode_buffers(inode); if (!atomic_read(&inode->i_count)) { list_move(&inode->i_list, dispose); diff --git a/fs/notify/inotify/inotify.c b/fs/notify/inotify/inotify.c index 331f2e8..220c13f 100644 --- a/fs/notify/inotify/inotify.c +++ b/fs/notify/inotify/inotify.c @@ -380,6 +380,14 @@ void inotify_unmount_inodes(struct list_head *list) struct list_head *watches; /* + * We cannot __iget() an inode in state I_CLEAR, I_FREEING, + * I_WILL_FREE, or I_NEW which is fine because by that point + * the inode cannot have any associated watches. + */ + if (inode->i_state & (I_CLEAR|I_FREEING|I_WILL_FREE|I_NEW)) + continue; + + /* * If i_count is zero, the inode cannot have any watches and * doing an __iget/iput with MS_ACTIVE clear would actually * evict all inodes with zero i_count from icache which is @@ -388,14 +396,6 @@ void inotify_unmount_inodes(struct list_head *list) if (!atomic_read(&inode->i_count)) continue; - /* - * We cannot __iget() an inode in state I_CLEAR, I_FREEING, or - * I_WILL_FREE which is fine because by that point the inode - * cannot have any associated watches. - */ - if (inode->i_state & (I_CLEAR | I_FREEING | I_WILL_FREE)) - continue; - need_iput_tmp = need_iput; need_iput = NULL; /* In case inotify_remove_watch_locked() drops a reference. */ -- cgit v0.10.2 From 32a0f488ce5e8a9a148491f15edc508ab5e8265b Mon Sep 17 00:00:00 2001 From: Beat Michel Liechti Date: Thu, 26 Mar 2009 22:36:52 +0100 Subject: DVB: firedtv: FireDTV S2 problems with tuning solved Signed-off-by: Beat Michel Liechti Tuning was broken on FireDTV S2 (and presumably FloppyDTV S2) because a wrong opcode was sent. The box only gave "not implemented" responses. 
Changing the opcode to _TUNE_QPSK2 fixes this for good. Cc: stable@kernel.org Signed-off-by: Stefan Richter diff --git a/drivers/media/dvb/firewire/firedtv-avc.c b/drivers/media/dvb/firewire/firedtv-avc.c index 1a300f0..32526f1 100644 --- a/drivers/media/dvb/firewire/firedtv-avc.c +++ b/drivers/media/dvb/firewire/firedtv-avc.c @@ -135,6 +135,7 @@ static const char *debug_fcp_opcode(unsigned int opcode, case SFE_VENDOR_OPCODE_REGISTER_REMOTE_CONTROL: return "RegisterRC"; case SFE_VENDOR_OPCODE_LNB_CONTROL: return "LNBControl"; case SFE_VENDOR_OPCODE_TUNE_QPSK: return "TuneQPSK"; + case SFE_VENDOR_OPCODE_TUNE_QPSK2: return "TuneQPSK2"; case SFE_VENDOR_OPCODE_HOST2CA: return "Host2CA"; case SFE_VENDOR_OPCODE_CA2HOST: return "CA2Host"; } @@ -266,7 +267,10 @@ static void avc_tuner_tuneqpsk(struct firedtv *fdtv, c->operand[0] = SFE_VENDOR_DE_COMPANYID_0; c->operand[1] = SFE_VENDOR_DE_COMPANYID_1; c->operand[2] = SFE_VENDOR_DE_COMPANYID_2; - c->operand[3] = SFE_VENDOR_OPCODE_TUNE_QPSK; + if (fdtv->type == FIREDTV_DVB_S2) + c->operand[3] = SFE_VENDOR_OPCODE_TUNE_QPSK2; + else + c->operand[3] = SFE_VENDOR_OPCODE_TUNE_QPSK; c->operand[4] = (params->frequency >> 24) & 0xff; c->operand[5] = (params->frequency >> 16) & 0xff; -- cgit v0.10.2 From 568d9a8f6d4bf81e0672c74573dc02981d31e3ea Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Thu, 12 Mar 2009 16:27:11 -0700 Subject: drm/i915: Change DCC tiling detection case to cover only mobile parts. Later spec investigation has revealed that every 9xx mobile part has had this register in this format. Also, no non-mobile parts have been shown to have this register. So make all mobile use the same code, and all non-mobile use the hack 965 detection. Signed-off-by: Eric Anholt diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 7fb4191..4cce1ae 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -96,16 +96,16 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) */ swizzle_x = I915_BIT_6_SWIZZLE_NONE; swizzle_y = I915_BIT_6_SWIZZLE_NONE; - } else if ((!IS_I965G(dev) && !IS_G33(dev)) || IS_I965GM(dev) || - IS_GM45(dev)) { + } else if (IS_MOBILE(dev)) { uint32_t dcc; - /* On 915-945 and GM965, channel interleave by the CPU is - * determined by DCC. The CPU will alternate based on bit 6 - * in interleaved mode, and the GPU will then also alternate - * on bit 6, 9, and 10 for X, but the CPU may also optionally - * alternate based on bit 17 (XOR not disabled and XOR - * bit == 17). + /* On mobile 9xx chipsets, channel interleave by the CPU is + * determined by DCC. For single-channel, neither the CPU + * nor the GPU do swizzling. For dual channel interleaved, + * the GPU's interleave is bit 9 and 10 for X tiled, and bit + * 9 for Y tiled. The CPU's interleave is independent, and + * can be based on either bit 11 (haven't seen this yet) or + * bit 17 (common). */ dcc = I915_READ(DCC); switch (dcc & DCC_ADDRESSING_MODE_MASK) { @@ -115,19 +115,18 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) swizzle_y = I915_BIT_6_SWIZZLE_NONE; break; case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED: - if (IS_I915G(dev) || IS_I915GM(dev) || - dcc & DCC_CHANNEL_XOR_DISABLE) { + if (dcc & DCC_CHANNEL_XOR_DISABLE) { + /* This is the base swizzling by the GPU for + * tiled buffers. 
+ */ swizzle_x = I915_BIT_6_SWIZZLE_9_10; swizzle_y = I915_BIT_6_SWIZZLE_9; - } else if ((IS_I965GM(dev) || IS_GM45(dev)) && - (dcc & DCC_CHANNEL_XOR_BIT_17) == 0) { - /* GM965/GM45 does either bit 11 or bit 17 - * swizzling. - */ + } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) { + /* Bit 11 swizzling by the CPU in addition. */ swizzle_x = I915_BIT_6_SWIZZLE_9_10_11; swizzle_y = I915_BIT_6_SWIZZLE_9_11; } else { - /* Bit 17 or perhaps other swizzling */ + /* Bit 17 swizzling by the CPU in addition. */ swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; } -- cgit v0.10.2 From 044c7c415a68077b7c444c753aa03a35149e881a Mon Sep 17 00:00:00 2001 From: Ma Ling Date: Wed, 18 Mar 2009 20:13:23 +0800 Subject: drm/i915: Use documented PLL timing limits for G4X platform The values come from the internal reference spreadsheet on PLL timing limits for the G4X chipsets. Part of fixing fd.o bug #17508 Signed-off-by: Ma Ling [anholt: Cleaned up some whitespace] Signed-off-by: Eric Anholt diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index a283427..89f7af0 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -115,6 +115,89 @@ typedef struct { #define INTEL_LIMIT_I8XX_LVDS 1 #define INTEL_LIMIT_I9XX_SDVO_DAC 2 #define INTEL_LIMIT_I9XX_LVDS 3 +#define INTEL_LIMIT_G4X_SDVO 4 +#define INTEL_LIMIT_G4X_HDMI_DAC 5 +#define INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS 6 +#define INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS 7 + +/*The parameter is for SDVO on G4x platform*/ +#define G4X_DOT_SDVO_MIN 25000 +#define G4X_DOT_SDVO_MAX 270000 +#define G4X_VCO_MIN 1750000 +#define G4X_VCO_MAX 3500000 +#define G4X_N_SDVO_MIN 1 +#define G4X_N_SDVO_MAX 4 +#define G4X_M_SDVO_MIN 104 +#define G4X_M_SDVO_MAX 138 +#define G4X_M1_SDVO_MIN 17 +#define G4X_M1_SDVO_MAX 23 +#define G4X_M2_SDVO_MIN 5 +#define G4X_M2_SDVO_MAX 11 +#define G4X_P_SDVO_MIN 10 +#define G4X_P_SDVO_MAX 30 +#define G4X_P1_SDVO_MIN 1 +#define G4X_P1_SDVO_MAX 3 +#define G4X_P2_SDVO_SLOW 10 +#define G4X_P2_SDVO_FAST 10 +#define G4X_P2_SDVO_LIMIT 270000 + +/*The parameter is for HDMI_DAC on G4x platform*/ +#define G4X_DOT_HDMI_DAC_MIN 22000 +#define G4X_DOT_HDMI_DAC_MAX 400000 +#define G4X_N_HDMI_DAC_MIN 1 +#define G4X_N_HDMI_DAC_MAX 4 +#define G4X_M_HDMI_DAC_MIN 104 +#define G4X_M_HDMI_DAC_MAX 138 +#define G4X_M1_HDMI_DAC_MIN 16 +#define G4X_M1_HDMI_DAC_MAX 23 +#define G4X_M2_HDMI_DAC_MIN 5 +#define G4X_M2_HDMI_DAC_MAX 11 +#define G4X_P_HDMI_DAC_MIN 5 +#define G4X_P_HDMI_DAC_MAX 80 +#define G4X_P1_HDMI_DAC_MIN 1 +#define G4X_P1_HDMI_DAC_MAX 8 +#define G4X_P2_HDMI_DAC_SLOW 10 +#define G4X_P2_HDMI_DAC_FAST 5 +#define G4X_P2_HDMI_DAC_LIMIT 165000 + +/*The parameter is for SINGLE_CHANNEL_LVDS on G4x platform*/ +#define G4X_DOT_SINGLE_CHANNEL_LVDS_MIN 20000 +#define G4X_DOT_SINGLE_CHANNEL_LVDS_MAX 115000 +#define G4X_N_SINGLE_CHANNEL_LVDS_MIN 1 +#define G4X_N_SINGLE_CHANNEL_LVDS_MAX 3 +#define G4X_M_SINGLE_CHANNEL_LVDS_MIN 104 +#define G4X_M_SINGLE_CHANNEL_LVDS_MAX 138 +#define G4X_M1_SINGLE_CHANNEL_LVDS_MIN 17 +#define G4X_M1_SINGLE_CHANNEL_LVDS_MAX 23 +#define G4X_M2_SINGLE_CHANNEL_LVDS_MIN 5 +#define G4X_M2_SINGLE_CHANNEL_LVDS_MAX 11 +#define G4X_P_SINGLE_CHANNEL_LVDS_MIN 28 +#define G4X_P_SINGLE_CHANNEL_LVDS_MAX 112 +#define G4X_P1_SINGLE_CHANNEL_LVDS_MIN 2 +#define G4X_P1_SINGLE_CHANNEL_LVDS_MAX 8 +#define G4X_P2_SINGLE_CHANNEL_LVDS_SLOW 14 +#define G4X_P2_SINGLE_CHANNEL_LVDS_FAST 14 +#define G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT 0 + +/*The parameter is for 
DUAL_CHANNEL_LVDS on G4x platform*/ +#define G4X_DOT_DUAL_CHANNEL_LVDS_MIN 80000 +#define G4X_DOT_DUAL_CHANNEL_LVDS_MAX 224000 +#define G4X_N_DUAL_CHANNEL_LVDS_MIN 1 +#define G4X_N_DUAL_CHANNEL_LVDS_MAX 3 +#define G4X_M_DUAL_CHANNEL_LVDS_MIN 104 +#define G4X_M_DUAL_CHANNEL_LVDS_MAX 138 +#define G4X_M1_DUAL_CHANNEL_LVDS_MIN 17 +#define G4X_M1_DUAL_CHANNEL_LVDS_MAX 23 +#define G4X_M2_DUAL_CHANNEL_LVDS_MIN 5 +#define G4X_M2_DUAL_CHANNEL_LVDS_MAX 11 +#define G4X_P_DUAL_CHANNEL_LVDS_MIN 14 +#define G4X_P_DUAL_CHANNEL_LVDS_MAX 42 +#define G4X_P1_DUAL_CHANNEL_LVDS_MIN 2 +#define G4X_P1_DUAL_CHANNEL_LVDS_MAX 6 +#define G4X_P2_DUAL_CHANNEL_LVDS_SLOW 7 +#define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7 +#define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0 + static const intel_limit_t intel_limits[] = { { /* INTEL_LIMIT_I8XX_DVO_DAC */ @@ -168,14 +251,116 @@ static const intel_limit_t intel_limits[] = { .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST }, }, + /* below parameter and function is for G4X Chipset Family*/ + { /* INTEL_LIMIT_G4X_SDVO */ + .dot = { .min = G4X_DOT_SDVO_MIN, .max = G4X_DOT_SDVO_MAX }, + .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX}, + .n = { .min = G4X_N_SDVO_MIN, .max = G4X_N_SDVO_MAX }, + .m = { .min = G4X_M_SDVO_MIN, .max = G4X_M_SDVO_MAX }, + .m1 = { .min = G4X_M1_SDVO_MIN, .max = G4X_M1_SDVO_MAX }, + .m2 = { .min = G4X_M2_SDVO_MIN, .max = G4X_M2_SDVO_MAX }, + .p = { .min = G4X_P_SDVO_MIN, .max = G4X_P_SDVO_MAX }, + .p1 = { .min = G4X_P1_SDVO_MIN, .max = G4X_P1_SDVO_MAX}, + .p2 = { .dot_limit = G4X_P2_SDVO_LIMIT, + .p2_slow = G4X_P2_SDVO_SLOW, + .p2_fast = G4X_P2_SDVO_FAST + }, + }, + { /* INTEL_LIMIT_G4X_HDMI_DAC */ + .dot = { .min = G4X_DOT_HDMI_DAC_MIN, .max = G4X_DOT_HDMI_DAC_MAX }, + .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX}, + .n = { .min = G4X_N_HDMI_DAC_MIN, .max = G4X_N_HDMI_DAC_MAX }, + .m = { .min = G4X_M_HDMI_DAC_MIN, .max = G4X_M_HDMI_DAC_MAX }, + .m1 = { .min = G4X_M1_HDMI_DAC_MIN, .max = G4X_M1_HDMI_DAC_MAX }, + .m2 = { .min = G4X_M2_HDMI_DAC_MIN, .max = G4X_M2_HDMI_DAC_MAX }, + .p = { .min = G4X_P_HDMI_DAC_MIN, .max = G4X_P_HDMI_DAC_MAX }, + .p1 = { .min = G4X_P1_HDMI_DAC_MIN, .max = G4X_P1_HDMI_DAC_MAX}, + .p2 = { .dot_limit = G4X_P2_HDMI_DAC_LIMIT, + .p2_slow = G4X_P2_HDMI_DAC_SLOW, + .p2_fast = G4X_P2_HDMI_DAC_FAST + }, + }, + { /* INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS */ + .dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN, + .max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX }, + .vco = { .min = G4X_VCO_MIN, + .max = G4X_VCO_MAX }, + .n = { .min = G4X_N_SINGLE_CHANNEL_LVDS_MIN, + .max = G4X_N_SINGLE_CHANNEL_LVDS_MAX }, + .m = { .min = G4X_M_SINGLE_CHANNEL_LVDS_MIN, + .max = G4X_M_SINGLE_CHANNEL_LVDS_MAX }, + .m1 = { .min = G4X_M1_SINGLE_CHANNEL_LVDS_MIN, + .max = G4X_M1_SINGLE_CHANNEL_LVDS_MAX }, + .m2 = { .min = G4X_M2_SINGLE_CHANNEL_LVDS_MIN, + .max = G4X_M2_SINGLE_CHANNEL_LVDS_MAX }, + .p = { .min = G4X_P_SINGLE_CHANNEL_LVDS_MIN, + .max = G4X_P_SINGLE_CHANNEL_LVDS_MAX }, + .p1 = { .min = G4X_P1_SINGLE_CHANNEL_LVDS_MIN, + .max = G4X_P1_SINGLE_CHANNEL_LVDS_MAX }, + .p2 = { .dot_limit = G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT, + .p2_slow = G4X_P2_SINGLE_CHANNEL_LVDS_SLOW, + .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST + }, + }, + { /* INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS */ + .dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN, + .max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX }, + .vco = { .min = G4X_VCO_MIN, + .max = G4X_VCO_MAX }, + .n = { .min = G4X_N_DUAL_CHANNEL_LVDS_MIN, + .max = G4X_N_DUAL_CHANNEL_LVDS_MAX }, + .m = { .min = 
G4X_M_DUAL_CHANNEL_LVDS_MIN, + .max = G4X_M_DUAL_CHANNEL_LVDS_MAX }, + .m1 = { .min = G4X_M1_DUAL_CHANNEL_LVDS_MIN, + .max = G4X_M1_DUAL_CHANNEL_LVDS_MAX }, + .m2 = { .min = G4X_M2_DUAL_CHANNEL_LVDS_MIN, + .max = G4X_M2_DUAL_CHANNEL_LVDS_MAX }, + .p = { .min = G4X_P_DUAL_CHANNEL_LVDS_MIN, + .max = G4X_P_DUAL_CHANNEL_LVDS_MAX }, + .p1 = { .min = G4X_P1_DUAL_CHANNEL_LVDS_MIN, + .max = G4X_P1_DUAL_CHANNEL_LVDS_MAX }, + .p2 = { .dot_limit = G4X_P2_DUAL_CHANNEL_LVDS_LIMIT, + .p2_slow = G4X_P2_DUAL_CHANNEL_LVDS_SLOW, + .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST + }, + }, }; +static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + const intel_limit_t *limit; + + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { + if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == + LVDS_CLKB_POWER_UP) + /* LVDS with dual channel */ + limit = &intel_limits + [INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS]; + else + /* LVDS with dual channel */ + limit = &intel_limits + [INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS]; + } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) || + intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) { + limit = &intel_limits[INTEL_LIMIT_G4X_HDMI_DAC]; + } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) { + limit = &intel_limits[INTEL_LIMIT_G4X_SDVO]; + } else /* The option is for other outputs */ + limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC]; + + return limit; +} + static const intel_limit_t *intel_limit(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; const intel_limit_t *limit; - if (IS_I9XX(dev)) { + if (IS_G4X(dev)) { + limit = intel_g4x_limit(crtc); + } else if (IS_I9XX(dev)) { if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS]; else -- cgit v0.10.2 From d490609321828c62e8dfa6220f0acd82e5cb3756 Mon Sep 17 00:00:00 2001 From: Ma Ling Date: Wed, 18 Mar 2009 20:13:27 +0800 Subject: drm/i915: Use a different PLL timing search function on G4X. This improves the PLL timings according to the suggestion of the hardware engineers. This results in some outputs being able to sync that weren't able to before. This is part of fixing fd.o bug #17508. 
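To make the shape of the new search concrete, the idea reduces to a brute-force walk over the divider ranges, keeping whichever combination lands the computed dot clock closest to the target. The standalone sketch below is illustrative only: the ranges and the fixed p2 are invented for the example, while the real limit tables (previous patch) and intel_g4x_find_best_PLL() itself appear in the diff below. It uses the clock equation quoted elsewhere in this series, dot = refclk * (5*(m1+2) + (m2+2)) / (n+2) / p1 / p2.

/*
 * Standalone illustration (not driver code) of an error-minimising divisor
 * search.  All frequencies in kHz; ranges are example values only.
 */
#include <stdio.h>
#include <stdlib.h>

struct clk { int n, m1, m2, p1, p2, dot; };

static int calc_dot(int refclk, const struct clk *c)
{
	int m = 5 * (c->m1 + 2) + (c->m2 + 2);
	int vco = refclk * m / (c->n + 2);

	return vco / (c->p1 * c->p2);
}

static int find_best(int refclk, int target, struct clk *best)
{
	struct clk c = { .p2 = 10 };	/* example post divider */
	int err_most = target;
	int found = 0;

	/* prefer small n and large m1/m2/p1 by iterating in that order */
	for (c.n = 1; c.n <= 4; c.n++)
		for (c.m1 = 23; c.m1 >= 17; c.m1--)
			for (c.m2 = 11; c.m2 >= 5; c.m2--)
				for (c.p1 = 3; c.p1 >= 1; c.p1--) {
					int err;

					c.dot = calc_dot(refclk, &c);
					err = abs(c.dot - target);
					if (err < err_most) {
						*best = c;
						err_most = err;
						found = 1;
					}
				}
	return found;
}

int main(void)
{
	struct clk best;

	/* example inputs: 96 MHz reference, 162 MHz target dot clock */
	if (find_best(96000, 162000, &best))
		printf("n=%d m1=%d m2=%d p1=%d p2=%d dot=%d kHz\n",
		       best.n, best.m1, best.m2, best.p1, best.p2, best.dot);
	return 0;
}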
Signed-off-by: Ma Ling [anholt: cleaned up a couple of redundant comments] Signed-off-by: Eric Anholt diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 89f7af0..8e29545 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -56,11 +56,13 @@ typedef struct { } intel_p2_t; #define INTEL_P2_NUM 2 - -typedef struct { +typedef struct intel_limit intel_limit_t; +struct intel_limit { intel_range_t dot, vco, n, m, m1, m2, p, p1; intel_p2_t p2; -} intel_limit_t; + bool (* find_pll)(const intel_limit_t *, struct drm_crtc *, + int, int, intel_clock_t *); +}; #define I8XX_DOT_MIN 25000 #define I8XX_DOT_MAX 350000 @@ -198,6 +200,12 @@ typedef struct { #define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7 #define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0 +static bool +intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, + int target, int refclk, intel_clock_t *best_clock); +static bool +intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, + int target, int refclk, intel_clock_t *best_clock); static const intel_limit_t intel_limits[] = { { /* INTEL_LIMIT_I8XX_DVO_DAC */ @@ -211,6 +219,7 @@ static const intel_limit_t intel_limits[] = { .p1 = { .min = I8XX_P1_MIN, .max = I8XX_P1_MAX }, .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST }, + .find_pll = intel_find_best_PLL, }, { /* INTEL_LIMIT_I8XX_LVDS */ .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, @@ -223,6 +232,7 @@ static const intel_limit_t intel_limits[] = { .p1 = { .min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX }, .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST }, + .find_pll = intel_find_best_PLL, }, { /* INTEL_LIMIT_I9XX_SDVO_DAC */ .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, @@ -235,6 +245,7 @@ static const intel_limit_t intel_limits[] = { .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, + .find_pll = intel_find_best_PLL, }, { /* INTEL_LIMIT_I9XX_LVDS */ .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, @@ -250,6 +261,7 @@ static const intel_limit_t intel_limits[] = { */ .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST }, + .find_pll = intel_find_best_PLL, }, /* below parameter and function is for G4X Chipset Family*/ { /* INTEL_LIMIT_G4X_SDVO */ @@ -265,6 +277,7 @@ static const intel_limit_t intel_limits[] = { .p2_slow = G4X_P2_SDVO_SLOW, .p2_fast = G4X_P2_SDVO_FAST }, + .find_pll = intel_g4x_find_best_PLL, }, { /* INTEL_LIMIT_G4X_HDMI_DAC */ .dot = { .min = G4X_DOT_HDMI_DAC_MIN, .max = G4X_DOT_HDMI_DAC_MAX }, @@ -279,6 +292,7 @@ static const intel_limit_t intel_limits[] = { .p2_slow = G4X_P2_HDMI_DAC_SLOW, .p2_fast = G4X_P2_HDMI_DAC_FAST }, + .find_pll = intel_g4x_find_best_PLL, }, { /* INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS */ .dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN, @@ -301,6 +315,7 @@ static const intel_limit_t intel_limits[] = { .p2_slow = G4X_P2_SINGLE_CHANNEL_LVDS_SLOW, .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST }, + .find_pll = intel_g4x_find_best_PLL, }, { /* INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS */ .dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN, @@ -323,6 +338,7 @@ static const intel_limit_t intel_limits[] = { .p2_slow = G4X_P2_DUAL_CHANNEL_LVDS_SLOW, .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST }, + .find_pll = intel_g4x_find_best_PLL, }, }; @@ -437,18 
+453,14 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock) return true; } -/** - * Returns a set of divisors for the desired target clock with the given - * refclk, or FALSE. The returned values represent the clock equation: - * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. - */ -static bool intel_find_best_PLL(struct drm_crtc *crtc, int target, - int refclk, intel_clock_t *best_clock) +static bool +intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, + int target, int refclk, intel_clock_t *best_clock) + { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; intel_clock_t clock; - const intel_limit_t *limit = intel_limit(crtc); int err = target; if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && @@ -500,6 +512,63 @@ static bool intel_find_best_PLL(struct drm_crtc *crtc, int target, return (err != target); } +static bool +intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, + int target, int refclk, intel_clock_t *best_clock) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + intel_clock_t clock; + int max_n; + bool found; + /* approximately equals target * 0.00488 */ + int err_most = (target >> 8) + (target >> 10); + found = false; + + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { + if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == + LVDS_CLKB_POWER_UP) + clock.p2 = limit->p2.p2_fast; + else + clock.p2 = limit->p2.p2_slow; + } else { + if (target < limit->p2.dot_limit) + clock.p2 = limit->p2.p2_slow; + else + clock.p2 = limit->p2.p2_fast; + } + + memset(best_clock, 0, sizeof(*best_clock)); + max_n = limit->n.max; + /* based on hardware requriment prefer smaller n to precision */ + for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { + /* based on hardware requirment prefere larger m1,m2, p1 */ + for (clock.m1 = limit->m1.max; + clock.m1 >= limit->m1.min; clock.m1--) { + for (clock.m2 = limit->m2.max; + clock.m2 >= limit->m2.min; clock.m2--) { + for (clock.p1 = limit->p1.max; + clock.p1 >= limit->p1.min; clock.p1--) { + int this_err; + + intel_clock(refclk, &clock); + if (!intel_PLL_is_valid(crtc, &clock)) + continue; + this_err = abs(clock.dot - target) ; + if (this_err < err_most) { + *best_clock = clock; + err_most = this_err; + max_n = clock.n; + found = true; + } + } + } + } + } + + return found; +} + void intel_wait_for_vblank(struct drm_device *dev) { @@ -918,6 +987,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, bool is_crt = false, is_lvds = false, is_tv = false; struct drm_mode_config *mode_config = &dev->mode_config; struct drm_connector *connector; + const intel_limit_t *limit; int ret; drm_vblank_pre_modeset(dev, pipe); @@ -961,7 +1031,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, refclk = 48000; } - ok = intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, &clock); + /* + * Returns a set of divisors for the desired target clock with the given + * refclk, or FALSE. The returned values represent the clock equation: + * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 
+ */ + limit = intel_limit(crtc); + ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); if (!ok) { DRM_ERROR("Couldn't find PLL settings for mode!\n"); return -EINVAL; -- cgit v0.10.2 From 13520b051e8888dd3af9bda639d83e7df76613d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kristian=20H=C3=B8gsberg?= Date: Fri, 13 Mar 2009 15:42:14 -0400 Subject: drm/i915: Read the right SDVO register when detecting SVDO/HDMI. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This fixes incorrect detection of the second SDVO/HDMI output on G4X, and extra boot time on pre-G4X. Signed-off-by: Kristian Høgsberg Signed-off-by: Eric Anholt diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 8e29545..0d40b4b 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1735,13 +1735,21 @@ static void intel_setup_outputs(struct drm_device *dev) if (IS_I9XX(dev)) { int found; + u32 reg; if (I915_READ(SDVOB) & SDVO_DETECTED) { found = intel_sdvo_init(dev, SDVOB); if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) intel_hdmi_init(dev, SDVOB); } - if (!IS_G4X(dev) || (I915_READ(SDVOB) & SDVO_DETECTED)) { + + /* Before G4X SDVOC doesn't have its own detect register */ + if (IS_G4X(dev)) + reg = SDVOC; + else + reg = SDVOB; + + if (I915_READ(reg) & SDVO_DETECTED) { found = intel_sdvo_init(dev, SDVOC); if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) intel_hdmi_init(dev, SDVOC); -- cgit v0.10.2 From 3de09aa3b38910d366f4710ffdf430c9d387d1a3 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Mon, 9 Mar 2009 09:42:23 -0700 Subject: drm/i915: Fix lock order reversal in GTT pwrite path. Since the pagefault path determines that the lock order we use has to be mmap_sem -> struct_mutex, we can't allow page faults to occur while the struct_mutex is held. To fix this in pwrite, we first try optimistically to see if we can copy from user without faulting. If it fails, fall back to using get_user_pages to pin the user's memory, and map those pages atomically when copying it to the GPU. Signed-off-by: Eric Anholt Reviewed-by: Jesse Barnes diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 37427e4..35f8c7bd 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -223,29 +223,34 @@ fast_user_write(struct io_mapping *mapping, */ static inline int -slow_user_write(struct io_mapping *mapping, - loff_t page_base, int page_offset, - char __user *user_data, - int length) +slow_kernel_write(struct io_mapping *mapping, + loff_t gtt_base, int gtt_offset, + struct page *user_page, int user_offset, + int length) { - char __iomem *vaddr; + char *src_vaddr, *dst_vaddr; unsigned long unwritten; - vaddr = io_mapping_map_wc(mapping, page_base); - if (vaddr == NULL) - return -EFAULT; - unwritten = __copy_from_user(vaddr + page_offset, - user_data, length); - io_mapping_unmap(vaddr); + dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base); + src_vaddr = kmap_atomic(user_page, KM_USER1); + unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset, + src_vaddr + user_offset, + length); + kunmap_atomic(src_vaddr, KM_USER1); + io_mapping_unmap_atomic(dst_vaddr); if (unwritten) return -EFAULT; return 0; } +/** + * This is the fast pwrite path, where we copy the data directly from the + * user into the GTT, uncached. 
+ */ static int -i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj, - struct drm_i915_gem_pwrite *args, - struct drm_file *file_priv) +i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, + struct drm_i915_gem_pwrite *args, + struct drm_file *file_priv) { struct drm_i915_gem_object *obj_priv = obj->driver_private; drm_i915_private_t *dev_priv = dev->dev_private; @@ -273,7 +278,6 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj, obj_priv = obj->driver_private; offset = obj_priv->gtt_offset + args->offset; - obj_priv->dirty = 1; while (remain > 0) { /* Operation in this page @@ -292,16 +296,11 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj, page_offset, user_data, page_length); /* If we get a fault while copying data, then (presumably) our - * source page isn't available. In this case, use the - * non-atomic function + * source page isn't available. Return the error and we'll + * retry in the slow path. */ - if (ret) { - ret = slow_user_write (dev_priv->mm.gtt_mapping, - page_base, page_offset, - user_data, page_length); - if (ret) - goto fail; - } + if (ret) + goto fail; remain -= page_length; user_data += page_length; @@ -315,6 +314,115 @@ fail: return ret; } +/** + * This is the fallback GTT pwrite path, which uses get_user_pages to pin + * the memory and maps it using kmap_atomic for copying. + * + * This code resulted in x11perf -rgb10text consuming about 10% more CPU + * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit). + */ +static int +i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, + struct drm_i915_gem_pwrite *args, + struct drm_file *file_priv) +{ + struct drm_i915_gem_object *obj_priv = obj->driver_private; + drm_i915_private_t *dev_priv = dev->dev_private; + ssize_t remain; + loff_t gtt_page_base, offset; + loff_t first_data_page, last_data_page, num_pages; + loff_t pinned_pages, i; + struct page **user_pages; + struct mm_struct *mm = current->mm; + int gtt_page_offset, data_page_offset, data_page_index, page_length; + int ret; + uint64_t data_ptr = args->data_ptr; + + remain = args->size; + + /* Pin the user pages containing the data. We can't fault while + * holding the struct mutex, and all of the pwrite implementations + * want to hold it while dereferencing the user data. + */ + first_data_page = data_ptr / PAGE_SIZE; + last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; + num_pages = last_data_page - first_data_page + 1; + + user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL); + if (user_pages == NULL) + return -ENOMEM; + + down_read(&mm->mmap_sem); + pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, + num_pages, 0, 0, user_pages, NULL); + up_read(&mm->mmap_sem); + if (pinned_pages < num_pages) { + ret = -EFAULT; + goto out_unpin_pages; + } + + mutex_lock(&dev->struct_mutex); + ret = i915_gem_object_pin(obj, 0); + if (ret) + goto out_unlock; + + ret = i915_gem_object_set_to_gtt_domain(obj, 1); + if (ret) + goto out_unpin_object; + + obj_priv = obj->driver_private; + offset = obj_priv->gtt_offset + args->offset; + + while (remain > 0) { + /* Operation in this page + * + * gtt_page_base = page offset within aperture + * gtt_page_offset = offset within page in aperture + * data_page_index = page number in get_user_pages return + * data_page_offset = offset with data_page_index page. 
+ * page_length = bytes to copy for this page + */ + gtt_page_base = offset & PAGE_MASK; + gtt_page_offset = offset & ~PAGE_MASK; + data_page_index = data_ptr / PAGE_SIZE - first_data_page; + data_page_offset = data_ptr & ~PAGE_MASK; + + page_length = remain; + if ((gtt_page_offset + page_length) > PAGE_SIZE) + page_length = PAGE_SIZE - gtt_page_offset; + if ((data_page_offset + page_length) > PAGE_SIZE) + page_length = PAGE_SIZE - data_page_offset; + + ret = slow_kernel_write(dev_priv->mm.gtt_mapping, + gtt_page_base, gtt_page_offset, + user_pages[data_page_index], + data_page_offset, + page_length); + + /* If we get a fault while copying data, then (presumably) our + * source page isn't available. Return the error and we'll + * retry in the slow path. + */ + if (ret) + goto out_unpin_object; + + remain -= page_length; + offset += page_length; + data_ptr += page_length; + } + +out_unpin_object: + i915_gem_object_unpin(obj); +out_unlock: + mutex_unlock(&dev->struct_mutex); +out_unpin_pages: + for (i = 0; i < pinned_pages; i++) + page_cache_release(user_pages[i]); + kfree(user_pages); + + return ret; +} + static int i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj, struct drm_i915_gem_pwrite *args, @@ -388,9 +496,13 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, if (obj_priv->phys_obj) ret = i915_gem_phys_pwrite(dev, obj, args, file_priv); else if (obj_priv->tiling_mode == I915_TILING_NONE && - dev->gtt_total != 0) - ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv); - else + dev->gtt_total != 0) { + ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv); + if (ret == -EFAULT) { + ret = i915_gem_gtt_pwrite_slow(dev, obj, args, + file_priv); + } + } else ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv); #if WATCH_PWRITE -- cgit v0.10.2 From 856fa1988ea483fc2dab84a16681dcfde821b740 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Thu, 19 Mar 2009 14:10:50 -0700 Subject: drm/i915: Make GEM object's page lists refcounted instead of get/free. We've wanted this for a few consumers that touch the pages directly (such as the following commit), which have been doing the refcounting outside of get/put pages. Signed-off-by: Eric Anholt Reviewed-by: Jesse Barnes diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index d6cc986..75e3384 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -404,7 +404,8 @@ struct drm_i915_gem_object { /** AGP memory structure for our GTT binding. */ DRM_AGP_MEM *agp_mem; - struct page **page_list; + struct page **pages; + int pages_refcount; /** * Current offset of the object in GTT space. 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 35f8c7bd..b998d65 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -43,8 +43,8 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, uint64_t offset, uint64_t size); static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj); -static int i915_gem_object_get_page_list(struct drm_gem_object *obj); -static void i915_gem_object_free_page_list(struct drm_gem_object *obj); +static int i915_gem_object_get_pages(struct drm_gem_object *obj); +static void i915_gem_object_put_pages(struct drm_gem_object *obj); static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment); @@ -928,29 +928,30 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, } static void -i915_gem_object_free_page_list(struct drm_gem_object *obj) +i915_gem_object_put_pages(struct drm_gem_object *obj) { struct drm_i915_gem_object *obj_priv = obj->driver_private; int page_count = obj->size / PAGE_SIZE; int i; - if (obj_priv->page_list == NULL) - return; + BUG_ON(obj_priv->pages_refcount == 0); + if (--obj_priv->pages_refcount != 0) + return; for (i = 0; i < page_count; i++) - if (obj_priv->page_list[i] != NULL) { + if (obj_priv->pages[i] != NULL) { if (obj_priv->dirty) - set_page_dirty(obj_priv->page_list[i]); - mark_page_accessed(obj_priv->page_list[i]); - page_cache_release(obj_priv->page_list[i]); + set_page_dirty(obj_priv->pages[i]); + mark_page_accessed(obj_priv->pages[i]); + page_cache_release(obj_priv->pages[i]); } obj_priv->dirty = 0; - drm_free(obj_priv->page_list, + drm_free(obj_priv->pages, page_count * sizeof(struct page *), DRM_MEM_DRIVER); - obj_priv->page_list = NULL; + obj_priv->pages = NULL; } static void @@ -1402,7 +1403,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj) if (obj_priv->fence_reg != I915_FENCE_REG_NONE) i915_gem_clear_fence_reg(obj); - i915_gem_object_free_page_list(obj); + i915_gem_object_put_pages(obj); if (obj_priv->gtt_space) { atomic_dec(&dev->gtt_count); @@ -1521,7 +1522,7 @@ i915_gem_evict_everything(struct drm_device *dev) } static int -i915_gem_object_get_page_list(struct drm_gem_object *obj) +i915_gem_object_get_pages(struct drm_gem_object *obj) { struct drm_i915_gem_object *obj_priv = obj->driver_private; int page_count, i; @@ -1530,18 +1531,19 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj) struct page *page; int ret; - if (obj_priv->page_list) + if (obj_priv->pages_refcount++ != 0) return 0; /* Get the list of pages out of our struct file. They'll be pinned * at this point until we release them. 
*/ page_count = obj->size / PAGE_SIZE; - BUG_ON(obj_priv->page_list != NULL); - obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *), - DRM_MEM_DRIVER); - if (obj_priv->page_list == NULL) { + BUG_ON(obj_priv->pages != NULL); + obj_priv->pages = drm_calloc(page_count, sizeof(struct page *), + DRM_MEM_DRIVER); + if (obj_priv->pages == NULL) { DRM_ERROR("Faled to allocate page list\n"); + obj_priv->pages_refcount--; return -ENOMEM; } @@ -1552,10 +1554,10 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj) if (IS_ERR(page)) { ret = PTR_ERR(page); DRM_ERROR("read_mapping_page failed: %d\n", ret); - i915_gem_object_free_page_list(obj); + i915_gem_object_put_pages(obj); return ret; } - obj_priv->page_list[i] = page; + obj_priv->pages[i] = page; } return 0; } @@ -1878,7 +1880,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) DRM_INFO("Binding object of size %d at 0x%08x\n", obj->size, obj_priv->gtt_offset); #endif - ret = i915_gem_object_get_page_list(obj); + ret = i915_gem_object_get_pages(obj); if (ret) { drm_mm_put_block(obj_priv->gtt_space); obj_priv->gtt_space = NULL; @@ -1890,12 +1892,12 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) * into the GTT. */ obj_priv->agp_mem = drm_agp_bind_pages(dev, - obj_priv->page_list, + obj_priv->pages, page_count, obj_priv->gtt_offset, obj_priv->agp_type); if (obj_priv->agp_mem == NULL) { - i915_gem_object_free_page_list(obj); + i915_gem_object_put_pages(obj); drm_mm_put_block(obj_priv->gtt_space); obj_priv->gtt_space = NULL; return -ENOMEM; @@ -1922,10 +1924,10 @@ i915_gem_clflush_object(struct drm_gem_object *obj) * to GPU, and we can ignore the cache flush because it'll happen * again at bind time. */ - if (obj_priv->page_list == NULL) + if (obj_priv->pages == NULL) return; - drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE); + drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE); } /** Flushes any GPU write domain for the object if it's dirty. 
*/ @@ -2270,7 +2272,7 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) { if (obj_priv->page_cpu_valid[i]) continue; - drm_clflush_pages(obj_priv->page_list + i, 1); + drm_clflush_pages(obj_priv->pages + i, 1); } drm_agp_chipset_flush(dev); } @@ -2336,7 +2338,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, if (obj_priv->page_cpu_valid[i]) continue; - drm_clflush_pages(obj_priv->page_list + i, 1); + drm_clflush_pages(obj_priv->pages + i, 1); obj_priv->page_cpu_valid[i] = 1; } @@ -3304,7 +3306,7 @@ i915_gem_init_hws(struct drm_device *dev) dev_priv->status_gfx_addr = obj_priv->gtt_offset; - dev_priv->hw_status_page = kmap(obj_priv->page_list[0]); + dev_priv->hw_status_page = kmap(obj_priv->pages[0]); if (dev_priv->hw_status_page == NULL) { DRM_ERROR("Failed to map status page.\n"); memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); @@ -3334,7 +3336,7 @@ i915_gem_cleanup_hws(struct drm_device *dev) obj = dev_priv->hws_obj; obj_priv = obj->driver_private; - kunmap(obj_priv->page_list[0]); + kunmap(obj_priv->pages[0]); i915_gem_object_unpin(obj); drm_gem_object_unreference(obj); dev_priv->hws_obj = NULL; @@ -3637,20 +3639,20 @@ void i915_gem_detach_phys_object(struct drm_device *dev, if (!obj_priv->phys_obj) return; - ret = i915_gem_object_get_page_list(obj); + ret = i915_gem_object_get_pages(obj); if (ret) goto out; page_count = obj->size / PAGE_SIZE; for (i = 0; i < page_count; i++) { - char *dst = kmap_atomic(obj_priv->page_list[i], KM_USER0); + char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0); char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); memcpy(dst, src, PAGE_SIZE); kunmap_atomic(dst, KM_USER0); } - drm_clflush_pages(obj_priv->page_list, page_count); + drm_clflush_pages(obj_priv->pages, page_count); drm_agp_chipset_flush(dev); out: obj_priv->phys_obj->cur_obj = NULL; @@ -3693,7 +3695,7 @@ i915_gem_attach_phys_object(struct drm_device *dev, obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; obj_priv->phys_obj->cur_obj = obj; - ret = i915_gem_object_get_page_list(obj); + ret = i915_gem_object_get_pages(obj); if (ret) { DRM_ERROR("failed to get page list\n"); goto out; @@ -3702,7 +3704,7 @@ i915_gem_attach_phys_object(struct drm_device *dev, page_count = obj->size / PAGE_SIZE; for (i = 0; i < page_count; i++) { - char *src = kmap_atomic(obj_priv->page_list[i], KM_USER0); + char *src = kmap_atomic(obj_priv->pages[i], KM_USER0); char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); memcpy(dst, src, PAGE_SIZE); -- cgit v0.10.2 From 40123c1f8dd920dcff7a42cde5b351d7d0b0422e Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Mon, 9 Mar 2009 13:42:30 -0700 Subject: drm/i915: Fix lock order reversal in shmem pwrite path. Like the GTT pwrite path fix, this uses an optimistic path and a fallback to get_user_pages. Note that this means we have to stop using vfs_write and roll it ourselves. 
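The ordering that makes the fallback safe is worth spelling out. The sketch below is a condensed restatement of the slow path's skeleton, not the exact driver code: error handling and the per-page copy loop are elided, and it assumes the same i915_gem.c context as the diff. The point is the sequencing: pin the user pages under mmap_sem first, then take struct_mutex, so everything done under the mutex works on already-pinned pages through kmap_atomic() and can never fault.

/*
 * Sketch only: the lock-ordering skeleton shared by the new slow paths.
 */
static int shmem_pwrite_slow_sketch(struct drm_device *dev,
				    struct drm_gem_object *obj,
				    uint64_t data_ptr, uint64_t size)
{
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	int first = data_ptr / PAGE_SIZE;
	int last = (data_ptr + size - 1) / PAGE_SIZE;
	int num_pages = last - first + 1;
	int pinned;

	user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
	if (user_pages == NULL)
		return -ENOMEM;

	/* Step 1: mmap_sem only -- pin the source pages so they stay resident. */
	down_read(&mm->mmap_sem);
	pinned = get_user_pages(current, mm, (uintptr_t)data_ptr,
				num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);

	/* Step 2: struct_mutex second; nothing under it can fault, because
	 * the copy loop only kmap_atomic()s pages pinned in step 1. */
	mutex_lock(&dev->struct_mutex);
	/* ... slow_shmem_copy() loop as in the diff below ... */
	mutex_unlock(&dev->struct_mutex);

	while (pinned-- > 0)
		page_cache_release(user_pages[pinned]);
	kfree(user_pages);
	return 0;
}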
Signed-off-by: Eric Anholt Reviewed-by: Jesse Barnes diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index b998d65..bdc7326 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -136,6 +136,33 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, return 0; } +static inline int +slow_shmem_copy(struct page *dst_page, + int dst_offset, + struct page *src_page, + int src_offset, + int length) +{ + char *dst_vaddr, *src_vaddr; + + dst_vaddr = kmap_atomic(dst_page, KM_USER0); + if (dst_vaddr == NULL) + return -ENOMEM; + + src_vaddr = kmap_atomic(src_page, KM_USER1); + if (src_vaddr == NULL) { + kunmap_atomic(dst_vaddr, KM_USER0); + return -ENOMEM; + } + + memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length); + + kunmap_atomic(src_vaddr, KM_USER1); + kunmap_atomic(dst_vaddr, KM_USER0); + + return 0; +} + /** * Reads data from the object referenced by handle. * @@ -243,6 +270,23 @@ slow_kernel_write(struct io_mapping *mapping, return 0; } +static inline int +fast_shmem_write(struct page **pages, + loff_t page_base, int page_offset, + char __user *data, + int length) +{ + char __iomem *vaddr; + + vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0); + if (vaddr == NULL) + return -ENOMEM; + __copy_from_user_inatomic(vaddr + page_offset, data, length); + kunmap_atomic(vaddr, KM_USER0); + + return 0; +} + /** * This is the fast pwrite path, where we copy the data directly from the * user into the GTT, uncached. @@ -423,39 +467,175 @@ out_unpin_pages: return ret; } +/** + * This is the fast shmem pwrite path, which attempts to directly + * copy_from_user into the kmapped pages backing the object. + */ static int -i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj, - struct drm_i915_gem_pwrite *args, - struct drm_file *file_priv) +i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, + struct drm_i915_gem_pwrite *args, + struct drm_file *file_priv) { + struct drm_i915_gem_object *obj_priv = obj->driver_private; + ssize_t remain; + loff_t offset, page_base; + char __user *user_data; + int page_offset, page_length; int ret; - loff_t offset; - ssize_t written; + + user_data = (char __user *) (uintptr_t) args->data_ptr; + remain = args->size; mutex_lock(&dev->struct_mutex); + ret = i915_gem_object_get_pages(obj); + if (ret != 0) + goto fail_unlock; + ret = i915_gem_object_set_to_cpu_domain(obj, 1); - if (ret) { - mutex_unlock(&dev->struct_mutex); - return ret; + if (ret != 0) + goto fail_put_pages; + + obj_priv = obj->driver_private; + offset = args->offset; + obj_priv->dirty = 1; + + while (remain > 0) { + /* Operation in this page + * + * page_base = page offset within aperture + * page_offset = offset within page + * page_length = bytes to copy for this page + */ + page_base = (offset & ~(PAGE_SIZE-1)); + page_offset = offset & (PAGE_SIZE-1); + page_length = remain; + if ((page_offset + remain) > PAGE_SIZE) + page_length = PAGE_SIZE - page_offset; + + ret = fast_shmem_write(obj_priv->pages, + page_base, page_offset, + user_data, page_length); + if (ret) + goto fail_put_pages; + + remain -= page_length; + user_data += page_length; + offset += page_length; } +fail_put_pages: + i915_gem_object_put_pages(obj); +fail_unlock: + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +/** + * This is the fallback shmem pwrite path, which uses get_user_pages to pin + * the memory and maps it using kmap_atomic for copying. 
+ * + * This avoids taking mmap_sem for faulting on the user's address while the + * struct_mutex is held. + */ +static int +i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, + struct drm_i915_gem_pwrite *args, + struct drm_file *file_priv) +{ + struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct mm_struct *mm = current->mm; + struct page **user_pages; + ssize_t remain; + loff_t offset, pinned_pages, i; + loff_t first_data_page, last_data_page, num_pages; + int shmem_page_index, shmem_page_offset; + int data_page_index, data_page_offset; + int page_length; + int ret; + uint64_t data_ptr = args->data_ptr; + + remain = args->size; + + /* Pin the user pages containing the data. We can't fault while + * holding the struct mutex, and all of the pwrite implementations + * want to hold it while dereferencing the user data. + */ + first_data_page = data_ptr / PAGE_SIZE; + last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; + num_pages = last_data_page - first_data_page + 1; + + user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL); + if (user_pages == NULL) + return -ENOMEM; + + down_read(&mm->mmap_sem); + pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, + num_pages, 0, 0, user_pages, NULL); + up_read(&mm->mmap_sem); + if (pinned_pages < num_pages) { + ret = -EFAULT; + goto fail_put_user_pages; + } + + mutex_lock(&dev->struct_mutex); + + ret = i915_gem_object_get_pages(obj); + if (ret != 0) + goto fail_unlock; + + ret = i915_gem_object_set_to_cpu_domain(obj, 1); + if (ret != 0) + goto fail_put_pages; + + obj_priv = obj->driver_private; offset = args->offset; + obj_priv->dirty = 1; - written = vfs_write(obj->filp, - (char __user *)(uintptr_t) args->data_ptr, - args->size, &offset); - if (written != args->size) { - mutex_unlock(&dev->struct_mutex); - if (written < 0) - return written; - else - return -EINVAL; + while (remain > 0) { + /* Operation in this page + * + * shmem_page_index = page number within shmem file + * shmem_page_offset = offset within page in shmem file + * data_page_index = page number in get_user_pages return + * data_page_offset = offset with data_page_index page. 
+ * page_length = bytes to copy for this page + */ + shmem_page_index = offset / PAGE_SIZE; + shmem_page_offset = offset & ~PAGE_MASK; + data_page_index = data_ptr / PAGE_SIZE - first_data_page; + data_page_offset = data_ptr & ~PAGE_MASK; + + page_length = remain; + if ((shmem_page_offset + page_length) > PAGE_SIZE) + page_length = PAGE_SIZE - shmem_page_offset; + if ((data_page_offset + page_length) > PAGE_SIZE) + page_length = PAGE_SIZE - data_page_offset; + + ret = slow_shmem_copy(obj_priv->pages[shmem_page_index], + shmem_page_offset, + user_pages[data_page_index], + data_page_offset, + page_length); + if (ret) + goto fail_put_pages; + + remain -= page_length; + data_ptr += page_length; + offset += page_length; } +fail_put_pages: + i915_gem_object_put_pages(obj); +fail_unlock: mutex_unlock(&dev->struct_mutex); +fail_put_user_pages: + for (i = 0; i < pinned_pages; i++) + page_cache_release(user_pages[i]); + kfree(user_pages); - return 0; + return ret; } /** @@ -502,8 +682,13 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file_priv); } - } else - ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv); + } else { + ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv); + if (ret == -EFAULT) { + ret = i915_gem_shmem_pwrite_slow(dev, obj, args, + file_priv); + } + } #if WATCH_PWRITE if (ret) -- cgit v0.10.2 From eb01459fbbccb4ca0b879cbfc97e33ac6eabf975 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Tue, 10 Mar 2009 11:44:52 -0700 Subject: drm/i915: Fix lock order reversal in shmem pread path. Signed-off-by: Eric Anholt Reviewed-by: Jesse Barnes diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index bdc7326..010af90 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -137,6 +137,24 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, } static inline int +fast_shmem_read(struct page **pages, + loff_t page_base, int page_offset, + char __user *data, + int length) +{ + char __iomem *vaddr; + int ret; + + vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0); + if (vaddr == NULL) + return -ENOMEM; + ret = __copy_to_user_inatomic(data, vaddr + page_offset, length); + kunmap_atomic(vaddr, KM_USER0); + + return ret; +} + +static inline int slow_shmem_copy(struct page *dst_page, int dst_offset, struct page *src_page, @@ -164,6 +182,179 @@ slow_shmem_copy(struct page *dst_page, } /** + * This is the fast shmem pread path, which attempts to copy_from_user directly + * from the backing pages of the object to the user's address space. On a + * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow(). 
+ */ +static int +i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, + struct drm_i915_gem_pread *args, + struct drm_file *file_priv) +{ + struct drm_i915_gem_object *obj_priv = obj->driver_private; + ssize_t remain; + loff_t offset, page_base; + char __user *user_data; + int page_offset, page_length; + int ret; + + user_data = (char __user *) (uintptr_t) args->data_ptr; + remain = args->size; + + mutex_lock(&dev->struct_mutex); + + ret = i915_gem_object_get_pages(obj); + if (ret != 0) + goto fail_unlock; + + ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset, + args->size); + if (ret != 0) + goto fail_put_pages; + + obj_priv = obj->driver_private; + offset = args->offset; + + while (remain > 0) { + /* Operation in this page + * + * page_base = page offset within aperture + * page_offset = offset within page + * page_length = bytes to copy for this page + */ + page_base = (offset & ~(PAGE_SIZE-1)); + page_offset = offset & (PAGE_SIZE-1); + page_length = remain; + if ((page_offset + remain) > PAGE_SIZE) + page_length = PAGE_SIZE - page_offset; + + ret = fast_shmem_read(obj_priv->pages, + page_base, page_offset, + user_data, page_length); + if (ret) + goto fail_put_pages; + + remain -= page_length; + user_data += page_length; + offset += page_length; + } + +fail_put_pages: + i915_gem_object_put_pages(obj); +fail_unlock: + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +/** + * This is the fallback shmem pread path, which allocates temporary storage + * in kernel space to copy_to_user into outside of the struct_mutex, so we + * can copy out of the object's backing pages while holding the struct mutex + * and not take page faults. + */ +static int +i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, + struct drm_i915_gem_pread *args, + struct drm_file *file_priv) +{ + struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct mm_struct *mm = current->mm; + struct page **user_pages; + ssize_t remain; + loff_t offset, pinned_pages, i; + loff_t first_data_page, last_data_page, num_pages; + int shmem_page_index, shmem_page_offset; + int data_page_index, data_page_offset; + int page_length; + int ret; + uint64_t data_ptr = args->data_ptr; + + remain = args->size; + + /* Pin the user pages containing the data. We can't fault while + * holding the struct mutex, yet we want to hold it while + * dereferencing the user data. + */ + first_data_page = data_ptr / PAGE_SIZE; + last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; + num_pages = last_data_page - first_data_page + 1; + + user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL); + if (user_pages == NULL) + return -ENOMEM; + + down_read(&mm->mmap_sem); + pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, + num_pages, 0, 0, user_pages, NULL); + up_read(&mm->mmap_sem); + if (pinned_pages < num_pages) { + ret = -EFAULT; + goto fail_put_user_pages; + } + + mutex_lock(&dev->struct_mutex); + + ret = i915_gem_object_get_pages(obj); + if (ret != 0) + goto fail_unlock; + + ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset, + args->size); + if (ret != 0) + goto fail_put_pages; + + obj_priv = obj->driver_private; + offset = args->offset; + + while (remain > 0) { + /* Operation in this page + * + * shmem_page_index = page number within shmem file + * shmem_page_offset = offset within page in shmem file + * data_page_index = page number in get_user_pages return + * data_page_offset = offset with data_page_index page. 
+ * page_length = bytes to copy for this page + */ + shmem_page_index = offset / PAGE_SIZE; + shmem_page_offset = offset & ~PAGE_MASK; + data_page_index = data_ptr / PAGE_SIZE - first_data_page; + data_page_offset = data_ptr & ~PAGE_MASK; + + page_length = remain; + if ((shmem_page_offset + page_length) > PAGE_SIZE) + page_length = PAGE_SIZE - shmem_page_offset; + if ((data_page_offset + page_length) > PAGE_SIZE) + page_length = PAGE_SIZE - data_page_offset; + + ret = slow_shmem_copy(user_pages[data_page_index], + data_page_offset, + obj_priv->pages[shmem_page_index], + shmem_page_offset, + page_length); + if (ret) + goto fail_put_pages; + + remain -= page_length; + data_ptr += page_length; + offset += page_length; + } + +fail_put_pages: + i915_gem_object_put_pages(obj); +fail_unlock: + mutex_unlock(&dev->struct_mutex); +fail_put_user_pages: + for (i = 0; i < pinned_pages; i++) { + SetPageDirty(user_pages[i]); + page_cache_release(user_pages[i]); + } + kfree(user_pages); + + return ret; +} + +/** * Reads data from the object referenced by handle. * * On error, the contents of *data are undefined. @@ -175,8 +366,6 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_pread *args = data; struct drm_gem_object *obj; struct drm_i915_gem_object *obj_priv; - ssize_t read; - loff_t offset; int ret; obj = drm_gem_object_lookup(dev, file_priv, args->handle); @@ -194,33 +383,13 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, return -EINVAL; } - mutex_lock(&dev->struct_mutex); - - ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset, - args->size); - if (ret != 0) { - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); - return ret; - } - - offset = args->offset; - - read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr, - args->size, &offset); - if (read != args->size) { - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); - if (read < 0) - return read; - else - return -EINVAL; - } + ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv); + if (ret != 0) + ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv); drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); - return 0; + return ret; } /* This is the fast write path which cannot handle -- cgit v0.10.2 From 201361a54ed187d8595a283e3a4ddb213bc8323b Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Wed, 11 Mar 2009 12:30:04 -0700 Subject: drm/i915: Fix lock order reversal with cliprects and cmdbuf in non-DRI2 paths. This introduces allocation in the batch submission path that wasn't there previously, but these are compatibility paths so we care about simplicity more than performance. kernel.org bug #12419. 
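Reduced to its essentials, each converted ioctl follows one pattern, shown in the simplified sketch below (not the exact hunks that follow): snapshot the user-supplied cliprects, and for cmdbuf the command bytes as well, into kernel allocations before struct_mutex is taken, then hand the dispatch helpers only kernel pointers.

/* Simplified sketch of the pattern applied to i915_batchbuffer() below. */
static int batchbuffer_sketch(struct drm_device *dev,
			      drm_i915_batchbuffer_t *batch)
{
	struct drm_clip_rect *cliprects = NULL;
	int ret = 0;

	if (batch->num_cliprects < 0)
		return -EINVAL;

	if (batch->num_cliprects) {
		cliprects = drm_calloc(batch->num_cliprects,
				       sizeof(struct drm_clip_rect),
				       DRM_MEM_DRIVER);
		if (cliprects == NULL)
			return -ENOMEM;

		/* Faulting is fine here: no DRM lock is held yet. */
		if (copy_from_user(cliprects, batch->cliprects,
				   batch->num_cliprects *
				   sizeof(struct drm_clip_rect))) {
			ret = -EFAULT;
			goto out_free;
		}
	}

	/* Dispatch sees only the kernel-side copy, so nothing running
	 * under struct_mutex ever touches user memory. */
	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
	mutex_unlock(&dev->struct_mutex);

out_free:
	drm_free(cliprects,
		 batch->num_cliprects * sizeof(struct drm_clip_rect),
		 DRM_MEM_DRIVER);
	return ret;
}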
Signed-off-by: Eric Anholt Reviewed-by: Keith Packard Acked-by: Jesse Barnes diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 6d21b9e..ae83fe0 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -356,7 +356,7 @@ static int validate_cmd(int cmd) return ret; } -static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwords) +static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) { drm_i915_private_t *dev_priv = dev->dev_private; int i; @@ -370,8 +370,7 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor for (i = 0; i < dwords;) { int cmd, sz; - if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd))) - return -EINVAL; + cmd = buffer[i]; if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords) return -EINVAL; @@ -379,11 +378,7 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor OUT_RING(cmd); while (++i, --sz) { - if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], - sizeof(cmd))) { - return -EINVAL; - } - OUT_RING(cmd); + OUT_RING(buffer[i]); } } @@ -397,17 +392,13 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor int i915_emit_box(struct drm_device *dev, - struct drm_clip_rect __user *boxes, + struct drm_clip_rect *boxes, int i, int DR1, int DR4) { drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_clip_rect box; + struct drm_clip_rect box = boxes[i]; RING_LOCALS; - if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) { - return -EFAULT; - } - if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { DRM_ERROR("Bad box %d,%d..%d,%d\n", box.x1, box.y1, box.x2, box.y2); @@ -460,7 +451,9 @@ static void i915_emit_breadcrumb(struct drm_device *dev) } static int i915_dispatch_cmdbuffer(struct drm_device * dev, - drm_i915_cmdbuffer_t * cmd) + drm_i915_cmdbuffer_t *cmd, + struct drm_clip_rect *cliprects, + void *cmdbuf) { int nbox = cmd->num_cliprects; int i = 0, count, ret; @@ -476,13 +469,13 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev, for (i = 0; i < count; i++) { if (i < nbox) { - ret = i915_emit_box(dev, cmd->cliprects, i, + ret = i915_emit_box(dev, cliprects, i, cmd->DR1, cmd->DR4); if (ret) return ret; } - ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4); + ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4); if (ret) return ret; } @@ -492,10 +485,10 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev, } static int i915_dispatch_batchbuffer(struct drm_device * dev, - drm_i915_batchbuffer_t * batch) + drm_i915_batchbuffer_t * batch, + struct drm_clip_rect *cliprects) { drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_clip_rect __user *boxes = batch->cliprects; int nbox = batch->num_cliprects; int i = 0, count; RING_LOCALS; @@ -511,7 +504,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, for (i = 0; i < count; i++) { if (i < nbox) { - int ret = i915_emit_box(dev, boxes, i, + int ret = i915_emit_box(dev, cliprects, i, batch->DR1, batch->DR4); if (ret) return ret; @@ -626,6 +619,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data, master_priv->sarea_priv; drm_i915_batchbuffer_t *batch = data; int ret; + struct drm_clip_rect *cliprects = NULL; if (!dev_priv->allow_batchbuffer) { DRM_ERROR("Batchbuffer ioctl disabled\n"); @@ -637,17 +631,35 @@ static int i915_batchbuffer(struct drm_device *dev, void *data, RING_LOCK_TEST_WITH_RETURN(dev, file_priv); - if 
(batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects, - batch->num_cliprects * - sizeof(struct drm_clip_rect))) - return -EFAULT; + if (batch->num_cliprects < 0) + return -EINVAL; + + if (batch->num_cliprects) { + cliprects = drm_calloc(batch->num_cliprects, + sizeof(struct drm_clip_rect), + DRM_MEM_DRIVER); + if (cliprects == NULL) + return -ENOMEM; + + ret = copy_from_user(cliprects, batch->cliprects, + batch->num_cliprects * + sizeof(struct drm_clip_rect)); + if (ret != 0) + goto fail_free; + } mutex_lock(&dev->struct_mutex); - ret = i915_dispatch_batchbuffer(dev, batch); + ret = i915_dispatch_batchbuffer(dev, batch, cliprects); mutex_unlock(&dev->struct_mutex); if (sarea_priv) sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); + +fail_free: + drm_free(cliprects, + batch->num_cliprects * sizeof(struct drm_clip_rect), + DRM_MEM_DRIVER); + return ret; } @@ -659,6 +671,8 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data, drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv; drm_i915_cmdbuffer_t *cmdbuf = data; + struct drm_clip_rect *cliprects = NULL; + void *batch_data; int ret; DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n", @@ -666,25 +680,50 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data, RING_LOCK_TEST_WITH_RETURN(dev, file_priv); - if (cmdbuf->num_cliprects && - DRM_VERIFYAREA_READ(cmdbuf->cliprects, - cmdbuf->num_cliprects * - sizeof(struct drm_clip_rect))) { - DRM_ERROR("Fault accessing cliprects\n"); - return -EFAULT; + if (cmdbuf->num_cliprects < 0) + return -EINVAL; + + batch_data = drm_alloc(cmdbuf->sz, DRM_MEM_DRIVER); + if (batch_data == NULL) + return -ENOMEM; + + ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz); + if (ret != 0) + goto fail_batch_free; + + if (cmdbuf->num_cliprects) { + cliprects = drm_calloc(cmdbuf->num_cliprects, + sizeof(struct drm_clip_rect), + DRM_MEM_DRIVER); + if (cliprects == NULL) + goto fail_batch_free; + + ret = copy_from_user(cliprects, cmdbuf->cliprects, + cmdbuf->num_cliprects * + sizeof(struct drm_clip_rect)); + if (ret != 0) + goto fail_clip_free; } mutex_lock(&dev->struct_mutex); - ret = i915_dispatch_cmdbuffer(dev, cmdbuf); + ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data); mutex_unlock(&dev->struct_mutex); if (ret) { DRM_ERROR("i915_dispatch_cmdbuffer failed\n"); - return ret; + goto fail_batch_free; } if (sarea_priv) sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); - return 0; + +fail_batch_free: + drm_free(batch_data, cmdbuf->sz, DRM_MEM_DRIVER); +fail_clip_free: + drm_free(cliprects, + cmdbuf->num_cliprects * sizeof(struct drm_clip_rect), + DRM_MEM_DRIVER); + + return ret; } static int i915_flip_bufs(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 75e3384..2c02ce6 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -520,7 +520,7 @@ extern int i915_driver_device_is_agp(struct drm_device * dev); extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); extern int i915_emit_box(struct drm_device *dev, - struct drm_clip_rect __user *boxes, + struct drm_clip_rect *boxes, int i, int DR1, int DR4); /* i915_irq.c */ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 010af90..2bda151 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2891,11 +2891,10 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, static int 
i915_dispatch_gem_execbuffer(struct drm_device *dev, struct drm_i915_gem_execbuffer *exec, + struct drm_clip_rect *cliprects, uint64_t exec_offset) { drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *) - (uintptr_t) exec->cliprects_ptr; int nbox = exec->num_cliprects; int i = 0, count; uint32_t exec_start, exec_len; @@ -2916,7 +2915,7 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev, for (i = 0; i < count; i++) { if (i < nbox) { - int ret = i915_emit_box(dev, boxes, i, + int ret = i915_emit_box(dev, cliprects, i, exec->DR1, exec->DR4); if (ret) return ret; @@ -2983,6 +2982,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, struct drm_gem_object **object_list = NULL; struct drm_gem_object *batch_obj; struct drm_i915_gem_object *obj_priv; + struct drm_clip_rect *cliprects = NULL; int ret, i, pinned = 0; uint64_t exec_offset; uint32_t seqno, flush_domains; @@ -3019,6 +3019,23 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, goto pre_mutex_err; } + if (args->num_cliprects != 0) { + cliprects = drm_calloc(args->num_cliprects, sizeof(*cliprects), + DRM_MEM_DRIVER); + if (cliprects == NULL) + goto pre_mutex_err; + + ret = copy_from_user(cliprects, + (struct drm_clip_rect __user *) + (uintptr_t) args->cliprects_ptr, + sizeof(*cliprects) * args->num_cliprects); + if (ret != 0) { + DRM_ERROR("copy %d cliprects failed: %d\n", + args->num_cliprects, ret); + goto pre_mutex_err; + } + } + mutex_lock(&dev->struct_mutex); i915_verify_inactive(dev, __FILE__, __LINE__); @@ -3155,7 +3172,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, #endif /* Exec the batchbuffer */ - ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset); + ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset); if (ret) { DRM_ERROR("dispatch failed %d\n", ret); goto err; @@ -3224,6 +3241,8 @@ pre_mutex_err: DRM_MEM_DRIVER); drm_free(exec_list, sizeof(*exec_list) * args->buffer_count, DRM_MEM_DRIVER); + drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects, + DRM_MEM_DRIVER); return ret; } -- cgit v0.10.2 From 40a5f0decdf050785ebd62b36ad48c869ee4b384 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Thu, 12 Mar 2009 11:23:52 -0700 Subject: drm/i915: Fix lock order reversal in GEM relocation entry copying. Signed-off-by: Eric Anholt Reviewed-by: Keith Packard diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 2bda151..f135c90 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2713,12 +2713,11 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, static int i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, struct drm_file *file_priv, - struct drm_i915_gem_exec_object *entry) + struct drm_i915_gem_exec_object *entry, + struct drm_i915_gem_relocation_entry *relocs) { struct drm_device *dev = obj->dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_relocation_entry reloc; - struct drm_i915_gem_relocation_entry __user *relocs; struct drm_i915_gem_object *obj_priv = obj->driver_private; int i, ret; void __iomem *reloc_page; @@ -2730,25 +2729,18 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, entry->offset = obj_priv->gtt_offset; - relocs = (struct drm_i915_gem_relocation_entry __user *) - (uintptr_t) entry->relocs_ptr; /* Apply the relocations, using the GTT aperture to avoid cache * flushing requirements. 
*/ for (i = 0; i < entry->relocation_count; i++) { + struct drm_i915_gem_relocation_entry *reloc= &relocs[i]; struct drm_gem_object *target_obj; struct drm_i915_gem_object *target_obj_priv; uint32_t reloc_val, reloc_offset; uint32_t __iomem *reloc_entry; - ret = copy_from_user(&reloc, relocs + i, sizeof(reloc)); - if (ret != 0) { - i915_gem_object_unpin(obj); - return ret; - } - target_obj = drm_gem_object_lookup(obj->dev, file_priv, - reloc.target_handle); + reloc->target_handle); if (target_obj == NULL) { i915_gem_object_unpin(obj); return -EBADF; @@ -2760,53 +2752,53 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, */ if (target_obj_priv->gtt_space == NULL) { DRM_ERROR("No GTT space found for object %d\n", - reloc.target_handle); + reloc->target_handle); drm_gem_object_unreference(target_obj); i915_gem_object_unpin(obj); return -EINVAL; } - if (reloc.offset > obj->size - 4) { + if (reloc->offset > obj->size - 4) { DRM_ERROR("Relocation beyond object bounds: " "obj %p target %d offset %d size %d.\n", - obj, reloc.target_handle, - (int) reloc.offset, (int) obj->size); + obj, reloc->target_handle, + (int) reloc->offset, (int) obj->size); drm_gem_object_unreference(target_obj); i915_gem_object_unpin(obj); return -EINVAL; } - if (reloc.offset & 3) { + if (reloc->offset & 3) { DRM_ERROR("Relocation not 4-byte aligned: " "obj %p target %d offset %d.\n", - obj, reloc.target_handle, - (int) reloc.offset); + obj, reloc->target_handle, + (int) reloc->offset); drm_gem_object_unreference(target_obj); i915_gem_object_unpin(obj); return -EINVAL; } - if (reloc.write_domain & I915_GEM_DOMAIN_CPU || - reloc.read_domains & I915_GEM_DOMAIN_CPU) { + if (reloc->write_domain & I915_GEM_DOMAIN_CPU || + reloc->read_domains & I915_GEM_DOMAIN_CPU) { DRM_ERROR("reloc with read/write CPU domains: " "obj %p target %d offset %d " "read %08x write %08x", - obj, reloc.target_handle, - (int) reloc.offset, - reloc.read_domains, - reloc.write_domain); + obj, reloc->target_handle, + (int) reloc->offset, + reloc->read_domains, + reloc->write_domain); drm_gem_object_unreference(target_obj); i915_gem_object_unpin(obj); return -EINVAL; } - if (reloc.write_domain && target_obj->pending_write_domain && - reloc.write_domain != target_obj->pending_write_domain) { + if (reloc->write_domain && target_obj->pending_write_domain && + reloc->write_domain != target_obj->pending_write_domain) { DRM_ERROR("Write domain conflict: " "obj %p target %d offset %d " "new %08x old %08x\n", - obj, reloc.target_handle, - (int) reloc.offset, - reloc.write_domain, + obj, reloc->target_handle, + (int) reloc->offset, + reloc->write_domain, target_obj->pending_write_domain); drm_gem_object_unreference(target_obj); i915_gem_object_unpin(obj); @@ -2819,22 +2811,22 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, "presumed %08x delta %08x\n", __func__, obj, - (int) reloc.offset, - (int) reloc.target_handle, - (int) reloc.read_domains, - (int) reloc.write_domain, + (int) reloc->offset, + (int) reloc->target_handle, + (int) reloc->read_domains, + (int) reloc->write_domain, (int) target_obj_priv->gtt_offset, - (int) reloc.presumed_offset, - reloc.delta); + (int) reloc->presumed_offset, + reloc->delta); #endif - target_obj->pending_read_domains |= reloc.read_domains; - target_obj->pending_write_domain |= reloc.write_domain; + target_obj->pending_read_domains |= reloc->read_domains; + target_obj->pending_write_domain |= reloc->write_domain; /* If the relocation already has the right value in it, no * more work needs to be done. 
*/ - if (target_obj_priv->gtt_offset == reloc.presumed_offset) { + if (target_obj_priv->gtt_offset == reloc->presumed_offset) { drm_gem_object_unreference(target_obj); continue; } @@ -2849,32 +2841,26 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, /* Map the page containing the relocation we're going to * perform. */ - reloc_offset = obj_priv->gtt_offset + reloc.offset; + reloc_offset = obj_priv->gtt_offset + reloc->offset; reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, (reloc_offset & ~(PAGE_SIZE - 1))); reloc_entry = (uint32_t __iomem *)(reloc_page + (reloc_offset & (PAGE_SIZE - 1))); - reloc_val = target_obj_priv->gtt_offset + reloc.delta; + reloc_val = target_obj_priv->gtt_offset + reloc->delta; #if WATCH_BUF DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n", - obj, (unsigned int) reloc.offset, + obj, (unsigned int) reloc->offset, readl(reloc_entry), reloc_val); #endif writel(reloc_val, reloc_entry); io_mapping_unmap_atomic(reloc_page); - /* Write the updated presumed offset for this entry back out - * to the user. + /* The updated presumed offset for this entry will be + * copied back out to the user. */ - reloc.presumed_offset = target_obj_priv->gtt_offset; - ret = copy_to_user(relocs + i, &reloc, sizeof(reloc)); - if (ret != 0) { - drm_gem_object_unreference(target_obj); - i915_gem_object_unpin(obj); - return ret; - } + reloc->presumed_offset = target_obj_priv->gtt_offset; drm_gem_object_unreference(target_obj); } @@ -2971,6 +2957,75 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv) return ret; } +static int +i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list, + uint32_t buffer_count, + struct drm_i915_gem_relocation_entry **relocs) +{ + uint32_t reloc_count = 0, reloc_index = 0, i; + int ret; + + *relocs = NULL; + for (i = 0; i < buffer_count; i++) { + if (reloc_count + exec_list[i].relocation_count < reloc_count) + return -EINVAL; + reloc_count += exec_list[i].relocation_count; + } + + *relocs = drm_calloc(reloc_count, sizeof(**relocs), DRM_MEM_DRIVER); + if (*relocs == NULL) + return -ENOMEM; + + for (i = 0; i < buffer_count; i++) { + struct drm_i915_gem_relocation_entry __user *user_relocs; + + user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr; + + ret = copy_from_user(&(*relocs)[reloc_index], + user_relocs, + exec_list[i].relocation_count * + sizeof(**relocs)); + if (ret != 0) { + drm_free(*relocs, reloc_count * sizeof(**relocs), + DRM_MEM_DRIVER); + *relocs = NULL; + return ret; + } + + reloc_index += exec_list[i].relocation_count; + } + + return ret; +} + +static int +i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list, + uint32_t buffer_count, + struct drm_i915_gem_relocation_entry *relocs) +{ + uint32_t reloc_count = 0, i; + int ret; + + for (i = 0; i < buffer_count; i++) { + struct drm_i915_gem_relocation_entry __user *user_relocs; + + user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr; + + if (ret == 0) { + ret = copy_to_user(user_relocs, + &relocs[reloc_count], + exec_list[i].relocation_count * + sizeof(*relocs)); + } + + reloc_count += exec_list[i].relocation_count; + } + + drm_free(relocs, reloc_count * sizeof(*relocs), DRM_MEM_DRIVER); + + return ret; +} + int i915_gem_execbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv) @@ -2983,9 +3038,10 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, struct drm_gem_object *batch_obj; struct drm_i915_gem_object *obj_priv; struct drm_clip_rect *cliprects = NULL; - 
int ret, i, pinned = 0; + struct drm_i915_gem_relocation_entry *relocs; + int ret, ret2, i, pinned = 0; uint64_t exec_offset; - uint32_t seqno, flush_domains; + uint32_t seqno, flush_domains, reloc_index; int pin_tries; #if WATCH_EXEC @@ -3036,6 +3092,11 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, } } + ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count, + &relocs); + if (ret != 0) + goto pre_mutex_err; + mutex_lock(&dev->struct_mutex); i915_verify_inactive(dev, __FILE__, __LINE__); @@ -3078,15 +3139,19 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, /* Pin and relocate */ for (pin_tries = 0; ; pin_tries++) { ret = 0; + reloc_index = 0; + for (i = 0; i < args->buffer_count; i++) { object_list[i]->pending_read_domains = 0; object_list[i]->pending_write_domain = 0; ret = i915_gem_object_pin_and_relocate(object_list[i], file_priv, - &exec_list[i]); + &exec_list[i], + &relocs[reloc_index]); if (ret) break; pinned = i + 1; + reloc_index += exec_list[i].relocation_count; } /* success */ if (ret == 0) @@ -3236,6 +3301,20 @@ err: args->buffer_count, ret); } + /* Copy the updated relocations out regardless of current error + * state. Failure to update the relocs would mean that the next + * time userland calls execbuf, it would do so with presumed offset + * state that didn't match the actual object state. + */ + ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count, + relocs); + if (ret2 != 0) { + DRM_ERROR("Failed to copy relocations back out: %d\n", ret2); + + if (ret == 0) + ret = ret2; + } + pre_mutex_err: drm_free(object_list, sizeof(*object_list) * args->buffer_count, DRM_MEM_DRIVER); -- cgit v0.10.2 From 28a62277e06f93729d0340d9659153dcfbdbe16d Mon Sep 17 00:00:00 2001 From: Ben Gamari Date: Tue, 17 Feb 2009 20:08:49 -0500 Subject: drm: Convert proc files to seq_file and introduce debugfs The old mechanism to formatting proc files is extremely ugly. The seq_file API was designed specifically for cases like this and greatly simplifies the process. Also, most of the files in /proc really don't belong there. This patch introduces the infrastructure for putting these into debugfs and exposes all of the proc files in debugfs as well. 
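For reference, the seq_file pattern this commit message describes reduces to the sketch below: a show() callback that emits output with seq_printf(), wired up through single_open() and exposed with debugfs_create_file(). This is an illustration only, not part of the patch series; it assumes a kernel of roughly this era, and the example_* identifiers and the "example_state" file name are invented here. The drm_debugfs_fops added in the diff that follows has exactly this shape.

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/stat.h>

static int example_show(struct seq_file *m, void *unused)
{
	/* seq_printf() replaces the old hand-rolled buf/start/offset/eof bookkeeping */
	seq_printf(m, "jiffies: %lu\n", jiffies);
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	/* inode->i_private carries per-file context, as drm_info_node does below */
	return single_open(file, example_show, inode->i_private);
}

static const struct file_operations example_fops = {
	.owner   = THIS_MODULE,
	.open    = example_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static struct dentry *example_dent;

static int __init example_init(void)
{
	example_dent = debugfs_create_file("example_state", S_IRUGO, NULL,
					   NULL, &example_fops);
	return example_dent ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	debugfs_remove(example_dent);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

The per-file context is passed through inode->i_private here; the DRM core code in this patch does the same thing with a struct drm_info_node, which is how a single file_operations instance can serve every entry in a table of info files.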
Signed-off-by: Ben Gamari Signed-off-by: Eric Anholt diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 30022c4..4ec5061 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -10,7 +10,8 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \ drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \ drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \ - drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o + drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o \ + drm_info.o drm_debugfs.o drm-$(CONFIG_COMPAT) += drm_ioc32.o diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c new file mode 100644 index 0000000..c77c6c6 --- /dev/null +++ b/drivers/gpu/drm/drm_debugfs.c @@ -0,0 +1,235 @@ +/** + * \file drm_debugfs.c + * debugfs support for DRM + * + * \author Ben Gamari + */ + +/* + * Created: Sun Dec 21 13:08:50 2008 by bgamari@gmail.com + * + * Copyright 2008 Ben Gamari + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include "drmP.h" + +#if defined(CONFIG_DEBUG_FS) + +/*************************************************** + * Initialization, etc. + **************************************************/ + +static struct drm_info_list drm_debugfs_list[] = { + {"name", drm_name_info, 0}, + {"vm", drm_vm_info, 0}, + {"clients", drm_clients_info, 0}, + {"queues", drm_queues_info, 0}, + {"bufs", drm_bufs_info, 0}, + {"gem_names", drm_gem_name_info, DRIVER_GEM}, + {"gem_objects", drm_gem_object_info, DRIVER_GEM}, +#if DRM_DEBUG_CODE + {"vma", drm_vma_info, 0}, +#endif +}; +#define DRM_DEBUGFS_ENTRIES ARRAY_SIZE(drm_debugfs_list) + + +static int drm_debugfs_open(struct inode *inode, struct file *file) +{ + struct drm_info_node *node = inode->i_private; + + return single_open(file, node->info_ent->show, node); +} + + +static const struct file_operations drm_debugfs_fops = { + .owner = THIS_MODULE, + .open = drm_debugfs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + + +/** + * Initialize a given set of debugfs files for a device + * + * \param files The array of files to create + * \param count The number of files given + * \param root DRI debugfs dir entry. + * \param minor device minor number + * \return Zero on success, non-zero on failure + * + * Create a given set of debugfs files represented by an array of + * gdm_debugfs_lists in the given root directory. 
+ */ +int drm_debugfs_create_files(struct drm_info_list *files, int count, + struct dentry *root, struct drm_minor *minor) +{ + struct drm_device *dev = minor->dev; + struct dentry *ent; + struct drm_info_node *tmp; + char name[64]; + int i, ret; + + for (i = 0; i < count; i++) { + u32 features = files[i].driver_features; + + if (features != 0 && + (dev->driver->driver_features & features) != features) + continue; + + tmp = drm_alloc(sizeof(struct drm_info_node), + _DRM_DRIVER); + ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO, + root, tmp, &drm_debugfs_fops); + if (!ent) { + DRM_ERROR("Cannot create /debugfs/dri/%s/%s\n", + name, files[i].name); + drm_free(tmp, sizeof(struct drm_info_node), + _DRM_DRIVER); + ret = -1; + goto fail; + } + + tmp->minor = minor; + tmp->dent = ent; + tmp->info_ent = &files[i]; + list_add(&(tmp->list), &(minor->debugfs_nodes.list)); + } + return 0; + +fail: + drm_debugfs_remove_files(files, count, minor); + return ret; +} +EXPORT_SYMBOL(drm_debugfs_create_files); + +/** + * Initialize the DRI debugfs filesystem for a device + * + * \param dev DRM device + * \param minor device minor number + * \param root DRI debugfs dir entry. + * + * Create the DRI debugfs root entry "/debugfs/dri", the device debugfs root entry + * "/debugfs/dri/%minor%/", and each entry in debugfs_list as + * "/debugfs/dri/%minor%/%name%". + */ +int drm_debugfs_init(struct drm_minor *minor, int minor_id, + struct dentry *root) +{ + struct drm_device *dev = minor->dev; + char name[64]; + int ret; + + INIT_LIST_HEAD(&minor->debugfs_nodes.list); + sprintf(name, "%d", minor_id); + minor->debugfs_root = debugfs_create_dir(name, root); + if (!minor->debugfs_root) { + DRM_ERROR("Cannot create /debugfs/dri/%s\n", name); + return -1; + } + + ret = drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES, + minor->debugfs_root, minor); + if (ret) { + debugfs_remove(minor->debugfs_root); + minor->debugfs_root = NULL; + DRM_ERROR("Failed to create core drm debugfs files\n"); + return ret; + } + + if (dev->driver->debugfs_init) { + ret = dev->driver->debugfs_init(minor); + if (ret) { + DRM_ERROR("DRM: Driver failed to initialize " + "/debugfs/dri.\n"); + return ret; + } + } + return 0; +} + + +/** + * Remove a list of debugfs files + * + * \param files The list of files + * \param count The number of files + * \param minor The minor of which we should remove the files + * \return always zero. + * + * Remove all debugfs entries created by debugfs_init(). + */ +int drm_debugfs_remove_files(struct drm_info_list *files, int count, + struct drm_minor *minor) +{ + struct list_head *pos, *q; + struct drm_info_node *tmp; + int i; + + for (i = 0; i < count; i++) { + list_for_each_safe(pos, q, &minor->debugfs_nodes.list) { + tmp = list_entry(pos, struct drm_info_node, list); + if (tmp->info_ent == &files[i]) { + debugfs_remove(tmp->dent); + list_del(pos); + drm_free(tmp, sizeof(struct drm_info_node), + _DRM_DRIVER); + } + } + } + return 0; +} +EXPORT_SYMBOL(drm_debugfs_remove_files); + +/** + * Cleanup the debugfs filesystem resources. + * + * \param minor device minor number. + * \return always zero. + * + * Remove all debugfs entries created by debugfs_init(). 
+ */ +int drm_debugfs_cleanup(struct drm_minor *minor) +{ + struct drm_device *dev = minor->dev; + + if (!minor->debugfs_root) + return 0; + + if (dev->driver->debugfs_cleanup) + dev->driver->debugfs_cleanup(minor); + + drm_debugfs_remove_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES, minor); + + debugfs_remove(minor->debugfs_root); + minor->debugfs_root = NULL; + + return 0; +} + +#endif /* CONFIG_DEBUG_FS */ + diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 14c7a23..ed32edb 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -46,9 +46,11 @@ * OTHER DEALINGS IN THE SOFTWARE. */ +#include #include "drmP.h" #include "drm_core.h" + static int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv); @@ -178,7 +180,7 @@ int drm_lastclose(struct drm_device * dev) /* Clear AGP information */ if (drm_core_has_AGP(dev) && dev->agp && - !drm_core_check_feature(dev, DRIVER_MODESET)) { + !drm_core_check_feature(dev, DRIVER_MODESET)) { struct drm_agp_mem *entry, *tempe; /* Remove AGP resources, but leave dev->agp @@ -382,6 +384,13 @@ static int __init drm_core_init(void) goto err_p3; } + drm_debugfs_root = debugfs_create_dir("dri", NULL); + if (!drm_debugfs_root) { + DRM_ERROR("Cannot create /debugfs/dri\n"); + ret = -1; + goto err_p3; + } + drm_mem_init(); DRM_INFO("Initialized %s %d.%d.%d %s\n", @@ -400,6 +409,7 @@ err_p1: static void __exit drm_core_exit(void) { remove_proc_entry("dri", NULL); + debugfs_remove(drm_debugfs_root); drm_sysfs_destroy(); unregister_chrdev(DRM_MAJOR, "drm"); diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c new file mode 100644 index 0000000..fc98952 --- /dev/null +++ b/drivers/gpu/drm/drm_info.c @@ -0,0 +1,328 @@ +/** + * \file drm_info.c + * DRM info file implementations + * + * \author Ben Gamari + */ + +/* + * Created: Sun Dec 21 13:09:50 2008 by bgamari@gmail.com + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * Copyright 2008 Ben Gamari + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include +#include "drmP.h" + +/** + * Called when "/proc/dri/.../name" is read. + * + * Prints the device name together with the bus id if available. 
+ */ +int drm_name_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_minor *minor = node->minor; + struct drm_device *dev = minor->dev; + struct drm_master *master = minor->master; + + if (!master) + return 0; + + if (master->unique) { + seq_printf(m, "%s %s %s\n", + dev->driver->pci_driver.name, + pci_name(dev->pdev), master->unique); + } else { + seq_printf(m, "%s %s\n", dev->driver->pci_driver.name, + pci_name(dev->pdev)); + } + + return 0; +} + +/** + * Called when "/proc/dri/.../vm" is read. + * + * Prints information about all mappings in drm_device::maplist. + */ +int drm_vm_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct drm_map *map; + struct drm_map_list *r_list; + + /* Hardcoded from _DRM_FRAME_BUFFER, + _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and + _DRM_SCATTER_GATHER and _DRM_CONSISTENT */ + const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" }; + const char *type; + int i; + + mutex_lock(&dev->struct_mutex); + seq_printf(m, "slot offset size type flags address mtrr\n\n"); + i = 0; + list_for_each_entry(r_list, &dev->maplist, head) { + map = r_list->map; + if (!map) + continue; + if (map->type < 0 || map->type > 5) + type = "??"; + else + type = types[map->type]; + + seq_printf(m, "%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ", + i, + map->offset, + map->size, type, map->flags, + (unsigned long) r_list->user_token); + if (map->mtrr < 0) + seq_printf(m, "none\n"); + else + seq_printf(m, "%4d\n", map->mtrr); + i++; + } + mutex_unlock(&dev->struct_mutex); + return 0; +} + +/** + * Called when "/proc/dri/.../queues" is read. + */ +int drm_queues_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + int i; + struct drm_queue *q; + + mutex_lock(&dev->struct_mutex); + seq_printf(m, " ctx/flags use fin" + " blk/rw/rwf wait flushed queued" + " locks\n\n"); + for (i = 0; i < dev->queue_count; i++) { + q = dev->queuelist[i]; + atomic_inc(&q->use_count); + seq_printf(m, "%5d/0x%03x %5d %5d" + " %5d/%c%c/%c%c%c %5Zd\n", + i, + q->flags, + atomic_read(&q->use_count), + atomic_read(&q->finalization), + atomic_read(&q->block_count), + atomic_read(&q->block_read) ? 'r' : '-', + atomic_read(&q->block_write) ? 'w' : '-', + waitqueue_active(&q->read_queue) ? 'r' : '-', + waitqueue_active(&q->write_queue) ? 'w' : '-', + waitqueue_active(&q->flush_queue) ? 'f' : '-', + DRM_BUFCOUNT(&q->waitlist)); + atomic_dec(&q->use_count); + } + mutex_unlock(&dev->struct_mutex); + return 0; +} + +/** + * Called when "/proc/dri/.../bufs" is read. 
+ */ +int drm_bufs_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct drm_device_dma *dma; + int i, seg_pages; + + mutex_lock(&dev->struct_mutex); + dma = dev->dma; + if (!dma) { + mutex_unlock(&dev->struct_mutex); + return 0; + } + + seq_printf(m, " o size count free segs pages kB\n\n"); + for (i = 0; i <= DRM_MAX_ORDER; i++) { + if (dma->bufs[i].buf_count) { + seg_pages = dma->bufs[i].seg_count * (1 << dma->bufs[i].page_order); + seq_printf(m, "%2d %8d %5d %5d %5d %5d %5ld\n", + i, + dma->bufs[i].buf_size, + dma->bufs[i].buf_count, + atomic_read(&dma->bufs[i].freelist.count), + dma->bufs[i].seg_count, + seg_pages, + seg_pages * PAGE_SIZE / 1024); + } + } + seq_printf(m, "\n"); + for (i = 0; i < dma->buf_count; i++) { + if (i && !(i % 32)) + seq_printf(m, "\n"); + seq_printf(m, " %d", dma->buflist[i]->list); + } + seq_printf(m, "\n"); + mutex_unlock(&dev->struct_mutex); + return 0; +} + +/** + * Called when "/proc/dri/.../vblank" is read. + */ +int drm_vblank_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + int crtc; + + mutex_lock(&dev->struct_mutex); + for (crtc = 0; crtc < dev->num_crtcs; crtc++) { + seq_printf(m, "CRTC %d enable: %d\n", + crtc, atomic_read(&dev->vblank_refcount[crtc])); + seq_printf(m, "CRTC %d counter: %d\n", + crtc, drm_vblank_count(dev, crtc)); + seq_printf(m, "CRTC %d last wait: %d\n", + crtc, dev->last_vblank_wait[crtc]); + seq_printf(m, "CRTC %d in modeset: %d\n", + crtc, dev->vblank_inmodeset[crtc]); + } + mutex_unlock(&dev->struct_mutex); + return 0; +} + +/** + * Called when "/proc/dri/.../clients" is read. + * + */ +int drm_clients_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct drm_file *priv; + + mutex_lock(&dev->struct_mutex); + seq_printf(m, "a dev pid uid magic ioctls\n\n"); + list_for_each_entry(priv, &dev->filelist, lhead) { + seq_printf(m, "%c %3d %5d %5d %10u %10lu\n", + priv->authenticated ? 
'y' : 'n', + priv->minor->index, + priv->pid, + priv->uid, priv->magic, priv->ioctl_count); + } + mutex_unlock(&dev->struct_mutex); + return 0; +} + + +int drm_gem_one_name_info(int id, void *ptr, void *data) +{ + struct drm_gem_object *obj = ptr; + struct seq_file *m = data; + + seq_printf(m, "name %d size %zd\n", obj->name, obj->size); + + seq_printf(m, "%6d %8zd %7d %8d\n", + obj->name, obj->size, + atomic_read(&obj->handlecount.refcount), + atomic_read(&obj->refcount.refcount)); + return 0; +} + +int drm_gem_name_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + + seq_printf(m, " name size handles refcount\n"); + idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m); + return 0; +} + +int drm_gem_object_info(struct seq_file *m, void* data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + + seq_printf(m, "%d objects\n", atomic_read(&dev->object_count)); + seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory)); + seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count)); + seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory)); + seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory)); + seq_printf(m, "%d gtt total\n", dev->gtt_total); + return 0; +} + +#if DRM_DEBUG_CODE + +int drm_vma_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct drm_vma_entry *pt; + struct vm_area_struct *vma; +#if defined(__i386__) + unsigned int pgprot; +#endif + + mutex_lock(&dev->struct_mutex); + seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08lx\n", + atomic_read(&dev->vma_count), + high_memory, virt_to_phys(high_memory)); + + list_for_each_entry(pt, &dev->vmalist, head) { + vma = pt->vma; + if (!vma) + continue; + seq_printf(m, + "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000", + pt->pid, vma->vm_start, vma->vm_end, + vma->vm_flags & VM_READ ? 'r' : '-', + vma->vm_flags & VM_WRITE ? 'w' : '-', + vma->vm_flags & VM_EXEC ? 'x' : '-', + vma->vm_flags & VM_MAYSHARE ? 's' : 'p', + vma->vm_flags & VM_LOCKED ? 'l' : '-', + vma->vm_flags & VM_IO ? 'i' : '-', + vma->vm_pgoff); + +#if defined(__i386__) + pgprot = pgprot_val(vma->vm_page_prot); + seq_printf(m, " %c%c%c%c%c%c%c%c%c", + pgprot & _PAGE_PRESENT ? 'p' : '-', + pgprot & _PAGE_RW ? 'w' : 'r', + pgprot & _PAGE_USER ? 'u' : 's', + pgprot & _PAGE_PWT ? 't' : 'b', + pgprot & _PAGE_PCD ? 'u' : 'c', + pgprot & _PAGE_ACCESSED ? 'a' : '-', + pgprot & _PAGE_DIRTY ? 'd' : '-', + pgprot & _PAGE_PSE ? 'm' : 'k', + pgprot & _PAGE_GLOBAL ? 'g' : 'l'); +#endif + seq_printf(m, "\n"); + } + mutex_unlock(&dev->struct_mutex); + return 0; +} + +#endif + diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c index 8df849f..9b3c5af 100644 --- a/drivers/gpu/drm/drm_proc.c +++ b/drivers/gpu/drm/drm_proc.c @@ -37,697 +37,196 @@ * OTHER DEALINGS IN THE SOFTWARE. 
*/ +#include #include "drmP.h" -static int drm_name_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data); -static int drm_vm_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data); -static int drm_clients_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data); -static int drm_queues_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data); -static int drm_bufs_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data); -static int drm_vblank_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data); -static int drm_gem_name_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data); -static int drm_gem_object_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data); -#if DRM_DEBUG_CODE -static int drm_vma_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data); -#endif + +/*************************************************** + * Initialization, etc. + **************************************************/ /** * Proc file list. */ -static struct drm_proc_list { - const char *name; /**< file name */ - int (*f) (char *, char **, off_t, int, int *, void *); /**< proc callback*/ - u32 driver_features; /**< Required driver features for this entry */ -} drm_proc_list[] = { +static struct drm_info_list drm_proc_list[] = { {"name", drm_name_info, 0}, - {"mem", drm_mem_info, 0}, {"vm", drm_vm_info, 0}, {"clients", drm_clients_info, 0}, {"queues", drm_queues_info, 0}, {"bufs", drm_bufs_info, 0}, - {"vblank", drm_vblank_info, 0}, {"gem_names", drm_gem_name_info, DRIVER_GEM}, {"gem_objects", drm_gem_object_info, DRIVER_GEM}, #if DRM_DEBUG_CODE - {"vma", drm_vma_info}, + {"vma", drm_vma_info, 0}, #endif }; - #define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list) +static int drm_proc_open(struct inode *inode, struct file *file) +{ + struct drm_info_node* node = PDE(inode)->data; + + return single_open(file, node->info_ent->show, node); +} + +static const struct file_operations drm_proc_fops = { + .owner = THIS_MODULE, + .open = drm_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + + /** - * Initialize the DRI proc filesystem for a device. + * Initialize a given set of proc files for a device * - * \param dev DRM device. - * \param minor device minor number. + * \param files The array of files to create + * \param count The number of files given * \param root DRI proc dir entry. - * \param dev_root resulting DRI device proc dir entry. - * \return root entry pointer on success, or NULL on failure. + * \param minor device minor number + * \return Zero on success, non-zero on failure * - * Create the DRI proc root entry "/proc/dri", the device proc root entry - * "/proc/dri/%minor%/", and each entry in proc_list as - * "/proc/dri/%minor%/%name%". + * Create a given set of proc files represented by an array of + * gdm_proc_lists in the given root directory. 
*/ -int drm_proc_init(struct drm_minor *minor, int minor_id, - struct proc_dir_entry *root) +int drm_proc_create_files(struct drm_info_list *files, int count, + struct proc_dir_entry *root, struct drm_minor *minor) { struct drm_device *dev = minor->dev; struct proc_dir_entry *ent; - int i, j, ret; + struct drm_info_node *tmp; char name[64]; + int i, ret; - sprintf(name, "%d", minor_id); - minor->dev_root = proc_mkdir(name, root); - if (!minor->dev_root) { - DRM_ERROR("Cannot create /proc/dri/%s\n", name); - return -1; - } - - for (i = 0; i < DRM_PROC_ENTRIES; i++) { - u32 features = drm_proc_list[i].driver_features; + for (i = 0; i < count; i++) { + u32 features = files[i].driver_features; if (features != 0 && (dev->driver->driver_features & features) != features) continue; - ent = create_proc_entry(drm_proc_list[i].name, - S_IFREG | S_IRUGO, minor->dev_root); + tmp = drm_alloc(sizeof(struct drm_info_node), _DRM_DRIVER); + ent = create_proc_entry(files[i].name, S_IFREG | S_IRUGO, root); if (!ent) { DRM_ERROR("Cannot create /proc/dri/%s/%s\n", - name, drm_proc_list[i].name); + name, files[i].name); + drm_free(tmp, sizeof(struct drm_info_node), + _DRM_DRIVER); ret = -1; goto fail; } - ent->read_proc = drm_proc_list[i].f; - ent->data = minor; - } - if (dev->driver->proc_init) { - ret = dev->driver->proc_init(minor); - if (ret) { - DRM_ERROR("DRM: Driver failed to initialize " - "/proc/dri.\n"); - goto fail; - } + ent->proc_fops = &drm_proc_fops; + ent->data = tmp; + tmp->minor = minor; + tmp->info_ent = &files[i]; + list_add(&(tmp->list), &(minor->proc_nodes.list)); } - return 0; - fail: - for (j = 0; j < i; j++) - remove_proc_entry(drm_proc_list[i].name, - minor->dev_root); - remove_proc_entry(name, root); - minor->dev_root = NULL; +fail: + for (i = 0; i < count; i++) + remove_proc_entry(drm_proc_list[i].name, minor->proc_root); return ret; } /** - * Cleanup the proc filesystem resources. + * Initialize the DRI proc filesystem for a device * - * \param minor device minor number. + * \param dev DRM device + * \param minor device minor number * \param root DRI proc dir entry. - * \param dev_root DRI device proc dir entry. - * \return always zero. + * \param dev_root resulting DRI device proc dir entry. + * \return root entry pointer on success, or NULL on failure. * - * Remove all proc entries created by proc_init(). + * Create the DRI proc root entry "/proc/dri", the device proc root entry + * "/proc/dri/%minor%/", and each entry in proc_list as + * "/proc/dri/%minor%/%name%". */ -int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root) +int drm_proc_init(struct drm_minor *minor, int minor_id, + struct proc_dir_entry *root) { struct drm_device *dev = minor->dev; - int i; char name[64]; + int ret; - if (!root || !minor->dev_root) - return 0; - - if (dev->driver->proc_cleanup) - dev->driver->proc_cleanup(minor); - - for (i = 0; i < DRM_PROC_ENTRIES; i++) - remove_proc_entry(drm_proc_list[i].name, minor->dev_root); - sprintf(name, "%d", minor->index); - remove_proc_entry(name, root); - - return 0; -} - -/** - * Called when "/proc/dri/.../name" is read. - * - * \param buf output buffer. - * \param start start of output data. - * \param offset requested start offset. - * \param request requested number of bytes. - * \param eof whether there is no more data to return. - * \param data private data. - * \return number of written bytes. - * - * Prints the device name together with the bus id if available. 
- */ -static int drm_name_info(char *buf, char **start, off_t offset, int request, - int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_master *master = minor->master; - struct drm_device *dev = minor->dev; - int len = 0; - - if (offset > DRM_PROC_LIMIT) { - *eof = 1; - return 0; + INIT_LIST_HEAD(&minor->proc_nodes.list); + sprintf(name, "%d", minor_id); + minor->proc_root = proc_mkdir(name, root); + if (!minor->proc_root) { + DRM_ERROR("Cannot create /proc/dri/%s\n", name); + return -1; } - if (!master) - return 0; - - *start = &buf[offset]; - *eof = 0; - - if (master->unique) { - DRM_PROC_PRINT("%s %s %s\n", - dev->driver->pci_driver.name, - pci_name(dev->pdev), master->unique); - } else { - DRM_PROC_PRINT("%s %s\n", dev->driver->pci_driver.name, - pci_name(dev->pdev)); + ret = drm_proc_create_files(drm_proc_list, DRM_PROC_ENTRIES, + minor->proc_root, minor); + if (ret) { + remove_proc_entry(name, root); + minor->proc_root = NULL; + DRM_ERROR("Failed to create core drm proc files\n"); + return ret; } - if (len > request + offset) - return request; - *eof = 1; - return len - offset; -} - -/** - * Called when "/proc/dri/.../vm" is read. - * - * \param buf output buffer. - * \param start start of output data. - * \param offset requested start offset. - * \param request requested number of bytes. - * \param eof whether there is no more data to return. - * \param data private data. - * \return number of written bytes. - * - * Prints information about all mappings in drm_device::maplist. - */ -static int drm__vm_info(char *buf, char **start, off_t offset, int request, - int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - int len = 0; - struct drm_map *map; - struct drm_map_list *r_list; - - /* Hardcoded from _DRM_FRAME_BUFFER, - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */ - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" }; - const char *type; - int i; - - if (offset > DRM_PROC_LIMIT) { - *eof = 1; - return 0; - } - - *start = &buf[offset]; - *eof = 0; - - DRM_PROC_PRINT("slot offset size type flags " - "address mtrr\n\n"); - i = 0; - list_for_each_entry(r_list, &dev->maplist, head) { - map = r_list->map; - if (!map) - continue; - if (map->type < 0 || map->type > 5) - type = "??"; - else - type = types[map->type]; - DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ", - i, - map->offset, - map->size, type, map->flags, - (unsigned long) r_list->user_token); - if (map->mtrr < 0) { - DRM_PROC_PRINT("none\n"); - } else { - DRM_PROC_PRINT("%4d\n", map->mtrr); + if (dev->driver->proc_init) { + ret = dev->driver->proc_init(minor); + if (ret) { + DRM_ERROR("DRM: Driver failed to initialize " + "/proc/dri.\n"); + return ret; } - i++; - } - - if (len > request + offset) - return request; - *eof = 1; - return len - offset; -} - -/** - * Simply calls _vm_info() while holding the drm_device::struct_mutex lock. - */ -static int drm_vm_info(char *buf, char **start, off_t offset, int request, - int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - int ret; - - mutex_lock(&dev->struct_mutex); - ret = drm__vm_info(buf, start, offset, request, eof, data); - mutex_unlock(&dev->struct_mutex); - return ret; -} - -/** - * Called when "/proc/dri/.../queues" is read. - * - * \param buf output buffer. - * \param start start of output data. - * \param offset requested start offset. 
- * \param request requested number of bytes. - * \param eof whether there is no more data to return. - * \param data private data. - * \return number of written bytes. - */ -static int drm__queues_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - int len = 0; - int i; - struct drm_queue *q; - - if (offset > DRM_PROC_LIMIT) { - *eof = 1; - return 0; } - - *start = &buf[offset]; - *eof = 0; - - DRM_PROC_PRINT(" ctx/flags use fin" - " blk/rw/rwf wait flushed queued" - " locks\n\n"); - for (i = 0; i < dev->queue_count; i++) { - q = dev->queuelist[i]; - atomic_inc(&q->use_count); - DRM_PROC_PRINT_RET(atomic_dec(&q->use_count), - "%5d/0x%03x %5d %5d" - " %5d/%c%c/%c%c%c %5Zd\n", - i, - q->flags, - atomic_read(&q->use_count), - atomic_read(&q->finalization), - atomic_read(&q->block_count), - atomic_read(&q->block_read) ? 'r' : '-', - atomic_read(&q->block_write) ? 'w' : '-', - waitqueue_active(&q->read_queue) ? 'r' : '-', - waitqueue_active(&q-> - write_queue) ? 'w' : '-', - waitqueue_active(&q-> - flush_queue) ? 'f' : '-', - DRM_BUFCOUNT(&q->waitlist)); - atomic_dec(&q->use_count); - } - - if (len > request + offset) - return request; - *eof = 1; - return len - offset; -} - -/** - * Simply calls _queues_info() while holding the drm_device::struct_mutex lock. - */ -static int drm_queues_info(char *buf, char **start, off_t offset, int request, - int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - int ret; - - mutex_lock(&dev->struct_mutex); - ret = drm__queues_info(buf, start, offset, request, eof, data); - mutex_unlock(&dev->struct_mutex); - return ret; + return 0; } -/** - * Called when "/proc/dri/.../bufs" is read. - * - * \param buf output buffer. - * \param start start of output data. - * \param offset requested start offset. - * \param request requested number of bytes. - * \param eof whether there is no more data to return. - * \param data private data. - * \return number of written bytes. 
- */ -static int drm__bufs_info(char *buf, char **start, off_t offset, int request, - int *eof, void *data) +int drm_proc_remove_files(struct drm_info_list *files, int count, + struct drm_minor *minor) { - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - int len = 0; - struct drm_device_dma *dma = dev->dma; + struct list_head *pos, *q; + struct drm_info_node *tmp; int i; - if (!dma || offset > DRM_PROC_LIMIT) { - *eof = 1; - return 0; - } - - *start = &buf[offset]; - *eof = 0; - - DRM_PROC_PRINT(" o size count free segs pages kB\n\n"); - for (i = 0; i <= DRM_MAX_ORDER; i++) { - if (dma->bufs[i].buf_count) - DRM_PROC_PRINT("%2d %8d %5d %5d %5d %5d %5ld\n", - i, - dma->bufs[i].buf_size, - dma->bufs[i].buf_count, - atomic_read(&dma->bufs[i] - .freelist.count), - dma->bufs[i].seg_count, - dma->bufs[i].seg_count - * (1 << dma->bufs[i].page_order), - (dma->bufs[i].seg_count - * (1 << dma->bufs[i].page_order)) - * PAGE_SIZE / 1024); - } - DRM_PROC_PRINT("\n"); - for (i = 0; i < dma->buf_count; i++) { - if (i && !(i % 32)) - DRM_PROC_PRINT("\n"); - DRM_PROC_PRINT(" %d", dma->buflist[i]->list); + for (i = 0; i < count; i++) { + list_for_each_safe(pos, q, &minor->proc_nodes.list) { + tmp = list_entry(pos, struct drm_info_node, list); + if (tmp->info_ent == &files[i]) { + remove_proc_entry(files[i].name, + minor->proc_root); + list_del(pos); + drm_free(tmp, sizeof(struct drm_info_node), + _DRM_DRIVER); + } + } } - DRM_PROC_PRINT("\n"); - - if (len > request + offset) - return request; - *eof = 1; - return len - offset; -} - -/** - * Simply calls _bufs_info() while holding the drm_device::struct_mutex lock. - */ -static int drm_bufs_info(char *buf, char **start, off_t offset, int request, - int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - int ret; - - mutex_lock(&dev->struct_mutex); - ret = drm__bufs_info(buf, start, offset, request, eof, data); - mutex_unlock(&dev->struct_mutex); - return ret; + return 0; } /** - * Called when "/proc/dri/.../vblank" is read. + * Cleanup the proc filesystem resources. * - * \param buf output buffer. - * \param start start of output data. - * \param offset requested start offset. - * \param request requested number of bytes. - * \param eof whether there is no more data to return. - * \param data private data. - * \return number of written bytes. - */ -static int drm__vblank_info(char *buf, char **start, off_t offset, int request, - int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - int len = 0; - int crtc; - - if (offset > DRM_PROC_LIMIT) { - *eof = 1; - return 0; - } - - *start = &buf[offset]; - *eof = 0; - - for (crtc = 0; crtc < dev->num_crtcs; crtc++) { - DRM_PROC_PRINT("CRTC %d enable: %d\n", - crtc, atomic_read(&dev->vblank_refcount[crtc])); - DRM_PROC_PRINT("CRTC %d counter: %d\n", - crtc, drm_vblank_count(dev, crtc)); - DRM_PROC_PRINT("CRTC %d last wait: %d\n", - crtc, dev->last_vblank_wait[crtc]); - DRM_PROC_PRINT("CRTC %d in modeset: %d\n", - crtc, dev->vblank_inmodeset[crtc]); - } - - if (len > request + offset) - return request; - *eof = 1; - return len - offset; -} - -/** - * Simply calls _vblank_info() while holding the drm_device::struct_mutex lock. 
- */ -static int drm_vblank_info(char *buf, char **start, off_t offset, int request, - int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - int ret; - - mutex_lock(&dev->struct_mutex); - ret = drm__vblank_info(buf, start, offset, request, eof, data); - mutex_unlock(&dev->struct_mutex); - return ret; -} - -/** - * Called when "/proc/dri/.../clients" is read. + * \param minor device minor number. + * \param root DRI proc dir entry. + * \param dev_root DRI device proc dir entry. + * \return always zero. * - * \param buf output buffer. - * \param start start of output data. - * \param offset requested start offset. - * \param request requested number of bytes. - * \param eof whether there is no more data to return. - * \param data private data. - * \return number of written bytes. + * Remove all proc entries created by proc_init(). */ -static int drm__clients_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data) +int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root) { - struct drm_minor *minor = (struct drm_minor *) data; struct drm_device *dev = minor->dev; - int len = 0; - struct drm_file *priv; + char name[64]; - if (offset > DRM_PROC_LIMIT) { - *eof = 1; + if (!root || !minor->proc_root) return 0; - } - - *start = &buf[offset]; - *eof = 0; - - DRM_PROC_PRINT("a dev pid uid magic ioctls\n\n"); - list_for_each_entry(priv, &dev->filelist, lhead) { - DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n", - priv->authenticated ? 'y' : 'n', - priv->minor->index, - priv->pid, - priv->uid, priv->magic, priv->ioctl_count); - } - if (len > request + offset) - return request; - *eof = 1; - return len - offset; -} - -/** - * Simply calls _clients_info() while holding the drm_device::struct_mutex lock. 
- */ -static int drm_clients_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - int ret; - - mutex_lock(&dev->struct_mutex); - ret = drm__clients_info(buf, start, offset, request, eof, data); - mutex_unlock(&dev->struct_mutex); - return ret; -} - -struct drm_gem_name_info_data { - int len; - char *buf; - int eof; -}; + if (dev->driver->proc_cleanup) + dev->driver->proc_cleanup(minor); -static int drm_gem_one_name_info(int id, void *ptr, void *data) -{ - struct drm_gem_object *obj = ptr; - struct drm_gem_name_info_data *nid = data; + drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor); - DRM_INFO("name %d size %zd\n", obj->name, obj->size); - if (nid->eof) - return 0; + sprintf(name, "%d", minor->index); + remove_proc_entry(name, root); - nid->len += sprintf(&nid->buf[nid->len], - "%6d %8zd %7d %8d\n", - obj->name, obj->size, - atomic_read(&obj->handlecount.refcount), - atomic_read(&obj->refcount.refcount)); - if (nid->len > DRM_PROC_LIMIT) { - nid->eof = 1; - return 0; - } return 0; } -static int drm_gem_name_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - struct drm_gem_name_info_data nid; - - if (offset > DRM_PROC_LIMIT) { - *eof = 1; - return 0; - } - - nid.len = sprintf(buf, " name size handles refcount\n"); - nid.buf = buf; - nid.eof = 0; - idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, &nid); - - *start = &buf[offset]; - *eof = 0; - if (nid.len > request + offset) - return request; - *eof = 1; - return nid.len - offset; -} - -static int drm_gem_object_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - int len = 0; - - if (offset > DRM_PROC_LIMIT) { - *eof = 1; - return 0; - } - - *start = &buf[offset]; - *eof = 0; - DRM_PROC_PRINT("%d objects\n", atomic_read(&dev->object_count)); - DRM_PROC_PRINT("%d object bytes\n", atomic_read(&dev->object_memory)); - DRM_PROC_PRINT("%d pinned\n", atomic_read(&dev->pin_count)); - DRM_PROC_PRINT("%d pin bytes\n", atomic_read(&dev->pin_memory)); - DRM_PROC_PRINT("%d gtt bytes\n", atomic_read(&dev->gtt_memory)); - DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total); - if (len > request + offset) - return request; - *eof = 1; - return len - offset; -} - -#if DRM_DEBUG_CODE - -static int drm__vma_info(char *buf, char **start, off_t offset, int request, - int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - int len = 0; - struct drm_vma_entry *pt; - struct vm_area_struct *vma; -#if defined(__i386__) - unsigned int pgprot; -#endif - - if (offset > DRM_PROC_LIMIT) { - *eof = 1; - return 0; - } - - *start = &buf[offset]; - *eof = 0; - - DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n", - atomic_read(&dev->vma_count), - high_memory, virt_to_phys(high_memory)); - list_for_each_entry(pt, &dev->vmalist, head) { - if (!(vma = pt->vma)) - continue; - DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000", - pt->pid, - vma->vm_start, - vma->vm_end, - vma->vm_flags & VM_READ ? 'r' : '-', - vma->vm_flags & VM_WRITE ? 'w' : '-', - vma->vm_flags & VM_EXEC ? 'x' : '-', - vma->vm_flags & VM_MAYSHARE ? 's' : 'p', - vma->vm_flags & VM_LOCKED ? 'l' : '-', - vma->vm_flags & VM_IO ? 
'i' : '-', - vma->vm_pgoff); - -#if defined(__i386__) - pgprot = pgprot_val(vma->vm_page_prot); - DRM_PROC_PRINT(" %c%c%c%c%c%c%c%c%c", - pgprot & _PAGE_PRESENT ? 'p' : '-', - pgprot & _PAGE_RW ? 'w' : 'r', - pgprot & _PAGE_USER ? 'u' : 's', - pgprot & _PAGE_PWT ? 't' : 'b', - pgprot & _PAGE_PCD ? 'u' : 'c', - pgprot & _PAGE_ACCESSED ? 'a' : '-', - pgprot & _PAGE_DIRTY ? 'd' : '-', - pgprot & _PAGE_PSE ? 'm' : 'k', - pgprot & _PAGE_GLOBAL ? 'g' : 'l'); -#endif - DRM_PROC_PRINT("\n"); - } - - if (len > request + offset) - return request; - *eof = 1; - return len - offset; -} - -static int drm_vma_info(char *buf, char **start, off_t offset, int request, - int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - int ret; - - mutex_lock(&dev->struct_mutex); - ret = drm__vma_info(buf, start, offset, request, eof, data); - mutex_unlock(&dev->struct_mutex); - return ret; -} -#endif diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index 7c8b15b..48f33be 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c @@ -50,6 +50,7 @@ struct idr drm_minors_idr; struct class *drm_class; struct proc_dir_entry *drm_proc_root; +struct dentry *drm_debugfs_root; static int drm_minor_get_id(struct drm_device *dev, int type) { @@ -313,7 +314,15 @@ static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int t goto err_mem; } } else - new_minor->dev_root = NULL; + new_minor->proc_root = NULL; + +#if defined(CONFIG_DEBUG_FS) + ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root); + if (ret) { + DRM_ERROR("DRM: Failed to initialize /debugfs/dri.\n"); + goto err_g2; + } +#endif ret = drm_sysfs_device_add(new_minor); if (ret) { @@ -451,6 +460,10 @@ int drm_put_minor(struct drm_minor **minor_p) if (minor->type == DRM_MINOR_LEGACY) drm_proc_cleanup(minor, drm_proc_root); +#if defined(CONFIG_DEBUG_FS) + drm_debugfs_cleanup(minor); +#endif + drm_sysfs_device_remove(minor); idr_remove(&drm_minors_idr, minor->index); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index e5f4ae9..c19a93c 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -758,6 +758,8 @@ struct drm_driver { int (*proc_init)(struct drm_minor *minor); void (*proc_cleanup)(struct drm_minor *minor); + int (*debugfs_init)(struct drm_minor *minor); + void (*debugfs_cleanup)(struct drm_minor *minor); /** * Driver-specific constructor for drm_gem_objects, to set up @@ -793,6 +795,48 @@ struct drm_driver { #define DRM_MINOR_CONTROL 2 #define DRM_MINOR_RENDER 3 + +/** + * debugfs node list. This structure represents a debugfs file to + * be created by the drm core + */ +struct drm_debugfs_list { + const char *name; /** file name */ + int (*show)(struct seq_file*, void*); /** show callback */ + u32 driver_features; /**< Required driver features for this entry */ +}; + +/** + * debugfs node structure. This structure represents a debugfs file. + */ +struct drm_debugfs_node { + struct list_head list; + struct drm_minor *minor; + struct drm_debugfs_list *debugfs_ent; + struct dentry *dent; +}; + +/** + * Info file list entry. This structure represents a debugfs or proc file to + * be created by the drm core + */ +struct drm_info_list { + const char *name; /** file name */ + int (*show)(struct seq_file*, void*); /** show callback */ + u32 driver_features; /**< Required driver features for this entry */ + void *data; +}; + +/** + * debugfs node structure. This structure represents a debugfs file. 
+ */ +struct drm_info_node { + struct list_head list; + struct drm_minor *minor; + struct drm_info_list *info_ent; + struct dentry *dent; +}; + /** * DRM minor structure. This structure represents a drm minor number. */ @@ -802,7 +846,12 @@ struct drm_minor { dev_t device; /**< Device number for mknod */ struct device kdev; /**< Linux device */ struct drm_device *dev; - struct proc_dir_entry *dev_root; /**< proc directory entry */ + + struct proc_dir_entry *proc_root; /**< proc directory entry */ + struct drm_info_node proc_nodes; + struct dentry *debugfs_root; + struct drm_info_node debugfs_nodes; + struct drm_master *master; /* currently active master for this node */ struct list_head master_list; struct drm_mode_group mode_group; @@ -1258,6 +1307,7 @@ extern unsigned int drm_debug; extern struct class *drm_class; extern struct proc_dir_entry *drm_proc_root; +extern struct dentry *drm_debugfs_root; extern struct idr drm_minors_idr; @@ -1268,6 +1318,31 @@ extern int drm_proc_init(struct drm_minor *minor, int minor_id, struct proc_dir_entry *root); extern int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root); + /* Debugfs support */ +#if defined(CONFIG_DEBUG_FS) +extern int drm_debugfs_init(struct drm_minor *minor, int minor_id, + struct dentry *root); +extern int drm_debugfs_create_files(struct drm_info_list *files, int count, + struct dentry *root, struct drm_minor *minor); +extern int drm_debugfs_remove_files(struct drm_info_list *files, int count, + struct drm_minor *minor); +extern int drm_debugfs_cleanup(struct drm_minor *minor); +#endif + + /* Info file support */ +extern int drm_name_info(struct seq_file *m, void *data); +extern int drm_vm_info(struct seq_file *m, void *data); +extern int drm_queues_info(struct seq_file *m, void *data); +extern int drm_bufs_info(struct seq_file *m, void *data); +extern int drm_vblank_info(struct seq_file *m, void *data); +extern int drm_clients_info(struct seq_file *m, void* data); +extern int drm_gem_name_info(struct seq_file *m, void *data); +extern int drm_gem_object_info(struct seq_file *m, void* data); + +#if DRM_DEBUG_CODE +extern int drm_vma_info(struct seq_file *m, void *data); +#endif + /* Scatter Gather Support (drm_scatter.h) */ extern void drm_sg_cleanup(struct drm_sg_mem * entry); extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data, -- cgit v0.10.2 From 2017263e9e72974610179beaa85c4498b9c4b7a4 Mon Sep 17 00:00:00 2001 From: Ben Gamari Date: Tue, 17 Feb 2009 20:08:50 -0500 Subject: drm/i915: Convert i915 proc files to seq_file and move to debugfs. 
Signed-off-by: Ben Gamari Signed-off-by: Eric Anholt diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 793cba3..51c5a05 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -7,7 +7,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \ i915_suspend.o \ i915_gem.o \ i915_gem_debug.o \ - i915_gem_proc.o \ + i915_gem_debugfs.o \ i915_gem_tiling.o \ intel_display.o \ intel_crt.o \ diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index b293ef0..dcb91f5 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -150,8 +150,10 @@ static struct drm_driver driver = { .get_reg_ofs = drm_core_get_reg_ofs, .master_create = i915_master_create, .master_destroy = i915_master_destroy, - .proc_init = i915_gem_proc_init, - .proc_cleanup = i915_gem_proc_cleanup, +#if defined(CONFIG_DEBUG_FS) + .debugfs_init = i915_gem_debugfs_init, + .debugfs_cleanup = i915_gem_debugfs_cleanup, +#endif .gem_init_object = i915_gem_init_object, .gem_free_object = i915_gem_free_object, .gem_vm_ops = &i915_gem_vm_ops, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 2c02ce6..1c03b3e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -605,8 +605,6 @@ int i915_gem_get_tiling(struct drm_device *dev, void *data, int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); void i915_gem_load(struct drm_device *dev); -int i915_gem_proc_init(struct drm_minor *minor); -void i915_gem_proc_cleanup(struct drm_minor *minor); int i915_gem_init_object(struct drm_gem_object *obj); void i915_gem_free_object(struct drm_gem_object *obj); int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment); @@ -650,6 +648,10 @@ void i915_gem_dump_object(struct drm_gem_object *obj, int len, const char *where, uint32_t mark); void i915_dump_lru(struct drm_device *dev, const char *where); +/* i915_debugfs.c */ +int i915_gem_debugfs_init(struct drm_minor *minor); +void i915_gem_debugfs_cleanup(struct drm_minor *minor); + /* i915_suspend.c */ extern int i915_save_state(struct drm_device *dev); extern int i915_restore_state(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c new file mode 100644 index 0000000..dd2b0ed --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c @@ -0,0 +1,230 @@ +/* + * Copyright © 2008 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Eric Anholt + * Keith Packard + * + */ + +#include +#include "drmP.h" +#include "drm.h" +#include "i915_drm.h" +#include "i915_drv.h" + +#define DRM_I915_RING_DEBUG 1 + + +#if defined(CONFIG_DEBUG_FS) + +static int i915_gem_active_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv; + + seq_printf(m, "Active:\n"); + list_for_each_entry(obj_priv, &dev_priv->mm.active_list, + list) + { + struct drm_gem_object *obj = obj_priv->obj; + if (obj->name) { + seq_printf(m, " %p(%d): %08x %08x %d\n", + obj, obj->name, + obj->read_domains, obj->write_domain, + obj_priv->last_rendering_seqno); + } else { + seq_printf(m, " %p: %08x %08x %d\n", + obj, + obj->read_domains, obj->write_domain, + obj_priv->last_rendering_seqno); + } + } + return 0; +} + +static int i915_gem_flushing_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv; + + seq_printf(m, "Flushing:\n"); + list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, + list) + { + struct drm_gem_object *obj = obj_priv->obj; + if (obj->name) { + seq_printf(m, " %p(%d): %08x %08x %d\n", + obj, obj->name, + obj->read_domains, obj->write_domain, + obj_priv->last_rendering_seqno); + } else { + seq_printf(m, " %p: %08x %08x %d\n", obj, + obj->read_domains, obj->write_domain, + obj_priv->last_rendering_seqno); + } + } + return 0; +} + +static int i915_gem_inactive_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv; + + seq_printf(m, "Inactive:\n"); + list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, + list) + { + struct drm_gem_object *obj = obj_priv->obj; + if (obj->name) { + seq_printf(m, " %p(%d): %08x %08x %d\n", + obj, obj->name, + obj->read_domains, obj->write_domain, + obj_priv->last_rendering_seqno); + } else { + seq_printf(m, " %p: %08x %08x %d\n", obj, + obj->read_domains, obj->write_domain, + obj_priv->last_rendering_seqno); + } + } + return 0; +} + +static int i915_gem_request_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_request *gem_request; + + seq_printf(m, "Request:\n"); + list_for_each_entry(gem_request, &dev_priv->mm.request_list, list) { + seq_printf(m, " %d @ %d\n", + gem_request->seqno, + (int) (jiffies - gem_request->emitted_jiffies)); + } + return 0; +} + +static int i915_gem_seqno_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + + if (dev_priv->hw_status_page != NULL) { + seq_printf(m, "Current sequence: %d\n", + 
i915_get_gem_seqno(dev)); + } else { + seq_printf(m, "Current sequence: hws uninitialized\n"); + } + seq_printf(m, "Waiter sequence: %d\n", + dev_priv->mm.waiting_gem_seqno); + seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno); + return 0; +} + + +static int i915_interrupt_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + + seq_printf(m, "Interrupt enable: %08x\n", + I915_READ(IER)); + seq_printf(m, "Interrupt identity: %08x\n", + I915_READ(IIR)); + seq_printf(m, "Interrupt mask: %08x\n", + I915_READ(IMR)); + seq_printf(m, "Pipe A stat: %08x\n", + I915_READ(PIPEASTAT)); + seq_printf(m, "Pipe B stat: %08x\n", + I915_READ(PIPEBSTAT)); + seq_printf(m, "Interrupts received: %d\n", + atomic_read(&dev_priv->irq_received)); + if (dev_priv->hw_status_page != NULL) { + seq_printf(m, "Current sequence: %d\n", + i915_get_gem_seqno(dev)); + } else { + seq_printf(m, "Current sequence: hws uninitialized\n"); + } + seq_printf(m, "Waiter sequence: %d\n", + dev_priv->mm.waiting_gem_seqno); + seq_printf(m, "IRQ sequence: %d\n", + dev_priv->mm.irq_gem_seqno); + return 0; +} + +static int i915_hws_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + int i; + volatile u32 *hws; + + hws = (volatile u32 *)dev_priv->hw_status_page; + if (hws == NULL) + return 0; + + for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) { + seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", + i * 4, + hws[i], hws[i + 1], hws[i + 2], hws[i + 3]); + } + return 0; +} + +static struct drm_info_list i915_gem_debugfs_list[] = { + {"i915_gem_active", i915_gem_active_info, 0}, + {"i915_gem_flushing", i915_gem_flushing_info, 0}, + {"i915_gem_inactive", i915_gem_inactive_info, 0}, + {"i915_gem_request", i915_gem_request_info, 0}, + {"i915_gem_seqno", i915_gem_seqno_info, 0}, + {"i915_gem_interrupt", i915_interrupt_info, 0}, + {"i915_gem_hws", i915_hws_info, 0}, +}; +#define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list) + +int i915_gem_debugfs_init(struct drm_minor *minor) +{ + return drm_debugfs_create_files(i915_gem_debugfs_list, + I915_GEM_DEBUGFS_ENTRIES, + minor->debugfs_root, minor); +} + +void i915_gem_debugfs_cleanup(struct drm_minor *minor) +{ + drm_debugfs_remove_files(i915_gem_debugfs_list, + I915_GEM_DEBUGFS_ENTRIES, minor); +} + +#endif /* CONFIG_DEBUG_FS */ + diff --git a/drivers/gpu/drm/i915/i915_gem_proc.c b/drivers/gpu/drm/i915/i915_gem_proc.c deleted file mode 100644 index 4d1b9de..0000000 --- a/drivers/gpu/drm/i915/i915_gem_proc.c +++ /dev/null @@ -1,334 +0,0 @@ -/* - * Copyright © 2008 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - * Authors: - * Eric Anholt - * Keith Packard - * - */ - -#include "drmP.h" -#include "drm.h" -#include "i915_drm.h" -#include "i915_drv.h" - -static int i915_gem_active_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; - int len = 0; - - if (offset > DRM_PROC_LIMIT) { - *eof = 1; - return 0; - } - - *start = &buf[offset]; - *eof = 0; - DRM_PROC_PRINT("Active:\n"); - list_for_each_entry(obj_priv, &dev_priv->mm.active_list, - list) - { - struct drm_gem_object *obj = obj_priv->obj; - if (obj->name) { - DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n", - obj, obj->name, - obj->read_domains, obj->write_domain, - obj_priv->last_rendering_seqno); - } else { - DRM_PROC_PRINT(" %p: %08x %08x %d\n", - obj, - obj->read_domains, obj->write_domain, - obj_priv->last_rendering_seqno); - } - } - if (len > request + offset) - return request; - *eof = 1; - return len - offset; -} - -static int i915_gem_flushing_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; - int len = 0; - - if (offset > DRM_PROC_LIMIT) { - *eof = 1; - return 0; - } - - *start = &buf[offset]; - *eof = 0; - DRM_PROC_PRINT("Flushing:\n"); - list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, - list) - { - struct drm_gem_object *obj = obj_priv->obj; - if (obj->name) { - DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n", - obj, obj->name, - obj->read_domains, obj->write_domain, - obj_priv->last_rendering_seqno); - } else { - DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj, - obj->read_domains, obj->write_domain, - obj_priv->last_rendering_seqno); - } - } - if (len > request + offset) - return request; - *eof = 1; - return len - offset; -} - -static int i915_gem_inactive_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; - int len = 0; - - if (offset > DRM_PROC_LIMIT) { - *eof = 1; - return 0; - } - - *start = &buf[offset]; - *eof = 0; - DRM_PROC_PRINT("Inactive:\n"); - list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, - list) - { - struct drm_gem_object *obj = obj_priv->obj; - if (obj->name) { - DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n", - obj, obj->name, - obj->read_domains, obj->write_domain, - obj_priv->last_rendering_seqno); - } else { - DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj, - obj->read_domains, obj->write_domain, - obj_priv->last_rendering_seqno); - } - } - if (len > request + offset) - return request; - *eof = 1; - return len - offset; -} - -static int i915_gem_request_info(char *buf, char **start, off_t 
offset, - int request, int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_request *gem_request; - int len = 0; - - if (offset > DRM_PROC_LIMIT) { - *eof = 1; - return 0; - } - - *start = &buf[offset]; - *eof = 0; - DRM_PROC_PRINT("Request:\n"); - list_for_each_entry(gem_request, &dev_priv->mm.request_list, - list) - { - DRM_PROC_PRINT(" %d @ %d\n", - gem_request->seqno, - (int) (jiffies - gem_request->emitted_jiffies)); - } - if (len > request + offset) - return request; - *eof = 1; - return len - offset; -} - -static int i915_gem_seqno_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - int len = 0; - - if (offset > DRM_PROC_LIMIT) { - *eof = 1; - return 0; - } - - *start = &buf[offset]; - *eof = 0; - if (dev_priv->hw_status_page != NULL) { - DRM_PROC_PRINT("Current sequence: %d\n", - i915_get_gem_seqno(dev)); - } else { - DRM_PROC_PRINT("Current sequence: hws uninitialized\n"); - } - DRM_PROC_PRINT("Waiter sequence: %d\n", - dev_priv->mm.waiting_gem_seqno); - DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno); - if (len > request + offset) - return request; - *eof = 1; - return len - offset; -} - - -static int i915_interrupt_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - int len = 0; - - if (offset > DRM_PROC_LIMIT) { - *eof = 1; - return 0; - } - - *start = &buf[offset]; - *eof = 0; - DRM_PROC_PRINT("Interrupt enable: %08x\n", - I915_READ(IER)); - DRM_PROC_PRINT("Interrupt identity: %08x\n", - I915_READ(IIR)); - DRM_PROC_PRINT("Interrupt mask: %08x\n", - I915_READ(IMR)); - DRM_PROC_PRINT("Pipe A stat: %08x\n", - I915_READ(PIPEASTAT)); - DRM_PROC_PRINT("Pipe B stat: %08x\n", - I915_READ(PIPEBSTAT)); - DRM_PROC_PRINT("Interrupts received: %d\n", - atomic_read(&dev_priv->irq_received)); - if (dev_priv->hw_status_page != NULL) { - DRM_PROC_PRINT("Current sequence: %d\n", - i915_get_gem_seqno(dev)); - } else { - DRM_PROC_PRINT("Current sequence: hws uninitialized\n"); - } - DRM_PROC_PRINT("Waiter sequence: %d\n", - dev_priv->mm.waiting_gem_seqno); - DRM_PROC_PRINT("IRQ sequence: %d\n", - dev_priv->mm.irq_gem_seqno); - if (len > request + offset) - return request; - *eof = 1; - return len - offset; -} - -static int i915_hws_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - int len = 0, i; - volatile u32 *hws; - - if (offset > DRM_PROC_LIMIT) { - *eof = 1; - return 0; - } - - hws = (volatile u32 *)dev_priv->hw_status_page; - if (hws == NULL) { - *eof = 1; - return 0; - } - - *start = &buf[offset]; - *eof = 0; - for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) { - DRM_PROC_PRINT("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", - i * 4, - hws[i], hws[i + 1], hws[i + 2], hws[i + 3]); - } - if (len > request + offset) - return request; - *eof = 1; - return len - offset; -} - -static struct drm_proc_list { - /** file name */ - const char *name; - /** proc callback*/ - int (*f) (char *, char **, off_t, int, int *, void *); 
-} i915_gem_proc_list[] = { - {"i915_gem_active", i915_gem_active_info}, - {"i915_gem_flushing", i915_gem_flushing_info}, - {"i915_gem_inactive", i915_gem_inactive_info}, - {"i915_gem_request", i915_gem_request_info}, - {"i915_gem_seqno", i915_gem_seqno_info}, - {"i915_gem_interrupt", i915_interrupt_info}, - {"i915_gem_hws", i915_hws_info}, -}; - -#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list) - -int i915_gem_proc_init(struct drm_minor *minor) -{ - struct proc_dir_entry *ent; - int i, j; - - for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) { - ent = create_proc_entry(i915_gem_proc_list[i].name, - S_IFREG | S_IRUGO, minor->dev_root); - if (!ent) { - DRM_ERROR("Cannot create /proc/dri/.../%s\n", - i915_gem_proc_list[i].name); - for (j = 0; j < i; j++) - remove_proc_entry(i915_gem_proc_list[i].name, - minor->dev_root); - return -1; - } - ent->read_proc = i915_gem_proc_list[i].f; - ent->data = minor; - } - return 0; -} - -void i915_gem_proc_cleanup(struct drm_minor *minor) -{ - int i; - - if (!minor->dev_root) - return; - - for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) - remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root); -} -- cgit v0.10.2 From 433e12f78b68a8069f54956edf766bb21394c197 Mon Sep 17 00:00:00 2001 From: Ben Gamari Date: Tue, 17 Feb 2009 20:08:51 -0500 Subject: drm/i915: Consolidate gem object list dumping Here we eliminate a few functions in favor of using a single function to dump from all of the object lists. Signed-Off-By: Ben Gamari Signed-off-by: Eric Anholt diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c index dd2b0ed..4fc845c 100644 --- a/drivers/gpu/drm/i915/i915_gem_debugfs.c +++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c @@ -37,69 +37,38 @@ #if defined(CONFIG_DEBUG_FS) -static int i915_gem_active_info(struct seq_file *m, void *data) -{ - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; - - seq_printf(m, "Active:\n"); - list_for_each_entry(obj_priv, &dev_priv->mm.active_list, - list) - { - struct drm_gem_object *obj = obj_priv->obj; - if (obj->name) { - seq_printf(m, " %p(%d): %08x %08x %d\n", - obj, obj->name, - obj->read_domains, obj->write_domain, - obj_priv->last_rendering_seqno); - } else { - seq_printf(m, " %p: %08x %08x %d\n", - obj, - obj->read_domains, obj->write_domain, - obj_priv->last_rendering_seqno); - } - } - return 0; -} +#define ACTIVE_LIST 1 +#define FLUSHING_LIST 2 +#define INACTIVE_LIST 3 -static int i915_gem_flushing_info(struct seq_file *m, void *data) +static int i915_gem_object_list_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; + uintptr_t list = (uintptr_t) node->info_ent->data; + struct list_head *head; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv; - seq_printf(m, "Flushing:\n"); - list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, - list) - { - struct drm_gem_object *obj = obj_priv->obj; - if (obj->name) { - seq_printf(m, " %p(%d): %08x %08x %d\n", - obj, obj->name, - obj->read_domains, obj->write_domain, - obj_priv->last_rendering_seqno); - } else { - seq_printf(m, " %p: %08x %08x %d\n", obj, - obj->read_domains, obj->write_domain, - obj_priv->last_rendering_seqno); - } + switch (list) { + case ACTIVE_LIST: + seq_printf(m, "Active:\n"); + head = &dev_priv->mm.active_list; + break; + 
case INACTIVE_LIST: + seq_printf(m, "Inactive:\n"); + head = &dev_priv->mm.inactive_list; + break; + case FLUSHING_LIST: + seq_printf(m, "Flushing:\n"); + head = &dev_priv->mm.flushing_list; + break; + default: + DRM_INFO("Ooops, unexpected list\n"); + return 0; } - return 0; -} -static int i915_gem_inactive_info(struct seq_file *m, void *data) -{ - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; - - seq_printf(m, "Inactive:\n"); - list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, - list) + list_for_each_entry(obj_priv, head, list) { struct drm_gem_object *obj = obj_priv->obj; if (obj->name) { @@ -108,7 +77,8 @@ static int i915_gem_inactive_info(struct seq_file *m, void *data) obj->read_domains, obj->write_domain, obj_priv->last_rendering_seqno); } else { - seq_printf(m, " %p: %08x %08x %d\n", obj, + seq_printf(m, " %p: %08x %08x %d\n", + obj, obj->read_domains, obj->write_domain, obj_priv->last_rendering_seqno); } @@ -203,9 +173,9 @@ static int i915_hws_info(struct seq_file *m, void *data) } static struct drm_info_list i915_gem_debugfs_list[] = { - {"i915_gem_active", i915_gem_active_info, 0}, - {"i915_gem_flushing", i915_gem_flushing_info, 0}, - {"i915_gem_inactive", i915_gem_inactive_info, 0}, + {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, + {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, + {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, {"i915_gem_request", i915_gem_request_info, 0}, {"i915_gem_seqno", i915_gem_seqno_info, 0}, {"i915_gem_interrupt", i915_interrupt_info, 0}, -- cgit v0.10.2 From f4ceda89895b56e2c03dd327f13d0256838a20ab Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Tue, 17 Feb 2009 23:53:41 -0800 Subject: drm/i915: Add information on pinning and fencing to the i915 list debug. This was inspired by a patch by Chris Wilson, though none of it applied in any way due to the debugfs work and I decided to change the formatting of the new information anyway. 
Signed-off-by: Eric Anholt diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c index 4fc845c..f7e7d37 100644 --- a/drivers/gpu/drm/i915/i915_gem_debugfs.c +++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c @@ -70,18 +70,27 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) list_for_each_entry(obj_priv, head, list) { + char *pin_description; struct drm_gem_object *obj = obj_priv->obj; - if (obj->name) { - seq_printf(m, " %p(%d): %08x %08x %d\n", - obj, obj->name, - obj->read_domains, obj->write_domain, - obj_priv->last_rendering_seqno); - } else { - seq_printf(m, " %p: %08x %08x %d\n", - obj, - obj->read_domains, obj->write_domain, - obj_priv->last_rendering_seqno); - } + + if (obj_priv->user_pin_count > 0) + pin_description = "P"; + else if (obj_priv->pin_count > 0) + pin_description = "p"; + else + pin_description = " "; + + seq_printf(m, " %p: %s %08x %08x %d", + obj, + pin_description, + obj->read_domains, obj->write_domain, + obj_priv->last_rendering_seqno); + + if (obj->name) + seq_printf(m, " (name: %d)", obj->name); + if (obj_priv->fence_reg != I915_FENCE_REG_NONE) + seq_printf(m, " (fence: %d\n", obj_priv->fence_reg); + seq_printf(m, "\n"); } return 0; } -- cgit v0.10.2 From a6172a80ecb7ac64151960de1f709f78b509c57c Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 11 Feb 2009 14:26:38 +0000 Subject: drm/i915: Display fence register state in debugfs i915_gem_fence_regs node. Signed-off-by: Chris Wilson Signed-off-by: Eric Anholt diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c index f7e7d37..5a4cdb5 100644 --- a/drivers/gpu/drm/i915/i915_gem_debugfs.c +++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c @@ -41,6 +41,26 @@ #define FLUSHING_LIST 2 #define INACTIVE_LIST 3 +static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv) +{ + if (obj_priv->user_pin_count > 0) + return "P"; + else if (obj_priv->pin_count > 0) + return "p"; + else + return " "; +} + +static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv) +{ + switch (obj_priv->tiling_mode) { + default: + case I915_TILING_NONE: return " "; + case I915_TILING_X: return "X"; + case I915_TILING_Y: return "Y"; + } +} + static int i915_gem_object_list_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; @@ -70,19 +90,11 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) list_for_each_entry(obj_priv, head, list) { - char *pin_description; struct drm_gem_object *obj = obj_priv->obj; - if (obj_priv->user_pin_count > 0) - pin_description = "P"; - else if (obj_priv->pin_count > 0) - pin_description = "p"; - else - pin_description = " "; - seq_printf(m, " %p: %s %08x %08x %d", obj, - pin_description, + get_pin_flag(obj_priv), obj->read_domains, obj->write_domain, obj_priv->last_rendering_seqno); @@ -161,6 +173,41 @@ static int i915_interrupt_info(struct seq_file *m, void *data) return 0; } +static int i915_gem_fence_regs_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + int i; + + seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); + seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); + for (i = 0; i < dev_priv->num_fence_regs; i++) { + struct drm_gem_object *obj = dev_priv->fence_regs[i].obj; + + if (obj == NULL) { + seq_printf(m, "Fenced object[%2d] = 
unused\n", i); + } else { + struct drm_i915_gem_object *obj_priv; + + obj_priv = obj->driver_private; + seq_printf(m, "Fenced object[%2d] = %p: %s " + "%08x %08x %08x %s %08x %08x %d", + i, obj, get_pin_flag(obj_priv), + obj_priv->gtt_offset, + obj->size, obj_priv->stride, + get_tiling_flag(obj_priv), + obj->read_domains, obj->write_domain, + obj_priv->last_rendering_seqno); + if (obj->name) + seq_printf(m, " (name: %d)", obj->name); + seq_printf(m, "\n"); + } + } + + return 0; +} + static int i915_hws_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; @@ -187,6 +234,7 @@ static struct drm_info_list i915_gem_debugfs_list[] = { {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, {"i915_gem_request", i915_gem_request_info, 0}, {"i915_gem_seqno", i915_gem_seqno_info, 0}, + {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, {"i915_gem_interrupt", i915_interrupt_info, 0}, {"i915_gem_hws", i915_hws_info, 0}, }; -- cgit v0.10.2 From ad086c833d00ef3be56ec554b1061f19e87a6210 Mon Sep 17 00:00:00 2001 From: "Owain G. Ainsworth" Date: Fri, 20 Feb 2009 08:30:19 +0000 Subject: i915/drm: Remove two redundant agp_chipset_flushes agp_chipset_flush() is for flushing the intel GMCH write cache via the IFP, these two uses are for when we're getting the object into the cpu READ domain, and thus should not be needed. This confused me when I was getting my head around the code. With thanks to airlied for helping me check my mental picture of how the flushes and clflushes are supposed to be used. Signed-off-by: Owain G. Ainsworth Signed-off-by: Eric Anholt diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index f135c90..b52cba0 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2381,7 +2381,6 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) { - struct drm_device *dev = obj->dev; int ret; i915_gem_object_flush_gpu_write_domain(obj); @@ -2400,7 +2399,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) /* Flush the CPU cache if it's still invalid. */ if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { i915_gem_clflush_object(obj); - drm_agp_chipset_flush(dev); obj->read_domains |= I915_GEM_DOMAIN_CPU; } @@ -2612,7 +2610,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) { - struct drm_device *dev = obj->dev; struct drm_i915_gem_object *obj_priv = obj->driver_private; if (!obj_priv->page_cpu_valid) @@ -2628,7 +2625,6 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) continue; drm_clflush_pages(obj_priv->pages + i, 1); } - drm_agp_chipset_flush(dev); } /* Free the page_cpu_valid mappings which are now stale, whether -- cgit v0.10.2 From 2177832f2e20fceb32142bb4fd33ae68c8af8c5a Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Mon, 23 Feb 2009 15:19:16 +0800 Subject: agp/intel: Add support for new intel chipset. This is a G33-like desktop and mobile chipset. 
Signed-off-by: Shaohua Li Signed-off-by: Eric Anholt diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index 4373adb..9d9490e 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c @@ -26,6 +26,10 @@ #define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12 #define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC #define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE +#define PCI_DEVICE_ID_INTEL_IGDGM_HB 0xA010 +#define PCI_DEVICE_ID_INTEL_IGDGM_IG 0xA011 +#define PCI_DEVICE_ID_INTEL_IGDG_HB 0xA000 +#define PCI_DEVICE_ID_INTEL_IGDG_IG 0xA001 #define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0 #define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2 #define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0 @@ -60,7 +64,12 @@ #define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \ - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB) + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \ + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDGM_HB || \ + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDG_HB) + +#define IS_IGD (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDGM_HB || \ + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDG_HB) #define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_E_HB || \ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \ @@ -510,7 +519,7 @@ static void intel_i830_init_gtt_entries(void) size = 512; } size += 4; /* add in BIOS popup space */ - } else if (IS_G33) { + } else if (IS_G33 && !IS_IGD) { /* G33's GTT size defined in gmch_ctrl */ switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) { case G33_PGETBL_SIZE_1M: @@ -526,7 +535,7 @@ static void intel_i830_init_gtt_entries(void) size = 512; } size += 4; - } else if (IS_G4X) { + } else if (IS_G4X || IS_IGD) { /* On 4 series hardware, GTT stolen is separate from graphics * stolen, ignore it in stolen gtt entries counting. However, * 4KB of the stolen memory doesn't get mapped to the GTT. 
@@ -2161,6 +2170,10 @@ static const struct intel_driver_description { NULL, &intel_g33_driver }, { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33", NULL, &intel_g33_driver }, + { PCI_DEVICE_ID_INTEL_IGDGM_HB, PCI_DEVICE_ID_INTEL_IGDGM_IG, 0, "IGD", + NULL, &intel_g33_driver }, + { PCI_DEVICE_ID_INTEL_IGDG_HB, PCI_DEVICE_ID_INTEL_IGDG_IG, 0, "IGD", + NULL, &intel_g33_driver }, { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0, "Mobile Intel® GM45 Express", NULL, &intel_i965_driver }, { PCI_DEVICE_ID_INTEL_IGD_E_HB, PCI_DEVICE_ID_INTEL_IGD_E_IG, 0, @@ -2355,6 +2368,8 @@ static struct pci_device_id agp_intel_pci_table[] = { ID(PCI_DEVICE_ID_INTEL_82945G_HB), ID(PCI_DEVICE_ID_INTEL_82945GM_HB), ID(PCI_DEVICE_ID_INTEL_82945GME_HB), + ID(PCI_DEVICE_ID_INTEL_IGDGM_HB), + ID(PCI_DEVICE_ID_INTEL_IGDG_HB), ID(PCI_DEVICE_ID_INTEL_82946GZ_HB), ID(PCI_DEVICE_ID_INTEL_82G35_HB), ID(PCI_DEVICE_ID_INTEL_82965Q_HB), diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 1c03b3e..c1685d0 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -787,15 +787,21 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); (dev)->pci_device == 0x2E22 || \ IS_GM45(dev)) +#define IS_IGDG(dev) ((dev)->pci_device == 0xa001) +#define IS_IGDGM(dev) ((dev)->pci_device == 0xa011) +#define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev)) + #define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \ (dev)->pci_device == 0x29B2 || \ - (dev)->pci_device == 0x29D2) + (dev)->pci_device == 0x29D2 || \ + (IS_IGD(dev))) #define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \ IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev)) #define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \ - IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev)) + IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \ + IS_IGD(dev)) #define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev)) /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 90600d8..6d56777 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -359,6 +359,7 @@ #define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */ #define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */ #define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ +#define DPLL_FPA01_P1_POST_DIV_MASK_IGD 0x00ff8000 /* IGD */ #define I915_FIFO_UNDERRUN_STATUS (1UL<<31) #define I915_CRC_ERROR_ENABLE (1UL<<29) @@ -435,6 +436,7 @@ */ #define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000 #define DPLL_FPA01_P1_POST_DIV_SHIFT 16 +#define DPLL_FPA01_P1_POST_DIV_SHIFT_IGD 15 /* i830, required in DVO non-gang */ #define PLL_P2_DIVIDE_BY_4 (1 << 23) #define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */ @@ -501,10 +503,12 @@ #define FPB0 0x06048 #define FPB1 0x0604c #define FP_N_DIV_MASK 0x003f0000 +#define FP_N_IGD_DIV_MASK 0x00ff0000 #define FP_N_DIV_SHIFT 16 #define FP_M1_DIV_MASK 0x00003f00 #define FP_M1_DIV_SHIFT 8 #define FP_M2_DIV_MASK 0x0000003f +#define FP_M2_IGD_DIV_MASK 0x000000ff #define FP_M2_DIV_SHIFT 0 #define DPLL_TEST 0x606c #define DPLLB_TEST_SDVO_DIV_1 (0 << 22) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 0d40b4b..d9c50ff 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -92,18 +92,32 @@ struct intel_limit { #define I9XX_DOT_MAX 400000 #define 
I9XX_VCO_MIN 1400000 #define I9XX_VCO_MAX 2800000 +#define IGD_VCO_MIN 1700000 +#define IGD_VCO_MAX 3500000 #define I9XX_N_MIN 1 #define I9XX_N_MAX 6 +/* IGD's Ncounter is a ring counter */ +#define IGD_N_MIN 3 +#define IGD_N_MAX 6 #define I9XX_M_MIN 70 #define I9XX_M_MAX 120 +#define IGD_M_MIN 2 +#define IGD_M_MAX 256 #define I9XX_M1_MIN 10 #define I9XX_M1_MAX 22 #define I9XX_M2_MIN 5 #define I9XX_M2_MAX 9 +/* IGD M1 is reserved, and must be 0 */ +#define IGD_M1_MIN 0 +#define IGD_M1_MAX 0 +#define IGD_M2_MIN 0 +#define IGD_M2_MAX 254 #define I9XX_P_SDVO_DAC_MIN 5 #define I9XX_P_SDVO_DAC_MAX 80 #define I9XX_P_LVDS_MIN 7 #define I9XX_P_LVDS_MAX 98 +#define IGD_P_LVDS_MIN 7 +#define IGD_P_LVDS_MAX 112 #define I9XX_P1_MIN 1 #define I9XX_P1_MAX 8 #define I9XX_P2_SDVO_DAC_SLOW 10 @@ -121,6 +135,8 @@ struct intel_limit { #define INTEL_LIMIT_G4X_HDMI_DAC 5 #define INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS 6 #define INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS 7 +#define INTEL_LIMIT_IGD_SDVO_DAC 8 +#define INTEL_LIMIT_IGD_LVDS 9 /*The parameter is for SDVO on G4x platform*/ #define G4X_DOT_SDVO_MIN 25000 @@ -340,6 +356,32 @@ static const intel_limit_t intel_limits[] = { }, .find_pll = intel_g4x_find_best_PLL, }, + { /* INTEL_LIMIT_IGD_SDVO */ + .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX}, + .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX }, + .n = { .min = IGD_N_MIN, .max = IGD_N_MAX }, + .m = { .min = IGD_M_MIN, .max = IGD_M_MAX }, + .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX }, + .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX }, + .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX }, + .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, + .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, + .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, + }, + { /* INTEL_LIMIT_IGD_LVDS */ + .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, + .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX }, + .n = { .min = IGD_N_MIN, .max = IGD_N_MAX }, + .m = { .min = IGD_M_MIN, .max = IGD_M_MAX }, + .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX }, + .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX }, + .p = { .min = IGD_P_LVDS_MIN, .max = IGD_P_LVDS_MAX }, + .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, + /* IGD only supports single-channel mode. 
*/ + .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, + .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, + }, + }; static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) @@ -376,11 +418,16 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc) if (IS_G4X(dev)) { limit = intel_g4x_limit(crtc); - } else if (IS_I9XX(dev)) { + } else if (IS_I9XX(dev) && !IS_IGD(dev)) { if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS]; else limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC]; + } else if (IS_IGD(dev)) { + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) + limit = &intel_limits[INTEL_LIMIT_IGD_LVDS]; + else + limit = &intel_limits[INTEL_LIMIT_IGD_SDVO_DAC]; } else { if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS]; @@ -390,8 +437,21 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc) return limit; } -static void intel_clock(int refclk, intel_clock_t *clock) +/* m1 is reserved as 0 in IGD, n is a ring counter */ +static void igd_clock(int refclk, intel_clock_t *clock) { + clock->m = clock->m2 + 2; + clock->p = clock->p1 * clock->p2; + clock->vco = refclk * clock->m / clock->n; + clock->dot = clock->vco / clock->p; +} + +static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock) +{ + if (IS_IGD(dev)) { + igd_clock(refclk, clock); + return; + } clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); clock->p = clock->p1 * clock->p2; clock->vco = refclk * clock->m / (clock->n + 2); @@ -427,6 +487,7 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type) static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock) { const intel_limit_t *limit = intel_limit (crtc); + struct drm_device *dev = crtc->dev; if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) INTELPllInvalid ("p1 out of range\n"); @@ -436,7 +497,7 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock) INTELPllInvalid ("m2 out of range\n"); if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) INTELPllInvalid ("m1 out of range\n"); - if (clock->m1 <= clock->m2) + if (clock->m1 <= clock->m2 && !IS_IGD(dev)) INTELPllInvalid ("m1 <= m2\n"); if (clock->m < limit->m.min || limit->m.max < clock->m) INTELPllInvalid ("m out of range\n"); @@ -486,15 +547,17 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, memset (best_clock, 0, sizeof (*best_clock)); for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { - for (clock.m2 = limit->m2.min; clock.m2 < clock.m1 && - clock.m2 <= limit->m2.max; clock.m2++) { + for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) { + /* m1 is always 0 in IGD */ + if (clock.m2 >= clock.m1 && !IS_IGD(dev)) + break; for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) { for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max; clock.p1++) { int this_err; - intel_clock(refclk, &clock); + intel_clock(dev, refclk, &clock); if (!intel_PLL_is_valid(crtc, &clock)) continue; @@ -551,7 +614,7 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, clock.p1 >= limit->p1.min; clock.p1--) { int this_err; - intel_clock(refclk, &clock); + intel_clock(dev, refclk, &clock); if (!intel_PLL_is_valid(crtc, &clock)) continue; this_err = abs(clock.dot - target) ; @@ -888,7 +951,7 @@ static int intel_get_core_clock_speed(struct drm_device *dev) return 400000; else if (IS_I915G(dev)) return 333000; - else if (IS_I945GM(dev) || IS_845G(dev)) + else 
if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev)) return 200000; else if (IS_I915GM(dev)) { u16 gcfgc = 0; @@ -1043,7 +1106,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, return -EINVAL; } - fp = clock.n << 16 | clock.m1 << 8 | clock.m2; + if (IS_IGD(dev)) + fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; + else + fp = clock.n << 16 | clock.m1 << 8 | clock.m2; dpll = DPLL_VGA_MODE_DIS; if (IS_I9XX(dev)) { @@ -1060,7 +1126,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, } /* compute bitmask from p1 value */ - dpll |= (1 << (clock.p1 - 1)) << 16; + if (IS_IGD(dev)) + dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_IGD; + else + dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; switch (clock.p2) { case 5: dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; @@ -1540,10 +1609,20 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) fp = I915_READ((pipe == 0) ? FPA1 : FPB1); clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; - clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; - clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; + if (IS_IGD(dev)) { + clock.n = ffs((fp & FP_N_IGD_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; + clock.m2 = (fp & FP_M2_IGD_DIV_MASK) >> FP_M2_DIV_SHIFT; + } else { + clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; + clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; + } + if (IS_I9XX(dev)) { - clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> + if (IS_IGD(dev)) + clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_IGD) >> + DPLL_FPA01_P1_POST_DIV_SHIFT_IGD); + else + clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> DPLL_FPA01_P1_POST_DIV_SHIFT); switch (dpll & DPLL_MODE_MASK) { @@ -1562,7 +1641,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) } /* XXX: Handle the 100Mhz refclk */ - intel_clock(96000, &clock); + intel_clock(dev, 96000, &clock); } else { bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN); @@ -1574,9 +1653,9 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) { /* XXX: might not be 66MHz */ - intel_clock(66000, &clock); + intel_clock(dev, 66000, &clock); } else - intel_clock(48000, &clock); + intel_clock(dev, 48000, &clock); } else { if (dpll & PLL_P1_DIVIDE_BY_TWO) clock.p1 = 2; @@ -1589,7 +1668,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) else clock.p2 = 2; - intel_clock(48000, &clock); + intel_clock(dev, 48000, &clock); } } diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 5165f24..76c4c82 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h @@ -418,4 +418,6 @@ {0x8086, 0x2e02, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ + {0x8086, 0xa001, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ + {0x8086, 0xa011, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ {0, 0, 0} -- cgit v0.10.2 From ba01079c71559304771f9d741c9bbe8b2eac22a2 Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Wed, 4 Mar 2009 20:23:02 +0800 Subject: drm/i915: TV modes' parameters sync up with 2D driver This covers at least: TV: subcarrier fix for NTSC and PAL TV: fix timing parameters for PAL, 480p, 1080i Signed-off-by: Zhenyu Wang Signed-off-by: Eric Anholt diff 
--git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 56485d6..0e60685 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -217,8 +217,8 @@ static const u32 filter_table[] = { */ static const struct color_conversion ntsc_m_csc_composite = { .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, - .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00, - .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00, + .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200, + .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200, }; static const struct video_levels ntsc_m_levels_composite = { @@ -226,9 +226,9 @@ static const struct video_levels ntsc_m_levels_composite = { }; static const struct color_conversion ntsc_m_csc_svideo = { - .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134, - .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00, - .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00, + .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133, + .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200, + .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200, }; static const struct video_levels ntsc_m_levels_svideo = { @@ -237,8 +237,8 @@ static const struct video_levels ntsc_m_levels_svideo = { static const struct color_conversion ntsc_j_csc_composite = { .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0119, - .ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0f00, - .rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0f00, + .ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0200, + .rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0200, }; static const struct video_levels ntsc_j_levels_composite = { @@ -247,8 +247,8 @@ static const struct video_levels ntsc_j_levels_composite = { static const struct color_conversion ntsc_j_csc_svideo = { .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x014c, - .ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0f00, - .rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0f00, + .ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0200, + .rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0200, }; static const struct video_levels ntsc_j_levels_svideo = { @@ -257,8 +257,8 @@ static const struct video_levels ntsc_j_levels_svideo = { static const struct color_conversion pal_csc_composite = { .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0113, - .ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0f00, - .rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0f00, + .ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0200, + .rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0200, }; static const struct video_levels pal_levels_composite = { @@ -267,8 +267,8 @@ static const struct video_levels pal_levels_composite = { static const struct color_conversion pal_csc_svideo = { .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145, - .ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0f00, - .rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0f00, + .ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0200, + .rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0200, }; static const struct video_levels pal_levels_svideo = { @@ -277,8 +277,8 @@ static const struct video_levels pal_levels_svideo = { static const struct color_conversion pal_m_csc_composite = { .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, - .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00, - .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00, + .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200, + .rv = 0x0340, .gv = 0x030c, .bv = 
0x06d0, .av = 0x0200, }; static const struct video_levels pal_m_levels_composite = { @@ -286,9 +286,9 @@ static const struct video_levels pal_m_levels_composite = { }; static const struct color_conversion pal_m_csc_svideo = { - .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134, - .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00, - .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00, + .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133, + .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200, + .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200, }; static const struct video_levels pal_m_levels_svideo = { @@ -297,8 +297,8 @@ static const struct video_levels pal_m_levels_svideo = { static const struct color_conversion pal_n_csc_composite = { .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, - .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00, - .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00, + .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200, + .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200, }; static const struct video_levels pal_n_levels_composite = { @@ -306,9 +306,9 @@ static const struct video_levels pal_n_levels_composite = { }; static const struct color_conversion pal_n_csc_svideo = { - .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134, - .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00, - .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00, + .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133, + .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200, + .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200, }; static const struct video_levels pal_n_levels_svideo = { @@ -319,9 +319,9 @@ static const struct video_levels pal_n_levels_svideo = { * Component connections */ static const struct color_conversion sdtv_csc_yprpb = { - .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0146, - .ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0f00, - .rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0f00, + .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145, + .ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0200, + .rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0200, }; static const struct color_conversion sdtv_csc_rgb = { @@ -331,9 +331,9 @@ static const struct color_conversion sdtv_csc_rgb = { }; static const struct color_conversion hdtv_csc_yprpb = { - .ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0146, - .ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0f00, - .rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0f00, + .ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0145, + .ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0200, + .rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0200, }; static const struct color_conversion hdtv_csc_rgb = { @@ -414,7 +414,7 @@ struct tv_mode { static const struct tv_mode tv_modes[] = { { .name = "NTSC-M", - .clock = 107520, + .clock = 108000, .refresh = 29970, .oversample = TV_OVERSAMPLE_8X, .component_only = 0, @@ -442,8 +442,8 @@ static const struct tv_mode tv_modes[] = { .vburst_start_f4 = 10, .vburst_end_f4 = 240, /* desired 3.5800000 actual 3.5800000 clock 107.52 */ - .dda1_inc = 136, - .dda2_inc = 7624, .dda2_size = 20013, + .dda1_inc = 135, + .dda2_inc = 20800, .dda2_size = 27456, .dda3_inc = 0, .dda3_size = 0, .sc_reset = TV_SC_RESET_EVERY_4, .pal_burst = false, @@ -457,7 +457,7 @@ static const struct tv_mode tv_modes[] = { }, { .name = "NTSC-443", - .clock = 107520, + .clock = 108000, .refresh = 29970, .oversample = TV_OVERSAMPLE_8X, .component_only = 0, @@ 
-485,10 +485,10 @@ static const struct tv_mode tv_modes[] = { /* desired 4.4336180 actual 4.4336180 clock 107.52 */ .dda1_inc = 168, - .dda2_inc = 18557, .dda2_size = 20625, - .dda3_inc = 0, .dda3_size = 0, - .sc_reset = TV_SC_RESET_EVERY_8, - .pal_burst = true, + .dda2_inc = 4093, .dda2_size = 27456, + .dda3_inc = 310, .dda3_size = 525, + .sc_reset = TV_SC_RESET_NEVER, + .pal_burst = false, .composite_levels = &ntsc_m_levels_composite, .composite_color = &ntsc_m_csc_composite, @@ -499,7 +499,7 @@ static const struct tv_mode tv_modes[] = { }, { .name = "NTSC-J", - .clock = 107520, + .clock = 108000, .refresh = 29970, .oversample = TV_OVERSAMPLE_8X, .component_only = 0, @@ -527,8 +527,8 @@ static const struct tv_mode tv_modes[] = { .vburst_start_f4 = 10, .vburst_end_f4 = 240, /* desired 3.5800000 actual 3.5800000 clock 107.52 */ - .dda1_inc = 136, - .dda2_inc = 7624, .dda2_size = 20013, + .dda1_inc = 135, + .dda2_inc = 20800, .dda2_size = 27456, .dda3_inc = 0, .dda3_size = 0, .sc_reset = TV_SC_RESET_EVERY_4, .pal_burst = false, @@ -542,7 +542,7 @@ static const struct tv_mode tv_modes[] = { }, { .name = "PAL-M", - .clock = 107520, + .clock = 108000, .refresh = 29970, .oversample = TV_OVERSAMPLE_8X, .component_only = 0, @@ -570,11 +570,11 @@ static const struct tv_mode tv_modes[] = { .vburst_start_f4 = 10, .vburst_end_f4 = 240, /* desired 3.5800000 actual 3.5800000 clock 107.52 */ - .dda1_inc = 136, - .dda2_inc = 7624, .dda2_size = 20013, + .dda1_inc = 135, + .dda2_inc = 16704, .dda2_size = 27456, .dda3_inc = 0, .dda3_size = 0, - .sc_reset = TV_SC_RESET_EVERY_4, - .pal_burst = false, + .sc_reset = TV_SC_RESET_EVERY_8, + .pal_burst = true, .composite_levels = &pal_m_levels_composite, .composite_color = &pal_m_csc_composite, @@ -586,7 +586,7 @@ static const struct tv_mode tv_modes[] = { { /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */ .name = "PAL-N", - .clock = 107520, + .clock = 108000, .refresh = 25000, .oversample = TV_OVERSAMPLE_8X, .component_only = 0, @@ -615,9 +615,9 @@ static const struct tv_mode tv_modes[] = { /* desired 4.4336180 actual 4.4336180 clock 107.52 */ - .dda1_inc = 168, - .dda2_inc = 18557, .dda2_size = 20625, - .dda3_inc = 0, .dda3_size = 0, + .dda1_inc = 135, + .dda2_inc = 23578, .dda2_size = 27648, + .dda3_inc = 134, .dda3_size = 625, .sc_reset = TV_SC_RESET_EVERY_8, .pal_burst = true, @@ -631,12 +631,12 @@ static const struct tv_mode tv_modes[] = { { /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */ .name = "PAL", - .clock = 107520, + .clock = 108000, .refresh = 25000, .oversample = TV_OVERSAMPLE_8X, .component_only = 0, - .hsync_end = 64, .hblank_end = 128, + .hsync_end = 64, .hblank_end = 142, .hblank_start = 844, .htotal = 863, .progressive = false, .trilevel_sync = false, @@ -659,8 +659,8 @@ static const struct tv_mode tv_modes[] = { /* desired 4.4336180 actual 4.4336180 clock 107.52 */ .dda1_inc = 168, - .dda2_inc = 18557, .dda2_size = 20625, - .dda3_inc = 0, .dda3_size = 0, + .dda2_inc = 4122, .dda2_size = 27648, + .dda3_inc = 67, .dda3_size = 625, .sc_reset = TV_SC_RESET_EVERY_8, .pal_burst = true, @@ -689,7 +689,7 @@ static const struct tv_mode tv_modes[] = { .veq_ena = false, .vi_end_f1 = 44, .vi_end_f2 = 44, - .nbr_end = 496, + .nbr_end = 479, .burst_ena = false, @@ -713,7 +713,7 @@ static const struct tv_mode tv_modes[] = { .veq_ena = false, .vi_end_f1 = 44, .vi_end_f2 = 44, - .nbr_end = 496, + .nbr_end = 479, .burst_ena = false, @@ -876,7 +876,7 @@ static const struct tv_mode tv_modes[] = { .component_only = 1, .hsync_end = 
88, .hblank_end = 235, - .hblank_start = 2155, .htotal = 2200, + .hblank_start = 2155, .htotal = 2201, .progressive = false, .trilevel_sync = true, -- cgit v0.10.2 From 6bcdcd9e3c09d133e3278edabebc314a2451b74a Mon Sep 17 00:00:00 2001 From: Zhao Yakui Date: Tue, 3 Mar 2009 18:06:42 +0800 Subject: drm/i915: Sync mode_valid/mode_set with intel video driver This covers: Limit CRT DAC speed better. and also clears the border color in case it's set to some garbage, which would fix ugly outlines in the blank regions of the CRT. Signed-off-by: Zhao Yakui [anholt: replaced *drm_dev with *dev] Signed-off-by: Eric Anholt diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index dcaed34..e58defa 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -64,11 +64,21 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode) static int intel_crt_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { + struct drm_device *dev = connector->dev; + + int max_clock = 0; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; - if (mode->clock > 400000 || mode->clock < 25000) - return MODE_CLOCK_RANGE; + if (mode->clock < 25000) + return MODE_CLOCK_LOW; + + if (!IS_I9XX(dev)) + max_clock = 350000; + else + max_clock = 400000; + if (mode->clock > max_clock) + return MODE_CLOCK_HIGH; return MODE_OK; } @@ -113,10 +123,13 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) adpa |= ADPA_VSYNC_ACTIVE_HIGH; - if (intel_crtc->pipe == 0) + if (intel_crtc->pipe == 0) { adpa |= ADPA_PIPE_A_SELECT; - else + I915_WRITE(BCLRPAT_A, 0); + } else { adpa |= ADPA_PIPE_B_SELECT; + I915_WRITE(BCLRPAT_B, 0); + } I915_WRITE(ADPA, adpa); } -- cgit v0.10.2 From 771cb081354161eea21534ba58e5cc1a2db94a25 Mon Sep 17 00:00:00 2001 From: Zhao Yakui Date: Tue, 3 Mar 2009 18:07:52 +0800 Subject: drm/i915: Sync crt hotplug detection with intel video driver This covers: Use long crt hotplug activation time on GM45. 
Signed-off-by: Zhao Yakui Signed-off-by: Eric Anholt diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 6d56777..05b1894 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -633,6 +633,22 @@ #define TV_HOTPLUG_INT_EN (1 << 18) #define CRT_HOTPLUG_INT_EN (1 << 9) #define CRT_HOTPLUG_FORCE_DETECT (1 << 3) +#define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8) +/* must use period 64 on GM45 according to docs */ +#define CRT_HOTPLUG_ACTIVATION_PERIOD_64 (1 << 8) +#define CRT_HOTPLUG_DAC_ON_TIME_2M (0 << 7) +#define CRT_HOTPLUG_DAC_ON_TIME_4M (1 << 7) +#define CRT_HOTPLUG_VOLTAGE_COMPARE_40 (0 << 5) +#define CRT_HOTPLUG_VOLTAGE_COMPARE_50 (1 << 5) +#define CRT_HOTPLUG_VOLTAGE_COMPARE_60 (2 << 5) +#define CRT_HOTPLUG_VOLTAGE_COMPARE_70 (3 << 5) +#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK (3 << 5) +#define CRT_HOTPLUG_DETECT_DELAY_1G (0 << 4) +#define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4) +#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) +#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) +#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */ + #define PORT_HOTPLUG_STAT 0x61114 #define HDMIB_HOTPLUG_INT_STATUS (1 << 29) diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index e58defa..2b6d443 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -146,20 +146,39 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 temp; - - unsigned long timeout = jiffies + msecs_to_jiffies(1000); - - temp = I915_READ(PORT_HOTPLUG_EN); - - I915_WRITE(PORT_HOTPLUG_EN, - temp | CRT_HOTPLUG_FORCE_DETECT | (1 << 5)); + u32 hotplug_en; + int i, tries = 0; + /* + * On 4 series desktop, CRT detect sequence need to be done twice + * to get a reliable result. + */ - do { - if (!(I915_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT)) - break; - msleep(1); - } while (time_after(timeout, jiffies)); + if (IS_G4X(dev) && !IS_GM45(dev)) + tries = 2; + else + tries = 1; + hotplug_en = I915_READ(PORT_HOTPLUG_EN); + hotplug_en &= ~(CRT_HOTPLUG_MASK); + hotplug_en |= CRT_HOTPLUG_FORCE_DETECT; + + if (IS_GM45(dev)) + hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; + + hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; + + for (i = 0; i < tries ; i++) { + unsigned long timeout; + /* turn on the FORCE_DETECT */ + I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); + timeout = jiffies + msecs_to_jiffies(1000); + /* wait for FORCE_DETECT to go off */ + do { + if (!(I915_READ(PORT_HOTPLUG_EN) & + CRT_HOTPLUG_FORCE_DETECT)) + break; + msleep(1); + } while (time_after(timeout, jiffies)); + } if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) == CRT_HOTPLUG_MONITOR_COLOR) -- cgit v0.10.2 From 02c5dd985ddc5407aa9cc7e0f4456ca63b294f16 Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Wed, 4 Mar 2009 19:36:01 +0800 Subject: drm/i915: Fix TV get_modes to return modes count The get_modes hook must return the number of modes added. This also fixes TV mode's clock calculation int overflow issue, and use 0.01 precision for mode refresh validation. 
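The clock part of the fix is a move to 64-bit arithmetic: the refresh value stored in the driver's tv_mode table (in milli-Hz, e.g. 29970 for 29.97 Hz) multiplied by htotal and vtotal overflows a 32-bit int, so the product is accumulated in a u64 and reduced with div_u64() to yield the pixel clock in kHz. The following is only an illustrative sketch of that pattern; the helper name and the unit comments are assumptions, not part of the patch:

#include <linux/math64.h>

/* refresh is in milli-Hz, htotal/vtotal are the mode totals; doing the
 * multiply in u64 avoids the 32-bit overflow, and dividing by 1,000,000
 * turns the milli-Hz pixel rate into kHz. */
static int tv_mode_clock_khz(u64 refresh, int htotal, int vtotal)
{
	u64 tmp = refresh * vtotal;

	tmp *= htotal;
	return (int)div_u64(tmp, 1000000);
}
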
Signed-off-by: Zhenyu Wang Signed-off-by: Eric Anholt diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 0e60685..08c4034 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1082,7 +1082,7 @@ intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mo const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); /* Ensure TV refresh is close to desired refresh */ - if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode)) < 1) + if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode)) < 10) return MODE_OK; return MODE_CLOCK_RANGE; } @@ -1495,7 +1495,8 @@ intel_tv_get_modes(struct drm_connector *connector) struct drm_display_mode *mode_ptr; struct intel_output *intel_output = to_intel_output(connector); const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); - int j; + int j, count = 0; + u64 tmp; for (j = 0; j < sizeof(input_res_table) / sizeof(input_res_table[0]); j++) { @@ -1510,8 +1511,9 @@ intel_tv_get_modes(struct drm_connector *connector) && !tv_mode->component_only)) continue; - mode_ptr = drm_calloc(1, sizeof(struct drm_display_mode), - DRM_MEM_DRIVER); + mode_ptr = drm_mode_create(connector->dev); + if (!mode_ptr) + continue; strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN); mode_ptr->hdisplay = hactive_s; @@ -1528,15 +1530,17 @@ intel_tv_get_modes(struct drm_connector *connector) mode_ptr->vsync_end = mode_ptr->vsync_start + 1; mode_ptr->vtotal = vactive_s + 33; - mode_ptr->clock = (int) (tv_mode->refresh * - mode_ptr->vtotal * - mode_ptr->htotal / 1000) / 1000; + tmp = (u64) tv_mode->refresh * mode_ptr->vtotal; + tmp *= mode_ptr->htotal; + tmp = div_u64(tmp, 1000000); + mode_ptr->clock = (int) tmp; mode_ptr->type = DRM_MODE_TYPE_DRIVER; drm_mode_probed_add(connector, mode_ptr); + count++; } - return 0; + return count; } static void -- cgit v0.10.2 From d2d9f23240a7ec29a496ee072ffdf69c4f6cdc76 Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Wed, 4 Mar 2009 19:36:02 +0800 Subject: drm/i915: TV mode_set sync up with 2D driver Fix TV control save register for untouched bits, and color knobs different definition for 945 and 965 chips. 
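The "untouched bits" part amounts to a read-modify-write of TV_CTL: read the current register value, mask it down to the bits that must survive a mode set (the widened TV_CTL_SAVE mask), and only then OR in the newly computed control fields. A minimal sketch of that pattern, using the driver's register names but with a helper that exists only for illustration:

/* Start from the hardware's current TV_CTL and keep only the documented
 * must-save bits, so reserved and fuse state is not clobbered. */
static u32 tv_ctl_with_preserved_bits(drm_i915_private_t *dev_priv,
				      u32 new_fields)
{
	u32 tv_ctl = I915_READ(TV_CTL);

	tv_ctl &= TV_CTL_SAVE;
	return tv_ctl | new_fields;
}
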
Signed-off-by: Zhenyu Wang Signed-off-by: Eric Anholt diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 05b1894..377cc58 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -876,7 +876,7 @@ */ # define TV_ENC_C0_FIX (1 << 10) /** Bits that must be preserved by software */ -# define TV_CTL_SAVE ((3 << 8) | (3 << 6)) +# define TV_CTL_SAVE ((1 << 11) | (3 << 9) | (7 << 6) | 0xf) # define TV_FUSE_STATE_MASK (3 << 4) /** Read-only state that reports all features enabled */ # define TV_FUSE_STATE_ENABLED (0 << 4) diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 08c4034..7021798 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1135,7 +1135,8 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, if (!tv_mode) return; /* can't happen (mode_prepare prevents this) */ - tv_ctl = 0; + tv_ctl = I915_READ(TV_CTL); + tv_ctl &= TV_CTL_SAVE; switch (tv_priv->type) { default: @@ -1215,7 +1216,6 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, /* dda1 implies valid video levels */ if (tv_mode->dda1_inc) { scctl1 |= TV_SC_DDA1_EN; - scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT; } if (tv_mode->dda2_inc) @@ -1225,6 +1225,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, scctl1 |= TV_SC_DDA3_EN; scctl1 |= tv_mode->sc_reset; + scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT; scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT; scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT | @@ -1266,7 +1267,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, color_conversion->av); } - I915_WRITE(TV_CLR_KNOBS, 0x00606000); + if (IS_I965G(dev)) + I915_WRITE(TV_CLR_KNOBS, 0x00404000); + else + I915_WRITE(TV_CLR_KNOBS, 0x00606000); + if (video_levels) I915_WRITE(TV_CLR_LEVEL, ((video_levels->black << TV_BLACK_LEVEL_SHIFT) | -- cgit v0.10.2 From bf5a269a4cc966f783b9faaf3fffd8fa31b53383 Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Wed, 4 Mar 2009 19:36:03 +0800 Subject: drm/i915: TV detection fix Check that the encoder has a real enabled crtc for TV detect, and fix missing TV type setting after detect. Signed-off-by: Zhenyu Wang Signed-off-by: Eric Anholt diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 7021798..ceca947 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1406,6 +1406,7 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output) tv_dac = I915_READ(TV_DAC); I915_WRITE(TV_DAC, save_tv_dac); I915_WRITE(TV_CTL, save_tv_ctl); + intel_wait_for_vblank(dev); } /* * A B C @@ -1456,7 +1457,7 @@ intel_tv_detect(struct drm_connector *connector) mode = reported_modes[0]; drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); - if (encoder->crtc) { + if (encoder->crtc && encoder->crtc->enabled) { type = intel_tv_detect_type(encoder->crtc, intel_output); } else { crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode); @@ -1467,6 +1468,8 @@ intel_tv_detect(struct drm_connector *connector) type = -1; } + tv_priv->type = type; + if (type < 0) return connector_status_disconnected; -- cgit v0.10.2 From 98787c057fdefdce6230ff46f2c1105835005a4c Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 6 Mar 2009 23:27:52 +0000 Subject: drm/i915: Check for dev->primary->master before dereference. 
I've hit the occasional oops inside i915_wait_ring() with an indication of a NULL derefence of dev->primary->master. Adding a NULL check is consistent with the other potential users of dev->primary->master. Signed-off-by: Chris Wilson Signed-off-by: Eric Anholt diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index ae83fe0..a818b37 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -41,7 +41,6 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller) { drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; drm_i915_ring_buffer_t *ring = &(dev_priv->ring); u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD; u32 last_acthd = I915_READ(acthd_reg); @@ -58,8 +57,12 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller) if (ring->space >= n) return 0; - if (master_priv->sarea_priv) - master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; + if (dev->primary->master) { + struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; + if (master_priv->sarea_priv) + master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; + } + if (ring->head != last_head) i = 0; -- cgit v0.10.2 From 2b5cde2b272f56ec67b56a2af8c067d42eff7328 Mon Sep 17 00:00:00 2001 From: Li Peng Date: Fri, 13 Mar 2009 10:25:07 +0800 Subject: drm/i915: Fix LVDS dither setting Update bdb_lvds_options structure according to its defination in 2D driver. Then we can parse and set 'lvds_dither' bit correctly on non-965 chips. Signed-off-by: Li Peng Signed-off-by: Eric Anholt diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index 5ea715a..de621aa 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h @@ -162,13 +162,13 @@ struct bdb_lvds_options { u8 panel_type; u8 rsvd1; /* LVDS capabilities, stored in a dword */ - u8 rsvd2:1; - u8 lvds_edid:1; - u8 pixel_dither:1; - u8 pfit_ratio_auto:1; - u8 pfit_gfx_mode_enhanced:1; - u8 pfit_text_mode_enhanced:1; u8 pfit_mode:2; + u8 pfit_text_mode_enhanced:1; + u8 pfit_gfx_mode_enhanced:1; + u8 pfit_ratio_auto:1; + u8 pixel_dither:1; + u8 lvds_edid:1; + u8 rsvd2:1; u8 rsvd4; } __attribute__((packed)); diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 0d211af..6619f26 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -265,7 +265,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, pfit_control = 0; if (!IS_I965G(dev)) { - if (dev_priv->panel_wants_dither) + if (dev_priv->panel_wants_dither || dev_priv->lvds_dither) pfit_control |= PANEL_8TO6_DITHER_ENABLE; } else -- cgit v0.10.2 From 5b28beaf88436fa44fc25ee27a2fadffb75f222e Mon Sep 17 00:00:00 2001 From: Li Yang Date: Fri, 27 Mar 2009 15:54:30 -0700 Subject: gianfar: only check headroom when FCB is needed Signed-off-by: Li Yang Signed-off-by: David S. 
Miller diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 44cbf26..6a38800 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -1310,8 +1310,10 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) base = priv->tx_bd_base; - /* make space for additional header */ - if (skb_headroom(skb) < GMAC_FCB_LEN) { + /* make space for additional header when fcb is needed */ + if (((skb->ip_summed == CHECKSUM_PARTIAL) || + (priv->vlgrp && vlan_tx_tag_present(skb))) && + (skb_headroom(skb) < GMAC_FCB_LEN)) { struct sk_buff *skb_new; skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN); -- cgit v0.10.2 From cc0be3227df9146968311308a9d19db1469ce1db Mon Sep 17 00:00:00 2001 From: Dmitri Vorobiev Date: Fri, 27 Mar 2009 15:55:36 -0700 Subject: net: Add missing include into include/linux/netdevice.h The inline function skb_gro_mac_header defined in include/linux/netdevice.h makes use of page_address(). Depending on configuration options, the latter is either defined as a macro or is declared as a function in another header file, namely include/linux/mm.h. However, include/linux/netdevice.h does not include include/linux/mm.h. On MIPS, this has produced the following build error: CC kernel/sysctl_check.o In file included from include/linux/icmpv6.h:173, from include/linux/ipv6.h:208, from include/net/ip_vs.h:26, from kernel/sysctl_check.c:6: include/linux/netdevice.h: In function 'skb_gro_mac_header': include/linux/netdevice.h:1132: error: implicit declaration of function 'page_address' include/linux/netdevice.h:1133: warning: pointer/integer type mismatch in conditional expression make[1]: *** [kernel/sysctl_check.o] Error 1 make: *** [kernel] Error 2 The patch adds the missing include and fixes the build error. Signed-off-by: Dmitri Vorobiev Signed-off-by: David S. Miller diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index be3ebd7..1b55952 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -32,6 +32,7 @@ #ifdef __KERNEL__ #include #include +#include #include #include #include -- cgit v0.10.2 From 79675900cbf2c4e67e95f94983ec4ee800b83739 Mon Sep 17 00:00:00 2001 From: Anton Vorontsov Date: Fri, 27 Mar 2009 16:00:03 -0700 Subject: ucc_geth: Fix three oopses in PHY {de,}initialization code When there are no free snums, UCC ethernet should gracefully fail, but currently it oopses this way: # ifconfig eth0 up fill_init_enet_entries: Can not get SNUM. ucc_geth_startup: Can not fill p_init_enet_param_shadow. eth0: Cannot configure net device, aborting. Unable to handle kernel paging request for data at address 0x00000190 Faulting instruction address: 0xc0294c88 Oops: Kernel access of bad area, sig: 11 [#1] [...] NIP [c0294c88] mutex_lock+0x0/0x1c LR [c01b6be8] phy_stop+0x20/0x70 Call Trace: [efb25da0] [efb2eb60] 0xefb2eb60 (unreliable) [efb25db0] [c01b2058] ucc_geth_stop+0x2c/0x8c [efb25dd0] [c01b4194] ucc_geth_open+0x48/0x27c [efb25df0] [c020eec0] dev_open+0xc0/0x118 [...] This is because the ucc_geth_stop() routine assumes that ugeth->phydev is always initialized by the ucc_geth_open(), while it is not in case of errors. If we add a check to the ucc_geth_stop(), then another oops pops up: Unable to handle kernel paging request for data at address 0x00000004 Faulting instruction address: 0xc01b46a4 Oops: Kernel access of bad area, sig: 11 [#1] [...] 
NIP [c01b46a4] adjust_link+0x20/0x1b4 LR [c01b770c] phy_state_machine+0xdc/0x44c Call Trace: [ef83bf10] [c021b388] linkwatch_schedule_work+0x74/0xf8 (unreliable) [ef83bf40] [c01b770c] phy_state_machine+0xdc/0x44c [ef83bf60] [c004c13c] run_workqueue+0xb8/0x148 [ef83bf90] [c004c870] worker_thread+0x70/0xd0 [ef83bfd0] [c00505fc] kthread+0x48/0x84 [ef83bff0] [c000f464] kernel_thread+0x4c/0x68 [...] That one happens because ucc_geth_stop() does not call phy_disconnect() and so phylib state machine is running without any idea that a MAC has just died. Also, when device tree specifies fixed-link, and CONFIG_FIXED_PHY is disabled, we'll get this oops: 0:01 not found eth2: Could not attach to PHY eth2: Cannot initialize PHY, aborting. Unable to handle kernel paging request for data at address 0x00000190 Faulting instruction address: 0xc02967d0 Oops: Kernel access of bad area, sig: 11 [#1] [...] NIP [c02967d0] mutex_lock+0x0/0x1c LR [c01b6bcc] phy_stop+0x20/0x70 Call Trace: [ef82be50] [efb6bb60] 0xefb6bb60 (unreliable) [ef82be60] [c01b2058] ucc_geth_stop+0x2c/0x8c [ef82be80] [c01b4194] ucc_geth_open+0x48/0x27c [ef82bea0] [c0210a04] dev_open+0xc0/0x118 [ef82bec0] [c020f85c] dev_change_flags+0x84/0x1ac [ef82bee0] [c037b768] ic_open_devs+0x168/0x2bc [ef82bf20] [c037ca98] ip_auto_config+0x90/0x28c [ef82bf60] [c0001b9c] do_one_initcall+0x34/0x1a0 [ef82bfd0] [c035e240] do_initcalls+0x38/0x58 [ef82bfe0] [c035e2c4] kernel_init+0x30/0x90 [ef82bff0] [c000f464] kernel_thread+0x4c/0x68 [...] And again, ucc_geth_stop() assumes that ugeth->phydev is there, while it isn't. This patch fixes all three oopses simply by rearranging some code: - In ucc_geth_open(): move init_phy() call to the beginning, so that we only call ucc_geth_stop() with a PHY attached; - Move phy_disconnect() call from ucc_geth_close() to ucc_geth_stop(), so that we'll always disconnect the PHY. Signed-off-by: Anton Vorontsov Signed-off-by: David S. Miller diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index a110326..86a479f 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c @@ -2009,6 +2009,9 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth) /* Disable Rx and Tx */ clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); + phy_disconnect(ugeth->phydev); + ugeth->phydev = NULL; + ucc_geth_memclean(ugeth); } @@ -3345,6 +3348,14 @@ static int ucc_geth_open(struct net_device *dev) return -EINVAL; } + err = init_phy(dev); + if (err) { + if (netif_msg_ifup(ugeth)) + ugeth_err("%s: Cannot initialize PHY, aborting.", + dev->name); + return err; + } + err = ucc_struct_init(ugeth); if (err) { if (netif_msg_ifup(ugeth)) @@ -3381,13 +3392,6 @@ static int ucc_geth_open(struct net_device *dev) &ugeth->ug_regs->macstnaddr1, &ugeth->ug_regs->macstnaddr2); - err = init_phy(dev); - if (err) { - if (netif_msg_ifup(ugeth)) - ugeth_err("%s: Cannot initialize PHY, aborting.", dev->name); - goto out_err; - } - phy_start(ugeth->phydev); err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); @@ -3430,9 +3434,6 @@ static int ucc_geth_close(struct net_device *dev) free_irq(ugeth->ug_info->uf_info.irq, ugeth->dev); - phy_disconnect(ugeth->phydev); - ugeth->phydev = NULL; - netif_stop_queue(dev); return 0; -- cgit v0.10.2 From 0b4d569de222452bcb55a4a536ade6cf4d8d1e30 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Fri, 27 Mar 2009 17:02:09 -0700 Subject: i915: fix wrong 'size_t' format string For the fifteen bazillionth time. 
See also commits f06da264cfb0f9444d41ca247213e419f90aa72a and aeb565dfc3ac4c8b47c5049085b4c7bfb2c7d5d7 ("i915: Fix more size_t format string warnings" and "Fix annoying DRM_ERROR() string warning"). Grr-target: Eric Anholt Grr-target: Chris Wilson Signed-off-by: Linus Torvalds diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c index 5a4cdb5..455ec97 100644 --- a/drivers/gpu/drm/i915/i915_gem_debugfs.c +++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c @@ -192,7 +192,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data) obj_priv = obj->driver_private; seq_printf(m, "Fenced object[%2d] = %p: %s " - "%08x %08x %08x %s %08x %08x %d", + "%08x %08zx %08x %s %08x %08x %d", i, obj, get_pin_flag(obj_priv), obj_priv->gtt_offset, obj->size, obj_priv->stride, -- cgit v0.10.2 From fa56dddd6720c8d4b9fa4c942377d2a019cf3708 Mon Sep 17 00:00:00 2001 From: Alina Friedrichsen Date: Tue, 10 Mar 2009 00:49:46 +0100 Subject: mac80211: ieee80211_ibss_commit() cleanup Don't call ieee80211_sta_find_ibss() directly, like it's done in STA mode, so that the commit() call is more harmless and has fewer side effects. Signed-off-by: Alina Friedrichsen Signed-off-by: John W. Linville diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index f4becc1..3201e1f 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -812,8 +812,9 @@ int ieee80211_ibss_commit(struct ieee80211_sub_if_data *sdata) ifibss->ibss_join_req = jiffies; ifibss->state = IEEE80211_IBSS_MLME_SEARCH; + set_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request); - return ieee80211_sta_find_ibss(sdata); + return 0; } int ieee80211_ibss_set_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t len) -- cgit v0.10.2 From 633e24ed95b6c87b42f201ecb65c12a75a5a7eef Mon Sep 17 00:00:00 2001 From: Reinette Chatre Date: Thu, 12 Mar 2009 09:20:40 -0700 Subject: cfg80211/nl80211: remove usage of CONFIG_NL80211 The scan capability added to cfg80211/nl80211 introduced a dependency on nl80211 by cfg80211. We can thus no longer have just cfg80211 without nl80211. Specifically, cfg80211_scan_done() calls nl80211_send_scan_aborted() or nl80211_send_scan_done(). Now we remove the option for the user to select nl80211. It will always be compiled if the user selects cfg80211. Signed-off-by: Reinette Chatre Signed-off-by: John W. Linville diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig index 092ae6f..d1d18f3 100644 --- a/net/wireless/Kconfig +++ b/net/wireless/Kconfig @@ -10,19 +10,6 @@ config CFG80211_REG_DEBUG If unsure, say N. -config NL80211 - bool "nl80211 new netlink interface support" - depends on CFG80211 - default y - ---help--- - This option turns on the new netlink interface - (nl80211) support in cfg80211. - - If =n, drivers using mac80211 will be configured via - wireless extension support provided by that subsystem. - - If unsure, say Y.
- config WIRELESS_OLD_REGULATORY bool "Old wireless static regulatory definitions" default y diff --git a/net/wireless/Makefile b/net/wireless/Makefile index dad43c2..c157b4d 100644 --- a/net/wireless/Makefile +++ b/net/wireless/Makefile @@ -5,8 +5,7 @@ obj-$(CONFIG_LIB80211_CRYPT_WEP) += lib80211_crypt_wep.o obj-$(CONFIG_LIB80211_CRYPT_CCMP) += lib80211_crypt_ccmp.o obj-$(CONFIG_LIB80211_CRYPT_TKIP) += lib80211_crypt_tkip.o -cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o +cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o cfg80211-$(CONFIG_WIRELESS_EXT) += wext-compat.o -cfg80211-$(CONFIG_NL80211) += nl80211.o ccflags-y += -D__CHECK_ENDIAN__ diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h index e65a3c3..5b5fe13 100644 --- a/net/wireless/nl80211.h +++ b/net/wireless/nl80211.h @@ -3,7 +3,6 @@ #include "core.h" -#ifdef CONFIG_NL80211 extern int nl80211_init(void); extern void nl80211_exit(void); extern void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev); @@ -12,30 +11,5 @@ extern void nl80211_send_scan_done(struct cfg80211_registered_device *rdev, extern void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev, struct net_device *netdev); extern void nl80211_send_reg_change_event(struct regulatory_request *request); -#else -static inline int nl80211_init(void) -{ - return 0; -} -static inline void nl80211_exit(void) -{ -} -static inline void nl80211_notify_dev_rename( - struct cfg80211_registered_device *rdev) -{ -} -static inline void -nl80211_send_scan_done(struct cfg80211_registered_device *rdev, - struct net_device *netdev) -{} -static inline void nl80211_send_scan_aborted( - struct cfg80211_registered_device *rdev, - struct net_device *netdev) -{} -static inline void -nl80211_send_reg_change_event(struct regulatory_request *request) -{ -} -#endif /* CONFIG_NL80211 */ #endif /* __NET_WIRELESS_NL80211_H */ -- cgit v0.10.2 From 14587ce2a8898de959f32dfd505b4871f09930d5 Mon Sep 17 00:00:00 2001 From: Vasanthakumar Thiagarajan Date: Thu, 12 Mar 2009 15:32:54 +0530 Subject: ath9k: Set IEEE80211_TX_CTL_RATE_CTRL_PROBE in rate control for probe rate Signed-off-by: Vasanthakumar Thiagarajan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath9k/rc.c b/drivers/net/wireless/ath9k/rc.c index 8327356..28d69da 100644 --- a/drivers/net/wireless/ath9k/rc.c +++ b/drivers/net/wireless/ath9k/rc.c @@ -864,6 +864,8 @@ static void ath_rc_ratefind(struct ath_softc *sc, rate_table, nrix, 1, 0); ath_rc_rate_set_series(rate_table, &rates[i++], txrc, try_per_rate, nrix, 0); + + tx_info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; } else { try_per_rate = (ATH_11N_TXMAXTRY/4); /* Set the choosen rate. No RTS for first series entry. */ -- cgit v0.10.2 From 176be728ee7d32cfd33702d82c0733e51f66ab5b Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 12 Mar 2009 23:49:28 +0100 Subject: mac80211: remove ieee80211_num_regular_queues This inline is useless and actually makes the code _longer_ rather than shorter. Signed-off-by: Johannes Berg Signed-off-by: John W. 
Linville diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 12a52ef..3bfc6c6 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1017,11 +1017,6 @@ static inline void SET_IEEE80211_PERM_ADDR(struct ieee80211_hw *hw, u8 *addr) memcpy(hw->wiphy->perm_addr, addr, ETH_ALEN); } -static inline int ieee80211_num_regular_queues(struct ieee80211_hw *hw) -{ - return hw->queues; -} - static inline struct ieee80211_rate * ieee80211_get_tx_rate(const struct ieee80211_hw *hw, const struct ieee80211_tx_info *c) diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 841b845..aaf7793 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1834,7 +1834,7 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata) ifmgd->flags |= IEEE80211_STA_CREATE_IBSS | IEEE80211_STA_AUTO_BSSID_SEL | IEEE80211_STA_AUTO_CHANNEL_SEL; - if (ieee80211_num_regular_queues(&sdata->local->hw) >= 4) + if (sdata->local->hw.queues >= 4) ifmgd->flags |= IEEE80211_STA_WMM_ENABLED; } diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 457238a..038460b 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1666,8 +1666,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, } /* receiver and we are QoS enabled, use a QoS type frame */ - if (sta_flags & WLAN_STA_WME && - ieee80211_num_regular_queues(&local->hw) >= 4) { + if ((sta_flags & WLAN_STA_WME) && local->hw.queues >= 4) { fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA); hdrlen += 2; } @@ -1802,7 +1801,7 @@ void ieee80211_clear_tx_pending(struct ieee80211_local *local) int i, j; struct ieee80211_tx_stored_packet *store; - for (i = 0; i < ieee80211_num_regular_queues(&local->hw); i++) { + for (i = 0; i < local->hw.queues; i++) { if (!test_bit(i, local->queues_pending)) continue; store = &local->pending_packet[i]; @@ -1827,7 +1826,7 @@ void ieee80211_tx_pending(unsigned long data) int i, ret; netif_tx_lock_bh(dev); - for (i = 0; i < ieee80211_num_regular_queues(&local->hw); i++) { + for (i = 0; i < local->hw.queues; i++) { /* Check that this queue is ok */ if (__netif_subqueue_stopped(local->mdev, i) && !test_bit(i, local->queues_pending_run)) -- cgit v0.10.2 From 51b381479ff5bc9b8c49ce15fd8bc35c6b695ca4 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 12 Mar 2009 11:16:48 +0100 Subject: mac80211: reduce max number of queues No hw/driver actually supports more than four queues right now, and we allocate a number of things per queue which means we waste a bit of memory. Reduce the maximum number to four to accurately reflect what we do (and need for QoS). Even if we had hardware supporting more queues we couldn't take advantage of that right now anyway. Signed-off-by: Johannes Berg Signed-off-by: John W. Linville diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 3bfc6c6..a38a82c 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -97,7 +97,7 @@ struct ieee80211_ht_bss_info { * for A-MPDU operation. */ enum ieee80211_max_queues { - IEEE80211_MAX_QUEUES = 16, + IEEE80211_MAX_QUEUES = 4, IEEE80211_MAX_AMPDU_QUEUES = 16, }; -- cgit v0.10.2 From 11432379fd2a3854a3408424d8dcd99afd811573 Mon Sep 17 00:00:00 2001 From: Helmut Schaa Date: Thu, 12 Mar 2009 14:04:34 +0100 Subject: mac80211: start pending scan after probe/auth/assoc timed out If a scan is queued in STA mode while the interface is in state direct probe, authenticate or associate the scan is delayed until the interface enters disabled or associated state. 
But in case of direct probe-, authentication- or association- timeout sta_work will not be scheduled anymore (without external trigger) and thus the pending scan is not executed and prevents a new scan from being triggered (-EBUSY). Fix this by queueing the sta work again after direct probe-, authentication- and association- timeout. Signed-off-by: Helmut Schaa Signed-off-by: John W. Linville diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index aaf7793..a558796 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -682,6 +682,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, static void ieee80211_direct_probe(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct ieee80211_local *local = sdata->local; ifmgd->direct_probe_tries++; if (ifmgd->direct_probe_tries > IEEE80211_AUTH_MAX_TRIES) { @@ -697,6 +698,13 @@ static void ieee80211_direct_probe(struct ieee80211_sub_if_data *sdata) ieee80211_rx_bss_remove(sdata, ifmgd->bssid, sdata->local->hw.conf.channel->center_freq, ifmgd->ssid, ifmgd->ssid_len); + + /* + * We might have a pending scan which had no chance to run yet + * due to state == IEEE80211_STA_MLME_DIRECT_PROBE. + * Hence, queue the STAs work again + */ + queue_work(local->hw.workqueue, &ifmgd->work); return; } @@ -721,6 +729,7 @@ static void ieee80211_direct_probe(struct ieee80211_sub_if_data *sdata) static void ieee80211_authenticate(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct ieee80211_local *local = sdata->local; ifmgd->auth_tries++; if (ifmgd->auth_tries > IEEE80211_AUTH_MAX_TRIES) { @@ -732,6 +741,13 @@ static void ieee80211_authenticate(struct ieee80211_sub_if_data *sdata) ieee80211_rx_bss_remove(sdata, ifmgd->bssid, sdata->local->hw.conf.channel->center_freq, ifmgd->ssid, ifmgd->ssid_len); + + /* + * We might have a pending scan which had no chance to run yet + * due to state == IEEE80211_STA_MLME_AUTHENTICATE. + * Hence, queue the STAs work again + */ + queue_work(local->hw.workqueue, &ifmgd->work); return; } @@ -878,6 +894,7 @@ static int ieee80211_privacy_mismatch(struct ieee80211_sub_if_data *sdata) static void ieee80211_associate(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct ieee80211_local *local = sdata->local; ifmgd->assoc_tries++; if (ifmgd->assoc_tries > IEEE80211_ASSOC_MAX_TRIES) { @@ -889,6 +906,12 @@ static void ieee80211_associate(struct ieee80211_sub_if_data *sdata) ieee80211_rx_bss_remove(sdata, ifmgd->bssid, sdata->local->hw.conf.channel->center_freq, ifmgd->ssid, ifmgd->ssid_len); + /* + * We might have a pending scan which had no chance to run yet + * due to state == IEEE80211_STA_MLME_ASSOCIATE. + * Hence, queue the STAs work again + */ + queue_work(local->hw.workqueue, &ifmgd->work); return; } -- cgit v0.10.2 From 4ed96f04f8a1869757f4dd4a9283a18ec63c442f Mon Sep 17 00:00:00 2001 From: Jouni Malinen Date: Thu, 12 Mar 2009 21:53:23 +0200 Subject: ath9k: Add support for multiple virtual AP interfaces This patch fixes the TSF offset calculation for staggered Beacon frames and sets ATH_BCBUF back to the earlier value 4 to enable multi-BSS configurations of up to four BSSes. Signed-off-by: Jouni Malinen Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath9k/ath9k.h b/drivers/net/wireless/ath9k/ath9k.h index b64be8e..5afd244 100644 --- a/drivers/net/wireless/ath9k/ath9k.h +++ b/drivers/net/wireless/ath9k/ath9k.h @@ -390,6 +390,7 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid struct ath_vif { int av_bslot; + __le64 tsf_adjust; /* TSF adjustment for staggered beacons */ enum nl80211_iftype av_opmode; struct ath_buf *av_bcbuf; struct ath_tx_control av_btxctl; @@ -406,7 +407,7 @@ struct ath_vif { * number of beacon intervals, the game's up. */ #define BSTUCK_THRESH (9 * ATH_BCBUF) -#define ATH_BCBUF 1 +#define ATH_BCBUF 4 #define ATH_DEFAULT_BINTVAL 100 /* TU */ #define ATH_DEFAULT_BMISS_LIMIT 10 #define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024) diff --git a/drivers/net/wireless/ath9k/beacon.c b/drivers/net/wireless/ath9k/beacon.c index 039c781..3fd1b86 100644 --- a/drivers/net/wireless/ath9k/beacon.c +++ b/drivers/net/wireless/ath9k/beacon.c @@ -153,6 +153,8 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw, bf->bf_mpdu = skb; if (skb == NULL) return NULL; + ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp = + avp->tsf_adjust; info = IEEE80211_SKB_CB(skb); if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { @@ -253,7 +255,6 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif) { struct ath_softc *sc = aphy->sc; struct ath_vif *avp; - struct ieee80211_hdr *hdr; struct ath_buf *bf; struct sk_buff *skb; __le64 tstamp; @@ -316,42 +317,33 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif) tstamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp; sc->beacon.bc_tstamp = le64_to_cpu(tstamp); - - /* - * Calculate a TSF adjustment factor required for - * staggered beacons. Note that we assume the format - * of the beacon frame leaves the tstamp field immediately - * following the header. - */ + /* Calculate a TSF adjustment factor required for staggered beacons. */ if (avp->av_bslot > 0) { u64 tsfadjust; - __le64 val; int intval; intval = sc->hw->conf.beacon_int ? sc->hw->conf.beacon_int : ATH_DEFAULT_BINTVAL; /* - * The beacon interval is in TU's; the TSF in usecs. - * We figure out how many TU's to add to align the - * timestamp then convert to TSF units and handle - * byte swapping before writing it in the frame. - * The hardware will then add this each time a beacon - * frame is sent. Note that we align vif's 1..N - * and leave vif 0 untouched. This means vap 0 - * has a timestamp in one beacon interval while the - * others get a timestamp aligned to the next interval. + * Calculate the TSF offset for this beacon slot, i.e., the + * number of usecs that need to be added to the timestamp field + * in Beacon and Probe Response frames. Beacon slot 0 is + * processed at the correct offset, so it does not require TSF + * adjustment. Other slots are adjusted to get the timestamp + * close to the TBTT for the BSS. 
*/ - tsfadjust = (intval * (ATH_BCBUF - avp->av_bslot)) / ATH_BCBUF; - val = cpu_to_le64(tsfadjust << 10); /* TU->TSF */ + tsfadjust = intval * avp->av_bslot / ATH_BCBUF; + avp->tsf_adjust = cpu_to_le64(TU_TO_USEC(tsfadjust)); DPRINTF(sc, ATH_DBG_BEACON, "stagger beacons, bslot %d intval %u tsfadjust %llu\n", avp->av_bslot, intval, (unsigned long long)tsfadjust); - hdr = (struct ieee80211_hdr *)skb->data; - memcpy(&hdr[1], &val, sizeof(val)); - } + ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp = + avp->tsf_adjust; + } else + avp->tsf_adjust = cpu_to_le64(0); bf->bf_mpdu = skb; bf->bf_buf_addr = bf->bf_dmacontext = @@ -447,8 +439,16 @@ void ath_beacon_tasklet(unsigned long data) tsf = ath9k_hw_gettsf64(ah); tsftu = TSF_TO_TU(tsf>>32, tsf); slot = ((tsftu % intval) * ATH_BCBUF) / intval; - vif = sc->beacon.bslot[(slot + 1) % ATH_BCBUF]; - aphy = sc->beacon.bslot_aphy[(slot + 1) % ATH_BCBUF]; + /* + * Reverse the slot order to get slot 0 on the TBTT offset that does + * not require TSF adjustment and other slots adding + * slot/ATH_BCBUF * beacon_int to timestamp. For example, with + * ATH_BCBUF = 4, we process beacon slots as follows: 3 2 1 0 3 2 1 .. + * and slot 0 is at correct offset to TBTT. + */ + slot = ATH_BCBUF - slot - 1; + vif = sc->beacon.bslot[slot]; + aphy = sc->beacon.bslot_aphy[slot]; DPRINTF(sc, ATH_DBG_BEACON, "slot %d [tsf %llu tsftu %u intval %u] vif %p\n", -- cgit v0.10.2 From b572b24c578ab1be9d1fcb11d2d8244878757a66 Mon Sep 17 00:00:00 2001 From: "Luis R. Rodriguez" Date: Thu, 12 Mar 2009 18:18:51 -0400 Subject: ath9k: remove dummy PCI "retry timeout" fix Remove the PCI retry timeout code, as it was just taken from ipw2100 for historical reasons but in reality it's a no-op; additionally, it's simply incorrect, as each PCI device has its own custom PCI configuration space at config space offsets >= 0x40. Not to mention we were trying to write 0 to a place that already has 0 on it. Cc: Matthew Garrett Cc: Ben Cahill Cc: Inaky Perez-Gonzalez Tested-by: Adel Gadllah Signed-off-by: Luis R. Rodriguez Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath9k/pci.c b/drivers/net/wireless/ath9k/pci.c index 9a58baa..53572d96 100644 --- a/drivers/net/wireless/ath9k/pci.c +++ b/drivers/net/wireless/ath9k/pci.c @@ -87,7 +87,6 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) struct ath_softc *sc; struct ieee80211_hw *hw; u8 csz; - u32 val; int ret = 0; struct ath_hw *ah; @@ -134,14 +133,6 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_master(pdev); - /* - * Disable the RETRY_TIMEOUT register (0x41) to keep - * PCI Tx retries from interfering with C3 CPU state.
- */ - pci_read_config_dword(pdev, 0x40, &val); - if ((val & 0x0000ff00) != 0) - pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); - ret = pci_request_region(pdev, 0, "ath9k"); if (ret) { dev_err(&pdev->dev, "PCI memory region reserve error\n"); @@ -253,21 +244,12 @@ static int ath_pci_resume(struct pci_dev *pdev) struct ieee80211_hw *hw = pci_get_drvdata(pdev); struct ath_wiphy *aphy = hw->priv; struct ath_softc *sc = aphy->sc; - u32 val; int err; err = pci_enable_device(pdev); if (err) return err; pci_restore_state(pdev); - /* - * Suspend/Resume resets the PCI configuration space, so we have to - * re-disable the RETRY_TIMEOUT register (0x41) to keep - * PCI Tx retries from interfering with C3 CPU state - */ - pci_read_config_dword(pdev, 0x40, &val); - if ((val & 0x0000ff00) != 0) - pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); /* Enable LED */ ath9k_hw_cfg_output(sc->sc_ah, ATH_LED_PIN, -- cgit v0.10.2 From 7d01b221b338ef2a5b1ffa9c60645c0bdcce191e Mon Sep 17 00:00:00 2001 From: Sujith Date: Fri, 13 Mar 2009 08:55:55 +0530 Subject: ath9k: Miscellaneous EEPROM handling cleanup Print the EEPROM version/revision on init. Choose appropriate debug masks on error conditions, and remove useless print messages. Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath9k/eeprom.c b/drivers/net/wireless/ath9k/eeprom.c index 183c949..6c9bc5f 100644 --- a/drivers/net/wireless/ath9k/eeprom.c +++ b/drivers/net/wireless/ath9k/eeprom.c @@ -342,8 +342,7 @@ static int ath9k_hw_4k_get_eeprom_rev(struct ath_hw *ah) static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah) { #define SIZE_EEPROM_4K (sizeof(struct ar5416_eeprom_4k) / sizeof(u16)) - struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k; - u16 *eep_data; + u16 *eep_data = (u16 *)&ah->eeprom.map4k; int addr, eep_start_loc = 0; eep_start_loc = 64; @@ -353,8 +352,6 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah) "Reading from EEPROM, not flash\n"); } - eep_data = (u16 *)eep; - for (addr = 0; addr < SIZE_EEPROM_4K; addr++) { if (!ath9k_hw_nvram_read(ah, addr + eep_start_loc, eep_data)) { DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, @@ -363,6 +360,7 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah) } eep_data++; } + return true; #undef SIZE_EEPROM_4K } @@ -379,16 +377,15 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah) if (!ath9k_hw_use_flash(ah)) { - if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET, &magic)) { - DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, + DPRINTF(ah->ah_sc, ATH_DBG_FATAL, "Reading Magic # failed\n"); return false; } DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, - "Read Magic = 0x%04X\n", magic); + "Read Magic = 0x%04X\n", magic); if (magic != AR5416_EEPROM_MAGIC) { magic2 = swab16(magic); @@ -401,16 +398,9 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah) temp = swab16(*eepdata); *eepdata = temp; eepdata++; - - DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, - "0x%04X ", *eepdata); - - if (((addr + 1) % 6) == 0) - DPRINTF(ah->ah_sc, - ATH_DBG_EEPROM, "\n"); } } else { - DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, + DPRINTF(ah->ah_sc, ATH_DBG_FATAL, "Invalid EEPROM Magic. " "endianness mismatch.\n"); return -EINVAL; @@ -441,7 +431,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah) u16 word; DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, - "EEPROM Endianness is not native.. Changing \n"); + "EEPROM Endianness is not native.. 
Changing\n"); word = swab16(eep->baseEepHeader.length); eep->baseEepHeader.length = word; @@ -483,7 +473,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah) if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER || ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) { - DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, + DPRINTF(ah->ah_sc, ATH_DBG_FATAL, "Bad EEPROM checksum 0x%x or revision 0x%04x\n", sum, ah->eep_ops->get_eeprom_ver(ah)); return -EINVAL; @@ -1295,9 +1285,6 @@ static bool ath9k_hw_4k_set_board_values(struct ath_hw *ah, db2[4] = ((pModal->db2_234 >> 8) & 0xf); } else if (pModal->version == 1) { - - DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, - "EEPROM Model version is set to 1 \n"); ob[0] = (pModal->ob_01 & 0xf); ob[1] = ob[2] = ob[3] = ob[4] = (pModal->ob_01 >> 4) & 0xf; db1[0] = (pModal->db1_01 & 0xf); @@ -1464,16 +1451,13 @@ static int ath9k_hw_def_get_eeprom_rev(struct ath_hw *ah) static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah) { #define SIZE_EEPROM_DEF (sizeof(struct ar5416_eeprom_def) / sizeof(u16)) - struct ar5416_eeprom_def *eep = &ah->eeprom.def; - u16 *eep_data; + u16 *eep_data = (u16 *)&ah->eeprom.def; int addr, ar5416_eep_start_loc = 0x100; - eep_data = (u16 *)eep; - for (addr = 0; addr < SIZE_EEPROM_DEF; addr++) { if (!ath9k_hw_nvram_read(ah, addr + ar5416_eep_start_loc, eep_data)) { - DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, + DPRINTF(ah->ah_sc, ATH_DBG_FATAL, "Unable to read eeprom region\n"); return false; } @@ -1492,17 +1476,14 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah) bool need_swap = false; int i, addr, size; - if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET, - &magic)) { - DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, - "Reading Magic # failed\n"); + if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET, &magic)) { + DPRINTF(ah->ah_sc, ATH_DBG_FATAL, "Reading Magic # failed\n"); return false; } if (!ath9k_hw_use_flash(ah)) { - DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, - "Read Magic = 0x%04X\n", magic); + "Read Magic = 0x%04X\n", magic); if (magic != AR5416_EEPROM_MAGIC) { magic2 = swab16(magic); @@ -1516,18 +1497,11 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah) temp = swab16(*eepdata); *eepdata = temp; eepdata++; - - DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, - "0x%04X ", *eepdata); - - if (((addr + 1) % 6) == 0) - DPRINTF(ah->ah_sc, - ATH_DBG_EEPROM, "\n"); } } else { - DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, + DPRINTF(ah->ah_sc, ATH_DBG_FATAL, "Invalid EEPROM Magic. " - "endianness mismatch.\n"); + "Endianness mismatch.\n"); return -EINVAL; } } @@ -1556,7 +1530,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah) u16 word; DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, - "EEPROM Endianness is not native.. Changing \n"); + "EEPROM Endianness is not native.. 
Changing.\n"); word = swab16(eep->baseEepHeader.length); eep->baseEepHeader.length = word; @@ -1602,7 +1576,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah) if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER || ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) { - DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, + DPRINTF(ah->ah_sc, ATH_DBG_FATAL, "Bad EEPROM checksum 0x%x or revision 0x%04x\n", sum, ah->eep_ops->get_eeprom_ver(ah)); return -EINVAL; @@ -1855,8 +1829,6 @@ static bool ath9k_hw_def_set_board_values(struct ath_hw *ah, AR_AN_TOP2_LOCALBIAS, AR_AN_TOP2_LOCALBIAS_S, pModal->local_bias); - DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, "ForceXPAon: %d\n", - pModal->force_xpaon); REG_RMW_FIELD(ah, AR_PHY_XPA_CFG, AR_PHY_FORCE_XPA_CFG, pModal->force_xpaon); } @@ -1882,6 +1854,7 @@ static bool ath9k_hw_def_set_board_values(struct ath_hw *ah, REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON, pModal->txEndToRxOn); + if (AR_SREV_9280_10_OR_LATER(ah)) { REG_RMW_FIELD(ah, AR_PHY_CCA, AR9280_PHY_CCA_THRESH62, pModal->thresh62); @@ -1912,10 +1885,10 @@ static bool ath9k_hw_def_set_board_values(struct ath_hw *ah, } if (AR_SREV_9280_20_OR_LATER(ah) && - AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_19) + AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_19) REG_RMW_FIELD(ah, AR_PHY_CCK_TX_CTRL, - AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK, - pModal->miscBits); + AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK, + pModal->miscBits); if (AR_SREV_9280_20(ah) && AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_20) { @@ -1926,14 +1899,14 @@ static bool ath9k_hw_def_set_board_values(struct ath_hw *ah, REG_RMW_FIELD(ah, AR_AN_TOP1, AR_AN_TOP1_DACIPMODE, 0); else REG_RMW_FIELD(ah, AR_AN_TOP1, AR_AN_TOP1_DACIPMODE, - eep->baseEepHeader.dacLpMode); + eep->baseEepHeader.dacLpMode); REG_RMW_FIELD(ah, AR_PHY_FRAME_CTL, AR_PHY_FRAME_CTL_TX_CLIP, - pModal->miscBits >> 2); + pModal->miscBits >> 2); REG_RMW_FIELD(ah, AR_PHY_TX_PWRCTRL9, - AR_PHY_TX_DESIRED_SCALE_CCK, - eep->baseEepHeader.desiredScaleCCK); + AR_PHY_TX_DESIRED_SCALE_CCK, + eep->baseEepHeader.desiredScaleCCK); } return true; diff --git a/drivers/net/wireless/ath9k/hw.c b/drivers/net/wireless/ath9k/hw.c index d494e98..3dd054b 100644 --- a/drivers/net/wireless/ath9k/hw.c +++ b/drivers/net/wireless/ath9k/hw.c @@ -588,6 +588,10 @@ static int ath9k_hw_post_attach(struct ath_hw *ah) ecode = ath9k_hw_eeprom_attach(ah); if (ecode != 0) return ecode; + + DPRINTF(ah->ah_sc, ATH_DBG_CONFIG, "Eeprom VER: %d, REV: %d\n", + ah->eep_ops->get_eeprom_ver(ah), ah->eep_ops->get_eeprom_rev(ah)); + ecode = ath9k_hw_rfattach(ah); if (ecode != 0) return ecode; -- cgit v0.10.2 From 355363fcf7daded4a48308b54d6b86bea4f8bb6d Mon Sep 17 00:00:00 2001 From: Sujith Date: Fri, 13 Mar 2009 08:56:02 +0530 Subject: ath9k: Move AR5416_VER_MASK to a common location Signed-off-by: Sujith Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath9k/eeprom.c b/drivers/net/wireless/ath9k/eeprom.c index 6c9bc5f..49dee23 100644 --- a/drivers/net/wireless/ath9k/eeprom.c +++ b/drivers/net/wireless/ath9k/eeprom.c @@ -1588,7 +1588,6 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah) static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah, enum eeprom_param param) { -#define AR5416_VER_MASK (pBase->version & AR5416_EEP_VER_MINOR_MASK) struct ar5416_eeprom_def *eep = &ah->eeprom.def; struct modal_eep_header *pModal = eep->modalHeader; struct base_eep_header *pBase = &eep->baseEepHeader; @@ -1655,14 +1654,12 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah, default: return 0; } -#undef AR5416_VER_MASK } /* XXX: Clean me up, make me more legible */ static bool ath9k_hw_def_set_board_values(struct ath_hw *ah, struct ath9k_channel *chan) { -#define AR5416_VER_MASK (eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) struct modal_eep_header *pModal; struct ar5416_eeprom_def *eep = &ah->eeprom.def; int i, regChainOffset; @@ -1910,7 +1907,6 @@ static bool ath9k_hw_def_set_board_values(struct ath_hw *ah, } return true; -#undef AR5416_VER_MASK } static void ath9k_hw_def_set_addac(struct ath_hw *ah, diff --git a/drivers/net/wireless/ath9k/eeprom.h b/drivers/net/wireless/ath9k/eeprom.h index d6f6108..0a4b22a 100644 --- a/drivers/net/wireless/ath9k/eeprom.h +++ b/drivers/net/wireless/ath9k/eeprom.h @@ -95,6 +95,7 @@ #define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5)) #define ath9k_hw_use_flash(_ah) (!(_ah->ah_flags & AH_USE_EEPROM)) +#define AR5416_VER_MASK (eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) #define OLC_FOR_AR9280_20_LATER (AR_SREV_9280_20_OR_LATER(ah) && \ ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)) -- cgit v0.10.2 From a83615d74d9b952f24c904baad58610ed9d76838 Mon Sep 17 00:00:00 2001 From: Sujith Date: Fri, 13 Mar 2009 08:56:04 +0530 Subject: ath9k: Introduce a helper function for setting board gain values This improves readability. Handle both 4K/non-4K EEPROM in this patch. Signed-off-by: Sujith Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath9k/eeprom.c b/drivers/net/wireless/ath9k/eeprom.c index 49dee23..c3bb196 100644 --- a/drivers/net/wireless/ath9k/eeprom.c +++ b/drivers/net/wireless/ath9k/eeprom.c @@ -1193,57 +1193,63 @@ static void ath9k_hw_4k_set_addac(struct ath_hw *ah, } } -static bool ath9k_hw_4k_set_board_values(struct ath_hw *ah, - struct ath9k_channel *chan) +static void ath9k_hw_4k_set_gain(struct ath_hw *ah, + struct modal_eep_4k_header *pModal, + struct ar5416_eeprom_4k *eep, + u8 txRxAttenLocal, int regChainOffset) { - struct modal_eep_4k_header *pModal; - struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k; - int regChainOffset; - u8 txRxAttenLocal; - u8 ob[5], db1[5], db2[5]; - u8 ant_div_control1, ant_div_control2; - u32 regVal; - - - pModal = &eep->modalHeader; - - txRxAttenLocal = 23; - - REG_WRITE(ah, AR_PHY_SWITCH_COM, - ah->eep_ops->get_eeprom_antenna_cfg(ah, chan)); - - regChainOffset = 0; REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0 + regChainOffset, pModal->antCtrlChain[0]); REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset, - (REG_READ(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset) & - ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF | - AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) | - SM(pModal->iqCalICh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) | - SM(pModal->iqCalQCh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF)); + (REG_READ(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset) & + ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF | + AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) | + SM(pModal->iqCalICh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) | + SM(pModal->iqCalQCh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF)); if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= - AR5416_EEP_MINOR_VER_3) { + AR5416_EEP_MINOR_VER_3) { txRxAttenLocal = pModal->txRxAttenCh[0]; + REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset, - AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN, pModal->bswMargin[0]); + AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN, pModal->bswMargin[0]); REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset, - AR_PHY_GAIN_2GHZ_XATTEN1_DB, pModal->bswAtten[0]); + AR_PHY_GAIN_2GHZ_XATTEN1_DB, pModal->bswAtten[0]); REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset, - AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN, - pModal->xatten2Margin[0]); + AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN, + pModal->xatten2Margin[0]); REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset, - AR_PHY_GAIN_2GHZ_XATTEN2_DB, pModal->xatten2Db[0]); + AR_PHY_GAIN_2GHZ_XATTEN2_DB, pModal->xatten2Db[0]); } REG_RMW_FIELD(ah, AR_PHY_RXGAIN + regChainOffset, - AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal); + AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal); REG_RMW_FIELD(ah, AR_PHY_RXGAIN + regChainOffset, - AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[0]); + AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[0]); if (AR_SREV_9285_11(ah)) REG_WRITE(ah, AR9285_AN_TOP4, (AR9285_AN_TOP4_DEFAULT | 0x14)); +} + +static bool ath9k_hw_4k_set_board_values(struct ath_hw *ah, + struct ath9k_channel *chan) +{ + struct modal_eep_4k_header *pModal; + struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k; + u8 txRxAttenLocal; + u8 ob[5], db1[5], db2[5]; + u8 ant_div_control1, ant_div_control2; + u32 regVal; + + pModal = &eep->modalHeader; + txRxAttenLocal = 23; + + REG_WRITE(ah, AR_PHY_SWITCH_COM, + ah->eep_ops->get_eeprom_antenna_cfg(ah, chan)); + + /* Single chain for 4K EEPROM*/ + ath9k_hw_4k_set_gain(ah, pModal, eep, txRxAttenLocal, 0); /* Initialize Ant Diversity settings from EEPROM */ if (pModal->version == 3) { @@ -1656,7 +1662,62 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah, } } -/* 
XXX: Clean me up, make me more legible */ +static void ath9k_hw_def_set_gain(struct ath_hw *ah, + struct modal_eep_header *pModal, + struct ar5416_eeprom_def *eep, + u8 txRxAttenLocal, int regChainOffset, int i) +{ + if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_3) { + txRxAttenLocal = pModal->txRxAttenCh[i]; + + if (AR_SREV_9280_10_OR_LATER(ah)) { + REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset, + AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN, + pModal->bswMargin[i]); + REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset, + AR_PHY_GAIN_2GHZ_XATTEN1_DB, + pModal->bswAtten[i]); + REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset, + AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN, + pModal->xatten2Margin[i]); + REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset, + AR_PHY_GAIN_2GHZ_XATTEN2_DB, + pModal->xatten2Db[i]); + } else { + REG_WRITE(ah, AR_PHY_GAIN_2GHZ + regChainOffset, + (REG_READ(ah, AR_PHY_GAIN_2GHZ + regChainOffset) & + ~AR_PHY_GAIN_2GHZ_BSW_MARGIN) + | SM(pModal-> bswMargin[i], + AR_PHY_GAIN_2GHZ_BSW_MARGIN)); + REG_WRITE(ah, AR_PHY_GAIN_2GHZ + regChainOffset, + (REG_READ(ah, AR_PHY_GAIN_2GHZ + regChainOffset) & + ~AR_PHY_GAIN_2GHZ_BSW_ATTEN) + | SM(pModal->bswAtten[i], + AR_PHY_GAIN_2GHZ_BSW_ATTEN)); + } + } + + if (AR_SREV_9280_10_OR_LATER(ah)) { + REG_RMW_FIELD(ah, + AR_PHY_RXGAIN + regChainOffset, + AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal); + REG_RMW_FIELD(ah, + AR_PHY_RXGAIN + regChainOffset, + AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[i]); + } else { + REG_WRITE(ah, + AR_PHY_RXGAIN + regChainOffset, + (REG_READ(ah, AR_PHY_RXGAIN + regChainOffset) & + ~AR_PHY_RXGAIN_TXRX_ATTEN) + | SM(txRxAttenLocal, AR_PHY_RXGAIN_TXRX_ATTEN)); + REG_WRITE(ah, + AR_PHY_GAIN_2GHZ + regChainOffset, + (REG_READ(ah, AR_PHY_GAIN_2GHZ + regChainOffset) & + ~AR_PHY_GAIN_2GHZ_RXTX_MARGIN) | + SM(pModal->rxTxMarginCh[i], AR_PHY_GAIN_2GHZ_RXTX_MARGIN)); + } +} + static bool ath9k_hw_def_set_board_values(struct ath_hw *ah, struct ath9k_channel *chan) { @@ -1666,7 +1727,6 @@ static bool ath9k_hw_def_set_board_values(struct ath_hw *ah, u8 txRxAttenLocal; pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]); - txRxAttenLocal = IS_CHAN_2GHZ(chan) ? 23 : 44; REG_WRITE(ah, AR_PHY_SWITCH_COM, @@ -1679,8 +1739,7 @@ static bool ath9k_hw_def_set_board_values(struct ath_hw *ah, } if (AR_SREV_5416_20_OR_LATER(ah) && - (ah->rxchainmask == 5 || ah->txchainmask == 5) - && (i != 0)) + (ah->rxchainmask == 5 || ah->txchainmask == 5) && (i != 0)) regChainOffset = (i == 1) ? 
0x2000 : 0x1000; else regChainOffset = i * 0x1000; @@ -1689,9 +1748,7 @@ static bool ath9k_hw_def_set_board_values(struct ath_hw *ah, pModal->antCtrlChain[i]); REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset, - (REG_READ(ah, - AR_PHY_TIMING_CTRL4(0) + - regChainOffset) & + (REG_READ(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset) & ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF | AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) | SM(pModal->iqCalICh[i], @@ -1699,87 +1756,9 @@ static bool ath9k_hw_def_set_board_values(struct ath_hw *ah, SM(pModal->iqCalQCh[i], AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF)); - if ((i == 0) || AR_SREV_5416_20_OR_LATER(ah)) { - if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_3) { - txRxAttenLocal = pModal->txRxAttenCh[i]; - if (AR_SREV_9280_10_OR_LATER(ah)) { - REG_RMW_FIELD(ah, - AR_PHY_GAIN_2GHZ + - regChainOffset, - AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN, - pModal-> - bswMargin[i]); - REG_RMW_FIELD(ah, - AR_PHY_GAIN_2GHZ + - regChainOffset, - AR_PHY_GAIN_2GHZ_XATTEN1_DB, - pModal-> - bswAtten[i]); - REG_RMW_FIELD(ah, - AR_PHY_GAIN_2GHZ + - regChainOffset, - AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN, - pModal-> - xatten2Margin[i]); - REG_RMW_FIELD(ah, - AR_PHY_GAIN_2GHZ + - regChainOffset, - AR_PHY_GAIN_2GHZ_XATTEN2_DB, - pModal-> - xatten2Db[i]); - } else { - REG_WRITE(ah, - AR_PHY_GAIN_2GHZ + - regChainOffset, - (REG_READ(ah, - AR_PHY_GAIN_2GHZ + - regChainOffset) & - ~AR_PHY_GAIN_2GHZ_BSW_MARGIN) - | SM(pModal-> - bswMargin[i], - AR_PHY_GAIN_2GHZ_BSW_MARGIN)); - REG_WRITE(ah, - AR_PHY_GAIN_2GHZ + - regChainOffset, - (REG_READ(ah, - AR_PHY_GAIN_2GHZ + - regChainOffset) & - ~AR_PHY_GAIN_2GHZ_BSW_ATTEN) - | SM(pModal->bswAtten[i], - AR_PHY_GAIN_2GHZ_BSW_ATTEN)); - } - } - if (AR_SREV_9280_10_OR_LATER(ah)) { - REG_RMW_FIELD(ah, - AR_PHY_RXGAIN + - regChainOffset, - AR9280_PHY_RXGAIN_TXRX_ATTEN, - txRxAttenLocal); - REG_RMW_FIELD(ah, - AR_PHY_RXGAIN + - regChainOffset, - AR9280_PHY_RXGAIN_TXRX_MARGIN, - pModal->rxTxMarginCh[i]); - } else { - REG_WRITE(ah, - AR_PHY_RXGAIN + regChainOffset, - (REG_READ(ah, - AR_PHY_RXGAIN + - regChainOffset) & - ~AR_PHY_RXGAIN_TXRX_ATTEN) | - SM(txRxAttenLocal, - AR_PHY_RXGAIN_TXRX_ATTEN)); - REG_WRITE(ah, - AR_PHY_GAIN_2GHZ + - regChainOffset, - (REG_READ(ah, - AR_PHY_GAIN_2GHZ + - regChainOffset) & - ~AR_PHY_GAIN_2GHZ_RXTX_MARGIN) | - SM(pModal->rxTxMarginCh[i], - AR_PHY_GAIN_2GHZ_RXTX_MARGIN)); - } - } + if ((i == 0) || AR_SREV_5416_20_OR_LATER(ah)) + ath9k_hw_def_set_gain(ah, pModal, eep, txRxAttenLocal, + regChainOffset, i); } if (AR_SREV_9280_10_OR_LATER(ah)) { -- cgit v0.10.2 From d6509151bd3e952b7d157ea4dbae23279d427e95 Mon Sep 17 00:00:00 2001 From: Sujith Date: Fri, 13 Mar 2009 08:56:05 +0530 Subject: ath9k: Change return type for set_board_values() We always return true, checking for 'false' return value is bogus anyway, so fix this. Signed-off-by: Sujith Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath9k/eeprom.c b/drivers/net/wireless/ath9k/eeprom.c index c3bb196..973b576 100644 --- a/drivers/net/wireless/ath9k/eeprom.c +++ b/drivers/net/wireless/ath9k/eeprom.c @@ -1232,7 +1232,7 @@ static void ath9k_hw_4k_set_gain(struct ath_hw *ah, REG_WRITE(ah, AR9285_AN_TOP4, (AR9285_AN_TOP4_DEFAULT | 0x14)); } -static bool ath9k_hw_4k_set_board_values(struct ath_hw *ah, +static void ath9k_hw_4k_set_board_values(struct ath_hw *ah, struct ath9k_channel *chan) { struct modal_eep_4k_header *pModal; @@ -1378,8 +1378,6 @@ static bool ath9k_hw_4k_set_board_values(struct ath_hw *ah, AR_PHY_SETTLING_SWITCH, pModal->swSettleHt40); } - - return true; } static u16 ath9k_hw_4k_get_eeprom_antenna_cfg(struct ath_hw *ah, @@ -1718,7 +1716,7 @@ static void ath9k_hw_def_set_gain(struct ath_hw *ah, } } -static bool ath9k_hw_def_set_board_values(struct ath_hw *ah, +static void ath9k_hw_def_set_board_values(struct ath_hw *ah, struct ath9k_channel *chan) { struct modal_eep_header *pModal; @@ -1884,8 +1882,6 @@ static bool ath9k_hw_def_set_board_values(struct ath_hw *ah, AR_PHY_TX_DESIRED_SCALE_CCK, eep->baseEepHeader.desiredScaleCCK); } - - return true; } static void ath9k_hw_def_set_addac(struct ath_hw *ah, diff --git a/drivers/net/wireless/ath9k/eeprom.h b/drivers/net/wireless/ath9k/eeprom.h index 0a4b22a..8e5b880 100644 --- a/drivers/net/wireless/ath9k/eeprom.h +++ b/drivers/net/wireless/ath9k/eeprom.h @@ -490,7 +490,7 @@ struct eeprom_ops { u8 (*get_num_ant_config)(struct ath_hw *hw, enum ieee80211_band band); u16 (*get_eeprom_antenna_cfg)(struct ath_hw *hw, struct ath9k_channel *chan); - bool (*set_board_values)(struct ath_hw *hw, struct ath9k_channel *chan); + void (*set_board_values)(struct ath_hw *hw, struct ath9k_channel *chan); void (*set_addac)(struct ath_hw *hw, struct ath9k_channel *chan); int (*set_txpower)(struct ath_hw *hw, struct ath9k_channel *chan, u16 cfgCtl, u8 twiceAntennaReduction, diff --git a/drivers/net/wireless/ath9k/hw.c b/drivers/net/wireless/ath9k/hw.c index 3dd054b..78e5763 100644 --- a/drivers/net/wireless/ath9k/hw.c +++ b/drivers/net/wireless/ath9k/hw.c @@ -2277,11 +2277,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, else ath9k_hw_spur_mitigate(ah, chan); - if (!ah->eep_ops->set_board_values(ah, chan)) { - DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, - "error setting board options\n"); - return -EIO; - } + ah->eep_ops->set_board_values(ah, chan); ath9k_hw_decrease_chain_power(ah, chan); -- cgit v0.10.2 From e71cef37f1f4cb7e9c919cbaabe23438f10a7080 Mon Sep 17 00:00:00 2001 From: Sujith Date: Fri, 13 Mar 2009 08:56:07 +0530 Subject: ath9k: Fix bug in 4K EEPROM size calculation We should be checking with the 4K header and not the non-4K header size. Signed-off-by: Sujith Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath9k/eeprom.c b/drivers/net/wireless/ath9k/eeprom.c index 973b576..d7b9cf4 100644 --- a/drivers/net/wireless/ath9k/eeprom.c +++ b/drivers/net/wireless/ath9k/eeprom.c @@ -416,7 +416,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah) else el = ah->eeprom.map4k.baseEepHeader.length; - if (el > sizeof(struct ar5416_eeprom_def)) + if (el > sizeof(struct ar5416_eeprom_4k)) el = sizeof(struct ar5416_eeprom_4k) / sizeof(u16); else el = el / sizeof(u16); -- cgit v0.10.2 From 95e4acb7331722236b9f11492ae2e96473210ebc Mon Sep 17 00:00:00 2001 From: Sujith Date: Fri, 13 Mar 2009 08:56:09 +0530 Subject: ath9k: Fill in ack signal in TX status This patch fills the ack_signal field in TX status with an appropriate value from the TX descriptor. Signed-off-by: Sujith Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath9k/xmit.c index e3f3766..0aae8f3 100644 --- a/drivers/net/wireless/ath9k/xmit.c +++ b/drivers/net/wireless/ath9k/xmit.c @@ -1852,13 +1852,17 @@ static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf, return nbad; } -static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds, int nbad) +static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds, + int nbad, int txok) { struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info); + if (txok) + tx_info->status.ack_signal = ds->ds_txstat.ts_rssi; + tx_info_priv->update_rc = false; if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED; @@ -1996,7 +2000,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) nbad = ath_tx_num_badfrms(sc, bf, txok); } - ath_tx_rc_status(bf, ds, nbad); + ath_tx_rc_status(bf, ds, nbad, txok); if (bf_isampdu(bf)) ath_tx_complete_aggr(sc, txq, bf, &bf_head, txok); -- cgit v0.10.2 From c2da50e5837f92c9f16a611efd1bb0754036691b Mon Sep 17 00:00:00 2001 From: Sujith Date: Fri, 13 Mar 2009 08:56:11 +0530 Subject: ath9k: Fix bug in handling single stream stations AP mode currently sets up the dual stream capability for all stations. This patch fixes it by checking if the associated station supports dual stream MCS rates (8-15). We would disregard any MCS rates above 15, since Atheros HW supports only 0..15 rates currently, and can't receive at rates > 15 anyway. Signed-off-by: Sujith Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath9k/rc.c b/drivers/net/wireless/ath9k/rc.c index 28d69da..74bc4e6 100644 --- a/drivers/net/wireless/ath9k/rc.c +++ b/drivers/net/wireless/ath9k/rc.c @@ -1470,16 +1470,18 @@ static void ath_rc_init(struct ath_softc *sc, ath_rc_priv->ht_cap); } -static u8 ath_rc_build_ht_caps(struct ath_softc *sc, bool is_ht, bool is_cw40, - bool is_sgi40) +static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta, + bool is_cw40, bool is_sgi40) { u8 caps = 0; - if (is_ht) { + if (sta->ht_cap.ht_supported) { caps = WLAN_RC_HT_FLAG; if (sc->sc_ah->caps.tx_chainmask != 1 && - ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_DS, 0, NULL)) - caps |= WLAN_RC_DS_FLAG; + ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_DS, 0, NULL)) { + if (sta->ht_cap.mcs.rx_mask[1]) + caps |= WLAN_RC_DS_FLAG; + } if (is_cw40) caps |= WLAN_RC_40_FLAG; if (is_sgi40) @@ -1626,8 +1628,7 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband, rate_table = sc->cur_rate_table; } - ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta->ht_cap.ht_supported, - is_cw40, is_sgi40); + ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta, is_cw40, is_sgi40); ath_rc_init(sc, priv_sta, sband, sta, rate_table); } @@ -1661,8 +1662,7 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband, rate_table = ath_choose_rate_table(sc, sband->band, sta->ht_cap.ht_supported, oper_cw40); - ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, - sta->ht_cap.ht_supported, + ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta, oper_cw40, oper_sgi40); ath_rc_init(sc, priv_sta, sband, sta, rate_table); -- cgit v0.10.2 From cee075a24eec64f1f5b2b3b14753b2d4b8ecce55 Mon Sep 17 00:00:00 2001 From: Sujith Date: Fri, 13 Mar 2009 09:07:23 +0530 Subject: ath9k: Update copyright in all the files How time flies. Signed-off-by: Sujith Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath9k/ahb.c b/drivers/net/wireless/ath9k/ahb.c index 00cc7bb..0e65c51 100644 --- a/drivers/net/wireless/ath9k/ahb.c +++ b/drivers/net/wireless/ath9k/ahb.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008 Atheros Communications Inc. + * Copyright (c) 2008-2009 Atheros Communications Inc. * Copyright (c) 2009 Gabor Juhos * Copyright (c) 2009 Imre Kaloz * diff --git a/drivers/net/wireless/ath9k/ani.c b/drivers/net/wireless/ath9k/ani.c index a39eb76..6c5e887 100644 --- a/drivers/net/wireless/ath9k/ani.c +++ b/drivers/net/wireless/ath9k/ani.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008 Atheros Communications Inc. + * Copyright (c) 2008-2009 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath9k/ani.h b/drivers/net/wireless/ath9k/ani.h index 7315761..08b4e7e 100644 --- a/drivers/net/wireless/ath9k/ani.h +++ b/drivers/net/wireless/ath9k/ani.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008 Atheros Communications Inc. + * Copyright (c) 2008-2009 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath9k/ath9k.h b/drivers/net/wireless/ath9k/ath9k.h index 5afd244..2b02564 100644 --- a/drivers/net/wireless/ath9k/ath9k.h +++ b/drivers/net/wireless/ath9k/ath9k.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008 Atheros Communications Inc. + * Copyright (c) 2008-2009 Atheros Communications Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath9k/beacon.c b/drivers/net/wireless/ath9k/beacon.c index 3fd1b86..e5b0071 100644 --- a/drivers/net/wireless/ath9k/beacon.c +++ b/drivers/net/wireless/ath9k/beacon.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008 Atheros Communications Inc. + * Copyright (c) 2008-2009 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath9k/calib.c b/drivers/net/wireless/ath9k/calib.c index c9446fb..e2d62e9 100644 --- a/drivers/net/wireless/ath9k/calib.c +++ b/drivers/net/wireless/ath9k/calib.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008 Atheros Communications Inc. + * Copyright (c) 2008-2009 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath9k/calib.h b/drivers/net/wireless/ath9k/calib.h index 32589e0..1c74bd5 100644 --- a/drivers/net/wireless/ath9k/calib.h +++ b/drivers/net/wireless/ath9k/calib.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008 Atheros Communications Inc. + * Copyright (c) 2008-2009 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath9k/debug.c b/drivers/net/wireless/ath9k/debug.c index 82573ca..fdf9528 100644 --- a/drivers/net/wireless/ath9k/debug.c +++ b/drivers/net/wireless/ath9k/debug.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008 Atheros Communications Inc. + * Copyright (c) 2008-2009 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath9k/debug.h b/drivers/net/wireless/ath9k/debug.h index 065268b..7b0e541 100644 --- a/drivers/net/wireless/ath9k/debug.h +++ b/drivers/net/wireless/ath9k/debug.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008 Atheros Communications Inc. + * Copyright (c) 2008-2009 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath9k/eeprom.c b/drivers/net/wireless/ath9k/eeprom.c index d7b9cf4..ffc36b0 100644 --- a/drivers/net/wireless/ath9k/eeprom.c +++ b/drivers/net/wireless/ath9k/eeprom.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008 Atheros Communications Inc. + * Copyright (c) 2008-2009 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath9k/eeprom.h b/drivers/net/wireless/ath9k/eeprom.h index 8e5b880..25b68c8 100644 --- a/drivers/net/wireless/ath9k/eeprom.h +++ b/drivers/net/wireless/ath9k/eeprom.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008 Atheros Communications Inc. + * Copyright (c) 2008-2009 Atheros Communications Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath9k/hw.c b/drivers/net/wireless/ath9k/hw.c index 78e5763..15e4d42 100644 --- a/drivers/net/wireless/ath9k/hw.c +++ b/drivers/net/wireless/ath9k/hw.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008 Atheros Communications Inc. + * Copyright (c) 2008-2009 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath9k/hw.h b/drivers/net/wireless/ath9k/hw.h index dc681f0..0b594e0 100644 --- a/drivers/net/wireless/ath9k/hw.h +++ b/drivers/net/wireless/ath9k/hw.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008 Atheros Communications Inc. + * Copyright (c) 2008-2009 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath9k/initvals.h b/drivers/net/wireless/ath9k/initvals.h index 1d60c37..e2f0a34 100644 --- a/drivers/net/wireless/ath9k/initvals.h +++ b/drivers/net/wireless/ath9k/initvals.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008 Atheros Communications Inc. + * Copyright (c) 2008-2009 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath9k/mac.c b/drivers/net/wireless/ath9k/mac.c index f757bc7..e0a6dee 100644 --- a/drivers/net/wireless/ath9k/mac.c +++ b/drivers/net/wireless/ath9k/mac.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008 Atheros Communications Inc. + * Copyright (c) 2008-2009 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath9k/mac.h b/drivers/net/wireless/ath9k/mac.h index a75f65d..1176bce 100644 --- a/drivers/net/wireless/ath9k/mac.h +++ b/drivers/net/wireless/ath9k/mac.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008 Atheros Communications Inc. + * Copyright (c) 2008-2009 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath9k/main.c b/drivers/net/wireless/ath9k/main.c index 8db75f6..7d27eed 100644 --- a/drivers/net/wireless/ath9k/main.c +++ b/drivers/net/wireless/ath9k/main.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008 Atheros Communications Inc. + * Copyright (c) 2008-2009 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath9k/pci.c b/drivers/net/wireless/ath9k/pci.c index 53572d96..6dbc585 100644 --- a/drivers/net/wireless/ath9k/pci.c +++ b/drivers/net/wireless/ath9k/pci.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008 Atheros Communications Inc. + * Copyright (c) 2008-2009 Atheros Communications Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath9k/phy.c b/drivers/net/wireless/ath9k/phy.c index e1494ba..8bcba90 100644 --- a/drivers/net/wireless/ath9k/phy.c +++ b/drivers/net/wireless/ath9k/phy.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008 Atheros Communications Inc. + * Copyright (c) 2008-2009 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath9k/phy.h b/drivers/net/wireless/ath9k/phy.h index 1eac8c7..0f7f8e0 100644 --- a/drivers/net/wireless/ath9k/phy.h +++ b/drivers/net/wireless/ath9k/phy.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008 Atheros Communications Inc. + * Copyright (c) 2008-2009 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath9k/rc.c b/drivers/net/wireless/ath9k/rc.c index 74bc4e6..6c2fd39 100644 --- a/drivers/net/wireless/ath9k/rc.c +++ b/drivers/net/wireless/ath9k/rc.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2004 Video54 Technologies, Inc. - * Copyright (c) 2004-2008 Atheros Communications, Inc. + * Copyright (c) 2004-2009 Atheros Communications, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath9k/rc.h b/drivers/net/wireless/ath9k/rc.h index db9b0b9..199a3ce57 100644 --- a/drivers/net/wireless/ath9k/rc.h +++ b/drivers/net/wireless/ath9k/rc.h @@ -1,7 +1,7 @@ /* * Copyright (c) 2004 Sam Leffler, Errno Consulting * Copyright (c) 2004 Video54 Technologies, Inc. - * Copyright (c) 2008 Atheros Communications Inc. + * Copyright (c) 2008-2009 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath9k/recv.c b/drivers/net/wireless/ath9k/recv.c index 0bba176..917bac7 100644 --- a/drivers/net/wireless/ath9k/recv.c +++ b/drivers/net/wireless/ath9k/recv.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008 Atheros Communications Inc. + * Copyright (c) 2008-2009 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath9k/reg.h b/drivers/net/wireless/ath9k/reg.h index d86e90e..5260524 100644 --- a/drivers/net/wireless/ath9k/reg.h +++ b/drivers/net/wireless/ath9k/reg.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008 Atheros Communications Inc. + * Copyright (c) 2008-2009 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath9k/regd.c b/drivers/net/wireless/ath9k/regd.c index b8f9b6d..4ca6251 100644 --- a/drivers/net/wireless/ath9k/regd.c +++ b/drivers/net/wireless/ath9k/regd.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008 Atheros Communications Inc. + * Copyright (c) 2008-2009 Atheros Communications Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath9k/regd.h b/drivers/net/wireless/ath9k/regd.h index 8f885f3..9f5fbd4 100644 --- a/drivers/net/wireless/ath9k/regd.h +++ b/drivers/net/wireless/ath9k/regd.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008 Atheros Communications Inc. + * Copyright (c) 2008-2009 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath9k/regd_common.h b/drivers/net/wireless/ath9k/regd_common.h index b41d000..4d0e298 100644 --- a/drivers/net/wireless/ath9k/regd_common.h +++ b/drivers/net/wireless/ath9k/regd_common.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008 Atheros Communications Inc. + * Copyright (c) 2008-2009 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath9k/xmit.c index 0aae8f3..8968abe 100644 --- a/drivers/net/wireless/ath9k/xmit.c +++ b/drivers/net/wireless/ath9k/xmit.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008 Atheros Communications Inc. + * Copyright (c) 2008-2009 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above -- cgit v0.10.2 From b5bde374f0f61f5d97114d400ade8fc96bf6f10d Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 13 Mar 2009 11:19:45 +0100 Subject: mac80211: fix warnings in ieee80211_if_config The last warning can never trigger, and the explicit AP_VLAN check is pointless if we move the config_interface check down, in practice config_interface is required anyway. Signed-off-by: Johannes Berg Signed-off-by: John W. Linville diff --git a/net/mac80211/main.c b/net/mac80211/main.c index f38db4d..dac68d4 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -161,12 +161,6 @@ int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed) if (WARN_ON(!netif_running(sdata->dev))) return 0; - if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) - return -EINVAL; - - if (!local->ops->config_interface) - return 0; - memset(&conf, 0, sizeof(conf)); if (sdata->vif.type == NL80211_IFTYPE_STATION) @@ -183,6 +177,9 @@ int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed) return -EINVAL; } + if (!local->ops->config_interface) + return 0; + switch (sdata->vif.type) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_ADHOC: @@ -224,9 +221,6 @@ int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed) } } - if (WARN_ON(!conf.bssid && (changed & IEEE80211_IFCC_BSSID))) - return -EINVAL; - conf.changed = changed; return local->ops->config_interface(local_to_hw(local), -- cgit v0.10.2 From 25420604c8967ff24f087dd7b9cd4b278567d39a Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 13 Mar 2009 11:43:36 +0100 Subject: mac80211: stop queues across suspend/resume Even though userland probably cannot submit packets, there might still be some coming, and that's no good when the driver doesn't expect them. Stop the queues across suspend/resume. Signed-off-by: Johannes Berg Signed-off-by: John W. 
Linville diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index fbb91f1..ad12c2a 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -598,6 +598,7 @@ enum queue_stop_reason { IEEE80211_QUEUE_STOP_REASON_PS, IEEE80211_QUEUE_STOP_REASON_CSA, IEEE80211_QUEUE_STOP_REASON_AGGREGATION, + IEEE80211_QUEUE_STOP_REASON_SUSPEND, }; struct ieee80211_master_priv { diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c index 44525f5..c923ceb 100644 --- a/net/mac80211/pm.c +++ b/net/mac80211/pm.c @@ -11,6 +11,9 @@ int __ieee80211_suspend(struct ieee80211_hw *hw) struct ieee80211_if_init_conf conf; struct sta_info *sta; + ieee80211_stop_queues_by_reason(hw, + IEEE80211_QUEUE_STOP_REASON_SUSPEND); + flush_workqueue(local->hw.workqueue); /* disable keys */ @@ -113,5 +116,8 @@ int __ieee80211_resume(struct ieee80211_hw *hw) ieee80211_configure_filter(local); netif_addr_unlock_bh(local->mdev); + ieee80211_wake_queues_by_reason(hw, + IEEE80211_QUEUE_STOP_REASON_SUSPEND); + return 0; } -- cgit v0.10.2 From aae89831df03e5282a8f5c0ee46432cfb677fc5c Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 13 Mar 2009 12:52:10 +0100 Subject: wireless: radiotap updates Radiotap was updated to include a "bad PLCP" flag and standardise the "bad FCS" flag in the "flags" rather than "RX flags" field, this patch updates Linux to that standard. Signed-off-by: Johannes Berg Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/libertas/radiotap.h b/drivers/net/wireless/libertas/radiotap.h index f8eb9097..d16b264 100644 --- a/drivers/net/wireless/libertas/radiotap.h +++ b/drivers/net/wireless/libertas/radiotap.h @@ -33,22 +33,12 @@ struct rx_radiotap_hdr { struct ieee80211_radiotap_header hdr; u8 flags; u8 rate; - u16 chan_freq; - u16 chan_flags; - u8 antenna; u8 antsignal; - u16 rx_flags; -#if 0 - u8 pad[IEEE80211_RADIOTAP_HDRLEN - 18]; -#endif } __attribute__ ((packed)); #define RX_RADIOTAP_PRESENT ( \ (1 << IEEE80211_RADIOTAP_FLAGS) | \ (1 << IEEE80211_RADIOTAP_RATE) | \ - (1 << IEEE80211_RADIOTAP_CHANNEL) | \ - (1 << IEEE80211_RADIOTAP_ANTENNA) | \ (1 << IEEE80211_RADIOTAP_DB_ANTSIGNAL) |\ - (1 << IEEE80211_RADIOTAP_RX_FLAGS) | \ 0) diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c index 4f60948..63d7e19 100644 --- a/drivers/net/wireless/libertas/rx.c +++ b/drivers/net/wireless/libertas/rx.c @@ -351,19 +351,11 @@ static int process_rxed_802_11_packet(struct lbs_private *priv, radiotap_hdr.hdr.it_pad = 0; radiotap_hdr.hdr.it_len = cpu_to_le16 (sizeof(struct rx_radiotap_hdr)); radiotap_hdr.hdr.it_present = cpu_to_le32 (RX_RADIOTAP_PRESENT); - /* unknown values */ - radiotap_hdr.flags = 0; - radiotap_hdr.chan_freq = 0; - radiotap_hdr.chan_flags = 0; - radiotap_hdr.antenna = 0; - /* known values */ + if (!(prxpd->status & cpu_to_le16(MRVDRV_RXPD_STATUS_OK))) + radiotap_hdr.flags |= IEEE80211_RADIOTAP_F_BADFCS; radiotap_hdr.rate = convert_mv_rate_to_radiotap(prxpd->rx_rate); /* XXX must check no carryout */ radiotap_hdr.antsignal = prxpd->snr + prxpd->nf; - radiotap_hdr.rx_flags = 0; - if (!(prxpd->status & cpu_to_le16(MRVDRV_RXPD_STATUS_OK))) - radiotap_hdr.rx_flags |= IEEE80211_RADIOTAP_F_RX_BADFCS; - //memset(radiotap_hdr.pad, 0x11, IEEE80211_RADIOTAP_HDRLEN - 18); /* chop the rxpd */ skb_pull(skb, sizeof(struct rxpd)); diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h index 384698c..23c3f3d 100644 --- a/include/net/ieee80211_radiotap.h +++ b/include/net/ieee80211_radiotap.h @@ -230,8 +230,10 @@ enum 
ieee80211_radiotap_type { * 802.11 header and payload * (to 32-bit boundary) */ +#define IEEE80211_RADIOTAP_F_BADFCS 0x40 /* bad FCS */ + /* For IEEE80211_RADIOTAP_RX_FLAGS */ -#define IEEE80211_RADIOTAP_F_RX_BADFCS 0x0001 /* frame failed crc check */ +#define IEEE80211_RADIOTAP_F_RX_BADPLCP 0x0002 /* frame has bad PLCP */ /* For IEEE80211_RADIOTAP_TX_FLAGS */ #define IEEE80211_RADIOTAP_F_TX_FAIL 0x0001 /* failed due to excessive diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 66f7ecf..fcc0a59 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -142,6 +142,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, /* IEEE80211_RADIOTAP_FLAGS */ if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) *pos |= IEEE80211_RADIOTAP_F_FCS; + if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) + *pos |= IEEE80211_RADIOTAP_F_BADFCS; if (status->flag & RX_FLAG_SHORTPRE) *pos |= IEEE80211_RADIOTAP_F_SHORTPRE; pos++; @@ -204,9 +206,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, /* ensure 2 byte alignment for the 2 byte field as required */ if ((pos - (unsigned char *)rthdr) & 1) pos++; - /* FIXME: when radiotap gets a 'bad PLCP' flag use it here */ - if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) - *(__le16 *)pos |= cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADFCS); + if (status->flag & RX_FLAG_FAILED_PLCP_CRC) + *(__le16 *)pos |= cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADPLCP); pos += 2; } -- cgit v0.10.2 From ec30415f7935f0ff92f93a4ac87233ca3007a78a Mon Sep 17 00:00:00 2001 From: Vasanthakumar Thiagarajan Date: Fri, 13 Mar 2009 20:26:52 +0530 Subject: mac80211: Populate HT limitation with TKIP/WEP to the handler for SIOCSIWENCODE too Signed-off-by: Vasanthakumar Thiagarajan Signed-off-by: John W. Linville diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c index 935c63e..e55d283 100644 --- a/net/mac80211/wext.c +++ b/net/mac80211/wext.c @@ -630,7 +630,7 @@ static int ieee80211_ioctl_siwencode(struct net_device *dev, struct ieee80211_sub_if_data *sdata; int idx, i, alg = ALG_WEP; u8 bcaddr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; - int remove = 0; + int remove = 0, ret; sdata = IEEE80211_DEV_TO_SUB_IF(dev); @@ -656,11 +656,20 @@ static int ieee80211_ioctl_siwencode(struct net_device *dev, return 0; } - return ieee80211_set_encryption( + ret = ieee80211_set_encryption( sdata, bcaddr, idx, alg, remove, !sdata->default_key, keybuf, erq->length); + + if (!ret) { + if (remove) + sdata->u.mgd.flags &= ~IEEE80211_STA_TKIP_WEP_USED; + else + sdata->u.mgd.flags |= IEEE80211_STA_TKIP_WEP_USED; + } + + return ret; } -- cgit v0.10.2 From 8fdc621dc743b87879ccf0177969864b09388d9a Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Sat, 14 Mar 2009 09:34:01 +0100 Subject: nl80211: export supported commands This makes nl80211 export the supported commands (command groups) per wiphy so userspace has an idea what it can do -- this will be required reading for userspace when we introduce auth/assoc /or/ connect for older hardware that cannot separate auth and assoc. Signed-off-by: Johannes Berg Signed-off-by: John W. Linville diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h index f33aa08..3700d92 100644 --- a/include/linux/nl80211.h +++ b/include/linux/nl80211.h @@ -349,6 +349,10 @@ enum nl80211_commands { * @NL80211_ATTR_REG_TYPE: indicates the type of the regulatory domain currently * set. 
This can be one of the nl80211_reg_type (%NL80211_REGDOM_TYPE_*) * + * @NL80211_ATTR_SUPPORTED_COMMANDS: wiphy attribute that specifies + * an array of command numbers (i.e. a mapping index to command number) + * that the driver for the given wiphy supports. + * * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use */ @@ -426,6 +430,8 @@ enum nl80211_attrs { NL80211_ATTR_REG_INITIATOR, NL80211_ATTR_REG_TYPE, + NL80211_ATTR_SUPPORTED_COMMANDS, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index ab9d8f1..58ee1b1 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -131,6 +131,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, struct nlattr *nl_freqs, *nl_freq; struct nlattr *nl_rates, *nl_rate; struct nlattr *nl_modes; + struct nlattr *nl_cmds; enum ieee80211_band band; struct ieee80211_channel *chan; struct ieee80211_rate *rate; @@ -242,6 +243,32 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, } nla_nest_end(msg, nl_bands); + nl_cmds = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_COMMANDS); + if (!nl_cmds) + goto nla_put_failure; + + i = 0; +#define CMD(op, n) \ + do { \ + if (dev->ops->op) { \ + i++; \ + NLA_PUT_U32(msg, i, NL80211_CMD_ ## n); \ + } \ + } while (0) + + CMD(add_virtual_intf, NEW_INTERFACE); + CMD(change_virtual_intf, SET_INTERFACE); + CMD(add_key, NEW_KEY); + CMD(add_beacon, NEW_BEACON); + CMD(add_station, NEW_STATION); + CMD(add_mpath, NEW_MPATH); + CMD(set_mesh_params, SET_MESH_PARAMS); + CMD(change_bss, SET_BSS); + CMD(set_mgmt_extra_ie, SET_MGMT_EXTRA_IE); + +#undef CMD + nla_nest_end(msg, nl_cmds); + return genlmsg_end(msg, hdr); nla_put_failure: -- cgit v0.10.2 From 7f0216a49bea717b9606b81c60f2f0b6152123eb Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Sat, 14 Mar 2009 09:42:49 +0100 Subject: mac80211: acquire sta_lock for station suspend/resume To avoid concurrent manipulations of the sta list (which shouldn't be possible at this point, but anyway) we need to hold the sta_lock around iterating the list. At the same time, we do not need to iterate the list at all if the driver doesn't want to be notified. Signed-off-by: Johannes Berg Acked-by: Bob Copeland Signed-off-by: John W. 
Linville diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c index c923ceb..ef7be1c 100644 --- a/net/mac80211/pm.c +++ b/net/mac80211/pm.c @@ -10,6 +10,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw) struct ieee80211_sub_if_data *sdata; struct ieee80211_if_init_conf conf; struct sta_info *sta; + unsigned long flags; ieee80211_stop_queues_by_reason(hw, IEEE80211_QUEUE_STOP_REASON_SUSPEND); @@ -21,9 +22,9 @@ int __ieee80211_suspend(struct ieee80211_hw *hw) ieee80211_disable_keys(sdata); /* remove STAs */ - list_for_each_entry(sta, &local->sta_list, list) { - - if (local->ops->sta_notify) { + if (local->ops->sta_notify) { + spin_lock_irqsave(&local->sta_lock, flags); + list_for_each_entry(sta, &local->sta_list, list) { if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, @@ -32,11 +33,11 @@ int __ieee80211_suspend(struct ieee80211_hw *hw) local->ops->sta_notify(hw, &sdata->vif, STA_NOTIFY_REMOVE, &sta->sta); } + spin_unlock_irqrestore(&local->sta_lock, flags); } /* remove all interfaces */ list_for_each_entry(sdata, &local->interfaces, list) { - if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN && sdata->vif.type != NL80211_IFTYPE_MONITOR && netif_running(sdata->dev)) { @@ -64,6 +65,7 @@ int __ieee80211_resume(struct ieee80211_hw *hw) struct ieee80211_sub_if_data *sdata; struct ieee80211_if_init_conf conf; struct sta_info *sta; + unsigned long flags; int res; /* restart hardware */ @@ -75,7 +77,6 @@ int __ieee80211_resume(struct ieee80211_hw *hw) /* add interfaces */ list_for_each_entry(sdata, &local->interfaces, list) { - if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN && sdata->vif.type != NL80211_IFTYPE_MONITOR && netif_running(sdata->dev)) { @@ -87,9 +88,9 @@ int __ieee80211_resume(struct ieee80211_hw *hw) } /* add STAs back */ - list_for_each_entry(sta, &local->sta_list, list) { - - if (local->ops->sta_notify) { + if (local->ops->sta_notify) { + spin_lock_irqsave(&local->sta_lock, flags); + list_for_each_entry(sta, &local->sta_list, list) { if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, @@ -98,6 +99,7 @@ int __ieee80211_resume(struct ieee80211_hw *hw) local->ops->sta_notify(hw, &sdata->vif, STA_NOTIFY_ADD, &sta->sta); } + spin_unlock_irqrestore(&local->sta_lock, flags); } /* add back keys */ -- cgit v0.10.2 From 85067c06ba0329c37d5a357ced1f39a5583ccc98 Mon Sep 17 00:00:00 2001 From: Vasanthakumar Thiagarajan Date: Sat, 14 Mar 2009 19:59:41 +0530 Subject: ath9k: Keep LED on in idle state after association Signed-off-by: Vasanthakumar Thiagarajan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath9k/main.c b/drivers/net/wireless/ath9k/main.c index 7d27eed..4c29cef 100644 --- a/drivers/net/wireless/ath9k/main.c +++ b/drivers/net/wireless/ath9k/main.c @@ -940,18 +940,25 @@ static void ath_led_blink_work(struct work_struct *work) if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED)) return; - ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, - (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0); + + if ((sc->led_on_duration == ATH_LED_ON_DURATION_IDLE) || + (sc->led_off_duration == ATH_LED_OFF_DURATION_IDLE)) + ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 0); + else + ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, + (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0); queue_delayed_work(sc->hw->workqueue, &sc->ath_led_blink_work, (sc->sc_flags & SC_OP_LED_ON) ? 
msecs_to_jiffies(sc->led_off_duration) : msecs_to_jiffies(sc->led_on_duration)); - sc->led_on_duration = - max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25); - sc->led_off_duration = - max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10); + sc->led_on_duration = sc->led_on_cnt ? + max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25) : + ATH_LED_ON_DURATION_IDLE; + sc->led_off_duration = sc->led_off_cnt ? + max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10) : + ATH_LED_OFF_DURATION_IDLE; sc->led_on_cnt = sc->led_off_cnt = 0; if (sc->sc_flags & SC_OP_LED_ON) sc->sc_flags &= ~SC_OP_LED_ON; -- cgit v0.10.2 From 3f46b29cd8caa35fcbc46e254a5abeee4e0e9e2f Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Sat, 14 Mar 2009 19:10:51 +0100 Subject: ieee80211: document DS bit usage I keep needing this because I'm too stupid to remember it. Everybody else can probably remember, but who knows :) Signed-off-by: Johannes Berg Signed-off-by: John W. Linville diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index b1bb817..382387e 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -18,6 +18,22 @@ #include #include +/* + * DS bit usage + * + * TA = transmitter address + * RA = receiver address + * DA = destination address + * SA = source address + * + * ToDS FromDS A1(RA) A2(TA) A3 A4 Use + * ----------------------------------------------------------------- + * 0 0 DA SA BSSID - IBSS/DLS + * 0 1 DA BSSID SA - AP -> STA + * 1 0 BSSID SA DA - AP <- STA + * 1 1 RA TA DA SA unspecified (WDS) + */ + #define FCS_LEN 4 #define IEEE80211_FCTL_VERS 0x0003 -- cgit v0.10.2 From 504f365554a7f543fcd706878ff9edf785be7614 Mon Sep 17 00:00:00 2001 From: Nick Kossifidis Date: Sun, 15 Mar 2009 22:13:39 +0200 Subject: ath5k: Choose the right initvals for RF2425 * Fix a typo in initvals.c so that we use the RF2425 array for RF2425 and not RF2413 Note: This also fixes incorrect pd gain overlap since RF2425 has different pd gain overlap from RF2413 Signed-off-by: Nick Kossifidis Acked-by: Bob Copeland Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath5k/initvals.c b/drivers/net/wireless/ath5k/initvals.c index 4488643..61fb621 100644 --- a/drivers/net/wireless/ath5k/initvals.c +++ b/drivers/net/wireless/ath5k/initvals.c @@ -1510,8 +1510,8 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel) rf2425_ini_mode_end, mode); ath5k_hw_ini_registers(ah, - ARRAY_SIZE(rf2413_ini_common_end), - rf2413_ini_common_end, change_channel); + ARRAY_SIZE(rf2425_ini_common_end), + rf2425_ini_common_end, change_channel); ath5k_hw_ini_registers(ah, ARRAY_SIZE(rf5112_ini_bbgain), -- cgit v0.10.2 From 8e218fb24faef0bfe95bc91b3c05261e20439527 Mon Sep 17 00:00:00 2001 From: Nick Kossifidis Date: Sun, 15 Mar 2009 22:17:04 +0200 Subject: ath5k: Convert chip specific calibration data to a generic format * Convert chip specific calibration data to a generic format common for all chips Note: We scale up power to be in 0.25dB units for all chips for compatibility with RF5112 v2: Address Bob's and Jiri's comments Signed-off-by: Nick Kossifidis Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath5k/eeprom.c b/drivers/net/wireless/ath5k/eeprom.c index ac45ca4..c0fb3b0 100644 --- a/drivers/net/wireless/ath5k/eeprom.c +++ b/drivers/net/wireless/ath5k/eeprom.c @@ -1,7 +1,7 @@ /* * Copyright (c) 2004-2008 Reyk Floeter - * Copyright (c) 2006-2008 Nick Kossifidis - * Copyright (c) 2008 Felix Fietkau + * Copyright (c) 2006-2009 Nick Kossifidis + * Copyright (c) 2008-2009 Felix Fietkau * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -98,11 +98,6 @@ ath5k_eeprom_init_header(struct ath5k_hw *ah) int ret; u16 val; - /* Initial TX thermal adjustment values */ - ee->ee_tx_clip = 4; - ee->ee_pwd_84 = ee->ee_pwd_90 = 1; - ee->ee_gain_select = 1; - /* * Read values from EEPROM and store them in the capability structure */ @@ -241,22 +236,22 @@ static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset, ee->ee_adc_desired_size[mode] = (s8)((val >> 8) & 0xff); switch(mode) { case AR5K_EEPROM_MODE_11A: - ee->ee_ob[mode][3] = (val >> 5) & 0x7; - ee->ee_db[mode][3] = (val >> 2) & 0x7; - ee->ee_ob[mode][2] = (val << 1) & 0x7; + ee->ee_ob[mode][3] = (val >> 5) & 0x7; + ee->ee_db[mode][3] = (val >> 2) & 0x7; + ee->ee_ob[mode][2] = (val << 1) & 0x7; AR5K_EEPROM_READ(o++, val); - ee->ee_ob[mode][2] |= (val >> 15) & 0x1; - ee->ee_db[mode][2] = (val >> 12) & 0x7; - ee->ee_ob[mode][1] = (val >> 9) & 0x7; - ee->ee_db[mode][1] = (val >> 6) & 0x7; - ee->ee_ob[mode][0] = (val >> 3) & 0x7; - ee->ee_db[mode][0] = val & 0x7; + ee->ee_ob[mode][2] |= (val >> 15) & 0x1; + ee->ee_db[mode][2] = (val >> 12) & 0x7; + ee->ee_ob[mode][1] = (val >> 9) & 0x7; + ee->ee_db[mode][1] = (val >> 6) & 0x7; + ee->ee_ob[mode][0] = (val >> 3) & 0x7; + ee->ee_db[mode][0] = val & 0x7; break; case AR5K_EEPROM_MODE_11G: case AR5K_EEPROM_MODE_11B: - ee->ee_ob[mode][1] = (val >> 4) & 0x7; - ee->ee_db[mode][1] = val & 0x7; + ee->ee_ob[mode][1] = (val >> 4) & 0x7; + ee->ee_db[mode][1] = val & 0x7; break; } @@ -504,35 +499,6 @@ ath5k_eeprom_init_modes(struct ath5k_hw *ah) return 0; } -/* Used to match PCDAC steps with power values on RF5111 chips - * (eeprom versions < 4). For RF5111 we have 10 pre-defined PCDAC - * steps that match with the power values we read from eeprom. On - * older eeprom versions (< 3.2) these steps are equaly spaced at - * 10% of the pcdac curve -until the curve reaches it's maximum- - * (10 steps from 0 to 100%) but on newer eeprom versions (>= 3.2) - * these 10 steps are spaced in a different way. This function returns - * the pcdac steps based on eeprom version and curve min/max so that we - * can have pcdac/pwr points. 
- */ -static inline void -ath5k_get_pcdac_intercepts(struct ath5k_hw *ah, u8 min, u8 max, u8 *vp) -{ - static const u16 intercepts3[] = - { 0, 5, 10, 20, 30, 50, 70, 85, 90, 95, 100 }; - static const u16 intercepts3_2[] = - { 0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100 }; - const u16 *ip; - int i; - - if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_3_2) - ip = intercepts3_2; - else - ip = intercepts3; - - for (i = 0; i < ARRAY_SIZE(intercepts3); i++) - *vp++ = (ip[i] * max + (100 - ip[i]) * min) / 100; -} - /* Read the frequency piers for each mode (mostly used on newer eeproms with 0xff * frequency mask) */ static inline int @@ -546,26 +512,25 @@ ath5k_eeprom_read_freq_list(struct ath5k_hw *ah, int *offset, int max, int ret; u16 val; + ee->ee_n_piers[mode] = 0; while(i < max) { AR5K_EEPROM_READ(o++, val); - freq1 = (val >> 8) & 0xff; - freq2 = val & 0xff; - - if (freq1) { - pc[i++].freq = ath5k_eeprom_bin2freq(ee, - freq1, mode); - ee->ee_n_piers[mode]++; - } + freq1 = val & 0xff; + if (!freq1) + break; - if (freq2) { - pc[i++].freq = ath5k_eeprom_bin2freq(ee, - freq2, mode); - ee->ee_n_piers[mode]++; - } + pc[i++].freq = ath5k_eeprom_bin2freq(ee, + freq1, mode); + ee->ee_n_piers[mode]++; - if (!freq1 || !freq2) + freq2 = (val >> 8) & 0xff; + if (!freq2) break; + + pc[i++].freq = ath5k_eeprom_bin2freq(ee, + freq2, mode); + ee->ee_n_piers[mode]++; } /* return new offset */ @@ -652,13 +617,122 @@ ath5k_eeprom_init_11bg_2413(struct ath5k_hw *ah, unsigned int mode, int offset) return 0; } -/* Read power calibration for RF5111 chips +/* + * Read power calibration for RF5111 chips + * * For RF5111 we have an XPD -eXternal Power Detector- curve - * for each calibrated channel. Each curve has PCDAC steps on - * x axis and power on y axis and looks like a logarithmic - * function. To recreate the curve and pass the power values - * on the pcdac table, we read 10 points here and interpolate later. + * for each calibrated channel. Each curve has 0,5dB Power steps + * on x axis and PCDAC steps (offsets) on y axis and looks like an + * exponential function. To recreate the curve we read 11 points + * here and interpolate later. */ + +/* Used to match PCDAC steps with power values on RF5111 chips + * (eeprom versions < 4). For RF5111 we have 11 pre-defined PCDAC + * steps that match with the power values we read from eeprom. On + * older eeprom versions (< 3.2) these steps are equaly spaced at + * 10% of the pcdac curve -until the curve reaches it's maximum- + * (11 steps from 0 to 100%) but on newer eeprom versions (>= 3.2) + * these 11 steps are spaced in a different way. This function returns + * the pcdac steps based on eeprom version and curve min/max so that we + * can have pcdac/pwr points. 
+ */ +static inline void +ath5k_get_pcdac_intercepts(struct ath5k_hw *ah, u8 min, u8 max, u8 *vp) +{ + const static u16 intercepts3[] = + { 0, 5, 10, 20, 30, 50, 70, 85, 90, 95, 100 }; + const static u16 intercepts3_2[] = + { 0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100 }; + const u16 *ip; + int i; + + if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_3_2) + ip = intercepts3_2; + else + ip = intercepts3; + + for (i = 0; i < ARRAY_SIZE(intercepts3); i++) + vp[i] = (ip[i] * max + (100 - ip[i]) * min) / 100; +} + +/* Convert RF5111 specific data to generic raw data + * used by interpolation code */ +static int +ath5k_eeprom_convert_pcal_info_5111(struct ath5k_hw *ah, int mode, + struct ath5k_chan_pcal_info *chinfo) +{ + struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + struct ath5k_chan_pcal_info_rf5111 *pcinfo; + struct ath5k_pdgain_info *pd; + u8 pier, point, idx; + u8 *pdgain_idx = ee->ee_pdc_to_idx[mode]; + + /* Fill raw data for each calibration pier */ + for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) { + + pcinfo = &chinfo[pier].rf5111_info; + + /* Allocate pd_curves for this cal pier */ + chinfo[pier].pd_curves = + kcalloc(AR5K_EEPROM_N_PD_CURVES, + sizeof(struct ath5k_pdgain_info), + GFP_KERNEL); + + if (!chinfo[pier].pd_curves) + return -ENOMEM; + + /* Only one curve for RF5111 + * find out which one and place + * in in pd_curves. + * Note: ee_x_gain is reversed here */ + for (idx = 0; idx < AR5K_EEPROM_N_PD_CURVES; idx++) { + + if (!((ee->ee_x_gain[mode] >> idx) & 0x1)) { + pdgain_idx[0] = idx; + break; + } + } + + ee->ee_pd_gains[mode] = 1; + + pd = &chinfo[pier].pd_curves[idx]; + + pd->pd_points = AR5K_EEPROM_N_PWR_POINTS_5111; + + /* Allocate pd points for this curve */ + pd->pd_step = kcalloc(AR5K_EEPROM_N_PWR_POINTS_5111, + sizeof(u8), GFP_KERNEL); + if (!pd->pd_step) + return -ENOMEM; + + pd->pd_pwr = kcalloc(AR5K_EEPROM_N_PWR_POINTS_5111, + sizeof(s16), GFP_KERNEL); + if (!pd->pd_pwr) + return -ENOMEM; + + /* Fill raw dataset + * (convert power to 0.25dB units + * for RF5112 combatibility) */ + for (point = 0; point < pd->pd_points; point++) { + + /* Absolute values */ + pd->pd_pwr[point] = 2 * pcinfo->pwr[point]; + + /* Already sorted */ + pd->pd_step[point] = pcinfo->pcdac[point]; + } + + /* Set min/max pwr */ + chinfo[pier].min_pwr = pd->pd_pwr[0]; + chinfo[pier].max_pwr = pd->pd_pwr[10]; + + } + + return 0; +} + +/* Parse EEPROM data */ static int ath5k_eeprom_read_pcal_info_5111(struct ath5k_hw *ah, int mode) { @@ -747,30 +821,165 @@ ath5k_eeprom_read_pcal_info_5111(struct ath5k_hw *ah, int mode) cdata->pcdac_max, cdata->pcdac); } - return 0; + return ath5k_eeprom_convert_pcal_info_5111(ah, mode, pcal); } -/* Read power calibration for RF5112 chips + +/* + * Read power calibration for RF5112 chips + * * For RF5112 we have 4 XPD -eXternal Power Detector- curves * for each calibrated channel on 0, -6, -12 and -18dbm but we only - * use the higher (3) and the lower (0) curves. Each curve has PCDAC - * steps on x axis and power on y axis and looks like a linear - * function. To recreate the curve and pass the power values - * on the pcdac table, we read 4 points for xpd 0 and 3 points - * for xpd 3 here and interpolate later. + * use the higher (3) and the lower (0) curves. Each curve has 0.5dB + * power steps on x axis and PCDAC steps on y axis and looks like a + * linear function. 
To recreate the curve and pass the power values + * on hw, we read 4 points for xpd 0 (lower gain -> max power) + * and 3 points for xpd 3 (higher gain -> lower power) here and + * interpolate later. * * Note: Many vendors just use xpd 0 so xpd 3 is zeroed. */ + +/* Convert RF5112 specific data to generic raw data + * used by interpolation code */ +static int +ath5k_eeprom_convert_pcal_info_5112(struct ath5k_hw *ah, int mode, + struct ath5k_chan_pcal_info *chinfo) +{ + struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + struct ath5k_chan_pcal_info_rf5112 *pcinfo; + u8 *pdgain_idx = ee->ee_pdc_to_idx[mode]; + unsigned int pier, pdg, point; + + /* Fill raw data for each calibration pier */ + for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) { + + pcinfo = &chinfo[pier].rf5112_info; + + /* Allocate pd_curves for this cal pier */ + chinfo[pier].pd_curves = + kcalloc(AR5K_EEPROM_N_PD_CURVES, + sizeof(struct ath5k_pdgain_info), + GFP_KERNEL); + + if (!chinfo[pier].pd_curves) + return -ENOMEM; + + /* Fill pd_curves */ + for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) { + + u8 idx = pdgain_idx[pdg]; + struct ath5k_pdgain_info *pd = + &chinfo[pier].pd_curves[idx]; + + /* Lowest gain curve (max power) */ + if (pdg == 0) { + /* One more point for better accuracy */ + pd->pd_points = AR5K_EEPROM_N_XPD0_POINTS; + + /* Allocate pd points for this curve */ + pd->pd_step = kcalloc(pd->pd_points, + sizeof(u8), GFP_KERNEL); + + if (!pd->pd_step) + return -ENOMEM; + + pd->pd_pwr = kcalloc(pd->pd_points, + sizeof(s16), GFP_KERNEL); + + if (!pd->pd_pwr) + return -ENOMEM; + + + /* Fill raw dataset + * (all power levels are in 0.25dB units) */ + pd->pd_step[0] = pcinfo->pcdac_x0[0]; + pd->pd_pwr[0] = pcinfo->pwr_x0[0]; + + for (point = 1; point < pd->pd_points; + point++) { + /* Absolute values */ + pd->pd_pwr[point] = + pcinfo->pwr_x0[point]; + + /* Deltas */ + pd->pd_step[point] = + pd->pd_step[point - 1] + + pcinfo->pcdac_x0[point]; + } + + /* Set min power for this frequency */ + chinfo[pier].min_pwr = pd->pd_pwr[0]; + + /* Highest gain curve (min power) */ + } else if (pdg == 1) { + + pd->pd_points = AR5K_EEPROM_N_XPD3_POINTS; + + /* Allocate pd points for this curve */ + pd->pd_step = kcalloc(pd->pd_points, + sizeof(u8), GFP_KERNEL); + + if (!pd->pd_step) + return -ENOMEM; + + pd->pd_pwr = kcalloc(pd->pd_points, + sizeof(s16), GFP_KERNEL); + + if (!pd->pd_pwr) + return -ENOMEM; + + /* Fill raw dataset + * (all power levels are in 0.25dB units) */ + for (point = 0; point < pd->pd_points; + point++) { + /* Absolute values */ + pd->pd_pwr[point] = + pcinfo->pwr_x3[point]; + + /* Fixed points */ + pd->pd_step[point] = + pcinfo->pcdac_x3[point]; + } + + /* Since we have a higher gain curve + * override min power */ + chinfo[pier].min_pwr = pd->pd_pwr[0]; + } + } + } + + return 0; +} + +/* Parse EEPROM data */ static int ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; struct ath5k_chan_pcal_info_rf5112 *chan_pcal_info; struct ath5k_chan_pcal_info *gen_chan_info; + u8 *pdgain_idx = ee->ee_pdc_to_idx[mode]; u32 offset; - unsigned int i, c; + u8 i, c; u16 val; int ret; + u8 pd_gains = 0; + + /* Count how many curves we have and + * identify them (which one of the 4 + * available curves we have on each count). 
+ * Curves are stored from lower (x0) to + * higher (x3) gain */ + for (i = 0; i < AR5K_EEPROM_N_PD_CURVES; i++) { + /* ee_x_gain[mode] is x gain mask */ + if ((ee->ee_x_gain[mode] >> i) & 0x1) + pdgain_idx[pd_gains++] = i; + } + ee->ee_pd_gains[mode] = pd_gains; + + if (pd_gains == 0 || pd_gains > 2) + return -EINVAL; switch (mode) { case AR5K_EEPROM_MODE_11A: @@ -808,13 +1017,13 @@ ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode) for (i = 0; i < ee->ee_n_piers[mode]; i++) { chan_pcal_info = &gen_chan_info[i].rf5112_info; - /* Power values in dBm * 4 + /* Power values in quarter dB * for the lower xpd gain curve * (0 dBm -> higher output power) */ for (c = 0; c < AR5K_EEPROM_N_XPD0_POINTS; c++) { AR5K_EEPROM_READ(offset++, val); - chan_pcal_info->pwr_x0[c] = (val & 0xff); - chan_pcal_info->pwr_x0[++c] = ((val >> 8) & 0xff); + chan_pcal_info->pwr_x0[c] = (s8) (val & 0xff); + chan_pcal_info->pwr_x0[++c] = (s8) ((val >> 8) & 0xff); } /* PCDAC steps @@ -825,12 +1034,12 @@ ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode) chan_pcal_info->pcdac_x0[2] = ((val >> 5) & 0x1f); chan_pcal_info->pcdac_x0[3] = ((val >> 10) & 0x1f); - /* Power values in dBm * 4 + /* Power values in quarter dB * for the higher xpd gain curve * (18 dBm -> lower output power) */ AR5K_EEPROM_READ(offset++, val); - chan_pcal_info->pwr_x3[0] = (val & 0xff); - chan_pcal_info->pwr_x3[1] = ((val >> 8) & 0xff); + chan_pcal_info->pwr_x3[0] = (s8) (val & 0xff); + chan_pcal_info->pwr_x3[1] = (s8) ((val >> 8) & 0xff); AR5K_EEPROM_READ(offset++, val); chan_pcal_info->pwr_x3[2] = (val & 0xff); @@ -843,24 +1052,36 @@ ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode) chan_pcal_info->pcdac_x3[2] = 63; if (ee->ee_version >= AR5K_EEPROM_VERSION_4_3) { - chan_pcal_info->pcdac_x0[0] = ((val >> 8) & 0xff); + chan_pcal_info->pcdac_x0[0] = ((val >> 8) & 0x3f); /* Last xpd0 power level is also channel maximum */ gen_chan_info[i].max_pwr = chan_pcal_info->pwr_x0[3]; } else { chan_pcal_info->pcdac_x0[0] = 1; - gen_chan_info[i].max_pwr = ((val >> 8) & 0xff); + gen_chan_info[i].max_pwr = (s8) ((val >> 8) & 0xff); } - /* Recreate pcdac_x0 table for this channel using pcdac steps */ - chan_pcal_info->pcdac_x0[1] += chan_pcal_info->pcdac_x0[0]; - chan_pcal_info->pcdac_x0[2] += chan_pcal_info->pcdac_x0[1]; - chan_pcal_info->pcdac_x0[3] += chan_pcal_info->pcdac_x0[2]; } - return 0; + return ath5k_eeprom_convert_pcal_info_5112(ah, mode, gen_chan_info); } + +/* + * Read power calibration for RF2413 chips + * + * For RF2413 we have a Power to PDDAC table (Power Detector) + * instead of a PCDAC and 4 pd gain curves for each calibrated channel. + * Each curve has power on x axis in 0.5 db steps and PDDADC steps on y + * axis and looks like an exponential function like the RF5111 curve. + * + * To recreate the curves we read here the points and interpolate + * later. Note that in most cases only 2 (higher and lower) curves are + * used (like RF5112) but vendors have the oportunity to include all + * 4 curves on eeprom. The final curve (higher power) has an extra + * point for better accuracy like RF5112. + */ + /* For RF2413 power calibration data doesn't start on a fixed location and * if a mode is not supported, it's section is missing -not zeroed-. 
* So we need to calculate the starting offset for each section by using @@ -890,13 +1111,15 @@ ath5k_cal_data_offset_2413(struct ath5k_eeprom_info *ee, int mode) switch(mode) { case AR5K_EEPROM_MODE_11G: if (AR5K_EEPROM_HDR_11B(ee->ee_header)) - offset += ath5k_pdgains_size_2413(ee, AR5K_EEPROM_MODE_11B) + - AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2; + offset += ath5k_pdgains_size_2413(ee, + AR5K_EEPROM_MODE_11B) + + AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2; /* fall through */ case AR5K_EEPROM_MODE_11B: if (AR5K_EEPROM_HDR_11A(ee->ee_header)) - offset += ath5k_pdgains_size_2413(ee, AR5K_EEPROM_MODE_11A) + - AR5K_EEPROM_N_5GHZ_CHAN / 2; + offset += ath5k_pdgains_size_2413(ee, + AR5K_EEPROM_MODE_11A) + + AR5K_EEPROM_N_5GHZ_CHAN / 2; /* fall through */ case AR5K_EEPROM_MODE_11A: break; @@ -907,37 +1130,118 @@ ath5k_cal_data_offset_2413(struct ath5k_eeprom_info *ee, int mode) return offset; } -/* Read power calibration for RF2413 chips - * For RF2413 we have a PDDAC table (Power Detector) instead - * of a PCDAC and 4 pd gain curves for each calibrated channel. - * Each curve has PDDAC steps on x axis and power on y axis and - * looks like an exponential function. To recreate the curves - * we read here the points and interpolate later. Note that - * in most cases only higher and lower curves are used (like - * RF5112) but vendors have the oportunity to include all 4 - * curves on eeprom. The final curve (higher power) has an extra - * point for better accuracy like RF5112. - */ +/* Convert RF2413 specific data to generic raw data + * used by interpolation code */ +static int +ath5k_eeprom_convert_pcal_info_2413(struct ath5k_hw *ah, int mode, + struct ath5k_chan_pcal_info *chinfo) +{ + struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + struct ath5k_chan_pcal_info_rf2413 *pcinfo; + u8 *pdgain_idx = ee->ee_pdc_to_idx[mode]; + unsigned int pier, pdg, point; + + /* Fill raw data for each calibration pier */ + for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) { + + pcinfo = &chinfo[pier].rf2413_info; + + /* Allocate pd_curves for this cal pier */ + chinfo[pier].pd_curves = + kcalloc(AR5K_EEPROM_N_PD_CURVES, + sizeof(struct ath5k_pdgain_info), + GFP_KERNEL); + + if (!chinfo[pier].pd_curves) + return -ENOMEM; + + /* Fill pd_curves */ + for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) { + + u8 idx = pdgain_idx[pdg]; + struct ath5k_pdgain_info *pd = + &chinfo[pier].pd_curves[idx]; + + /* One more point for the highest power + * curve (lowest gain) */ + if (pdg == ee->ee_pd_gains[mode] - 1) + pd->pd_points = AR5K_EEPROM_N_PD_POINTS; + else + pd->pd_points = AR5K_EEPROM_N_PD_POINTS - 1; + + /* Allocate pd points for this curve */ + pd->pd_step = kcalloc(pd->pd_points, + sizeof(u8), GFP_KERNEL); + + if (!pd->pd_step) + return -ENOMEM; + + pd->pd_pwr = kcalloc(pd->pd_points, + sizeof(s16), GFP_KERNEL); + + if (!pd->pd_pwr) + return -ENOMEM; + + /* Fill raw dataset + * convert all pwr levels to + * quarter dB for RF5112 combatibility */ + pd->pd_step[0] = pcinfo->pddac_i[pdg]; + pd->pd_pwr[0] = 4 * pcinfo->pwr_i[pdg]; + + for (point = 1; point < pd->pd_points; point++) { + + pd->pd_pwr[point] = pd->pd_pwr[point - 1] + + 2 * pcinfo->pwr[pdg][point - 1]; + + pd->pd_step[point] = pd->pd_step[point - 1] + + pcinfo->pddac[pdg][point - 1]; + + } + + /* Highest gain curve -> min power */ + if (pdg == 0) + chinfo[pier].min_pwr = pd->pd_pwr[0]; + + /* Lowest gain curve -> max power */ + if (pdg == ee->ee_pd_gains[mode] - 1) + chinfo[pier].max_pwr = + pd->pd_pwr[pd->pd_points - 1]; + } + } + + return 0; +} + +/* 
Parse EEPROM data */ static int ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; - struct ath5k_chan_pcal_info_rf2413 *chan_pcal_info; - struct ath5k_chan_pcal_info *gen_chan_info; - unsigned int i, c; + struct ath5k_chan_pcal_info_rf2413 *pcinfo; + struct ath5k_chan_pcal_info *chinfo; + u8 *pdgain_idx = ee->ee_pdc_to_idx[mode]; u32 offset; - int ret; + int idx, i, ret; u16 val; u8 pd_gains = 0; - if (ee->ee_x_gain[mode] & 0x1) pd_gains++; - if ((ee->ee_x_gain[mode] >> 1) & 0x1) pd_gains++; - if ((ee->ee_x_gain[mode] >> 2) & 0x1) pd_gains++; - if ((ee->ee_x_gain[mode] >> 3) & 0x1) pd_gains++; + /* Count how many curves we have and + * identify them (which one of the 4 + * available curves we have on each count). + * Curves are stored from higher to + * lower gain so we go backwards */ + for (idx = AR5K_EEPROM_N_PD_CURVES - 1; idx >= 0; idx--) { + /* ee_x_gain[mode] is x gain mask */ + if ((ee->ee_x_gain[mode] >> idx) & 0x1) + pdgain_idx[pd_gains++] = idx; + + } ee->ee_pd_gains[mode] = pd_gains; + if (pd_gains == 0) + return -EINVAL; + offset = ath5k_cal_data_offset_2413(ee, mode); - ee->ee_n_piers[mode] = 0; switch (mode) { case AR5K_EEPROM_MODE_11A: if (!AR5K_EEPROM_HDR_11A(ee->ee_header)) @@ -945,7 +1249,7 @@ ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode) ath5k_eeprom_init_11a_pcal_freq(ah, offset); offset += AR5K_EEPROM_N_5GHZ_CHAN / 2; - gen_chan_info = ee->ee_pwr_cal_a; + chinfo = ee->ee_pwr_cal_a; break; case AR5K_EEPROM_MODE_11B: if (!AR5K_EEPROM_HDR_11B(ee->ee_header)) @@ -953,7 +1257,7 @@ ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode) ath5k_eeprom_init_11bg_2413(ah, mode, offset); offset += AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2; - gen_chan_info = ee->ee_pwr_cal_b; + chinfo = ee->ee_pwr_cal_b; break; case AR5K_EEPROM_MODE_11G: if (!AR5K_EEPROM_HDR_11G(ee->ee_header)) @@ -961,41 +1265,35 @@ ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode) ath5k_eeprom_init_11bg_2413(ah, mode, offset); offset += AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2; - gen_chan_info = ee->ee_pwr_cal_g; + chinfo = ee->ee_pwr_cal_g; break; default: return -EINVAL; } - if (pd_gains == 0) - return 0; - for (i = 0; i < ee->ee_n_piers[mode]; i++) { - chan_pcal_info = &gen_chan_info[i].rf2413_info; + pcinfo = &chinfo[i].rf2413_info; /* * Read pwr_i, pddac_i and the first * 2 pd points (pwr, pddac) */ AR5K_EEPROM_READ(offset++, val); - chan_pcal_info->pwr_i[0] = val & 0x1f; - chan_pcal_info->pddac_i[0] = (val >> 5) & 0x7f; - chan_pcal_info->pwr[0][0] = - (val >> 12) & 0xf; + pcinfo->pwr_i[0] = val & 0x1f; + pcinfo->pddac_i[0] = (val >> 5) & 0x7f; + pcinfo->pwr[0][0] = (val >> 12) & 0xf; AR5K_EEPROM_READ(offset++, val); - chan_pcal_info->pddac[0][0] = val & 0x3f; - chan_pcal_info->pwr[0][1] = (val >> 6) & 0xf; - chan_pcal_info->pddac[0][1] = - (val >> 10) & 0x3f; + pcinfo->pddac[0][0] = val & 0x3f; + pcinfo->pwr[0][1] = (val >> 6) & 0xf; + pcinfo->pddac[0][1] = (val >> 10) & 0x3f; AR5K_EEPROM_READ(offset++, val); - chan_pcal_info->pwr[0][2] = val & 0xf; - chan_pcal_info->pddac[0][2] = - (val >> 4) & 0x3f; + pcinfo->pwr[0][2] = val & 0xf; + pcinfo->pddac[0][2] = (val >> 4) & 0x3f; - chan_pcal_info->pwr[0][3] = 0; - chan_pcal_info->pddac[0][3] = 0; + pcinfo->pwr[0][3] = 0; + pcinfo->pddac[0][3] = 0; if (pd_gains > 1) { /* @@ -1003,44 +1301,36 @@ ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode) * so it only has 2 pd points. * Continue wih pd gain 1. 
*/ - chan_pcal_info->pwr_i[1] = (val >> 10) & 0x1f; + pcinfo->pwr_i[1] = (val >> 10) & 0x1f; - chan_pcal_info->pddac_i[1] = (val >> 15) & 0x1; + pcinfo->pddac_i[1] = (val >> 15) & 0x1; AR5K_EEPROM_READ(offset++, val); - chan_pcal_info->pddac_i[1] |= (val & 0x3F) << 1; + pcinfo->pddac_i[1] |= (val & 0x3F) << 1; - chan_pcal_info->pwr[1][0] = (val >> 6) & 0xf; - chan_pcal_info->pddac[1][0] = - (val >> 10) & 0x3f; + pcinfo->pwr[1][0] = (val >> 6) & 0xf; + pcinfo->pddac[1][0] = (val >> 10) & 0x3f; AR5K_EEPROM_READ(offset++, val); - chan_pcal_info->pwr[1][1] = val & 0xf; - chan_pcal_info->pddac[1][1] = - (val >> 4) & 0x3f; - chan_pcal_info->pwr[1][2] = - (val >> 10) & 0xf; - - chan_pcal_info->pddac[1][2] = - (val >> 14) & 0x3; + pcinfo->pwr[1][1] = val & 0xf; + pcinfo->pddac[1][1] = (val >> 4) & 0x3f; + pcinfo->pwr[1][2] = (val >> 10) & 0xf; + + pcinfo->pddac[1][2] = (val >> 14) & 0x3; AR5K_EEPROM_READ(offset++, val); - chan_pcal_info->pddac[1][2] |= - (val & 0xF) << 2; + pcinfo->pddac[1][2] |= (val & 0xF) << 2; - chan_pcal_info->pwr[1][3] = 0; - chan_pcal_info->pddac[1][3] = 0; + pcinfo->pwr[1][3] = 0; + pcinfo->pddac[1][3] = 0; } else if (pd_gains == 1) { /* * Pd gain 0 is the last one so * read the extra point. */ - chan_pcal_info->pwr[0][3] = - (val >> 10) & 0xf; + pcinfo->pwr[0][3] = (val >> 10) & 0xf; - chan_pcal_info->pddac[0][3] = - (val >> 14) & 0x3; + pcinfo->pddac[0][3] = (val >> 14) & 0x3; AR5K_EEPROM_READ(offset++, val); - chan_pcal_info->pddac[0][3] |= - (val & 0xF) << 2; + pcinfo->pddac[0][3] |= (val & 0xF) << 2; } /* @@ -1048,105 +1338,65 @@ ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode) * as above. */ if (pd_gains > 2) { - chan_pcal_info->pwr_i[2] = (val >> 4) & 0x1f; - chan_pcal_info->pddac_i[2] = (val >> 9) & 0x7f; + pcinfo->pwr_i[2] = (val >> 4) & 0x1f; + pcinfo->pddac_i[2] = (val >> 9) & 0x7f; AR5K_EEPROM_READ(offset++, val); - chan_pcal_info->pwr[2][0] = - (val >> 0) & 0xf; - chan_pcal_info->pddac[2][0] = - (val >> 4) & 0x3f; - chan_pcal_info->pwr[2][1] = - (val >> 10) & 0xf; - - chan_pcal_info->pddac[2][1] = - (val >> 14) & 0x3; + pcinfo->pwr[2][0] = (val >> 0) & 0xf; + pcinfo->pddac[2][0] = (val >> 4) & 0x3f; + pcinfo->pwr[2][1] = (val >> 10) & 0xf; + + pcinfo->pddac[2][1] = (val >> 14) & 0x3; AR5K_EEPROM_READ(offset++, val); - chan_pcal_info->pddac[2][1] |= - (val & 0xF) << 2; + pcinfo->pddac[2][1] |= (val & 0xF) << 2; - chan_pcal_info->pwr[2][2] = - (val >> 4) & 0xf; - chan_pcal_info->pddac[2][2] = - (val >> 8) & 0x3f; + pcinfo->pwr[2][2] = (val >> 4) & 0xf; + pcinfo->pddac[2][2] = (val >> 8) & 0x3f; - chan_pcal_info->pwr[2][3] = 0; - chan_pcal_info->pddac[2][3] = 0; + pcinfo->pwr[2][3] = 0; + pcinfo->pddac[2][3] = 0; } else if (pd_gains == 2) { - chan_pcal_info->pwr[1][3] = - (val >> 4) & 0xf; - chan_pcal_info->pddac[1][3] = - (val >> 8) & 0x3f; + pcinfo->pwr[1][3] = (val >> 4) & 0xf; + pcinfo->pddac[1][3] = (val >> 8) & 0x3f; } if (pd_gains > 3) { - chan_pcal_info->pwr_i[3] = (val >> 14) & 0x3; + pcinfo->pwr_i[3] = (val >> 14) & 0x3; AR5K_EEPROM_READ(offset++, val); - chan_pcal_info->pwr_i[3] |= ((val >> 0) & 0x7) << 2; + pcinfo->pwr_i[3] |= ((val >> 0) & 0x7) << 2; - chan_pcal_info->pddac_i[3] = (val >> 3) & 0x7f; - chan_pcal_info->pwr[3][0] = - (val >> 10) & 0xf; - chan_pcal_info->pddac[3][0] = - (val >> 14) & 0x3; + pcinfo->pddac_i[3] = (val >> 3) & 0x7f; + pcinfo->pwr[3][0] = (val >> 10) & 0xf; + pcinfo->pddac[3][0] = (val >> 14) & 0x3; AR5K_EEPROM_READ(offset++, val); - chan_pcal_info->pddac[3][0] |= - (val & 0xF) << 2; - 
chan_pcal_info->pwr[3][1] = - (val >> 4) & 0xf; - chan_pcal_info->pddac[3][1] = - (val >> 8) & 0x3f; - - chan_pcal_info->pwr[3][2] = - (val >> 14) & 0x3; + pcinfo->pddac[3][0] |= (val & 0xF) << 2; + pcinfo->pwr[3][1] = (val >> 4) & 0xf; + pcinfo->pddac[3][1] = (val >> 8) & 0x3f; + + pcinfo->pwr[3][2] = (val >> 14) & 0x3; AR5K_EEPROM_READ(offset++, val); - chan_pcal_info->pwr[3][2] |= - ((val >> 0) & 0x3) << 2; + pcinfo->pwr[3][2] |= ((val >> 0) & 0x3) << 2; - chan_pcal_info->pddac[3][2] = - (val >> 2) & 0x3f; - chan_pcal_info->pwr[3][3] = - (val >> 8) & 0xf; + pcinfo->pddac[3][2] = (val >> 2) & 0x3f; + pcinfo->pwr[3][3] = (val >> 8) & 0xf; - chan_pcal_info->pddac[3][3] = - (val >> 12) & 0xF; + pcinfo->pddac[3][3] = (val >> 12) & 0xF; AR5K_EEPROM_READ(offset++, val); - chan_pcal_info->pddac[3][3] |= - ((val >> 0) & 0x3) << 4; + pcinfo->pddac[3][3] |= ((val >> 0) & 0x3) << 4; } else if (pd_gains == 3) { - chan_pcal_info->pwr[2][3] = - (val >> 14) & 0x3; + pcinfo->pwr[2][3] = (val >> 14) & 0x3; AR5K_EEPROM_READ(offset++, val); - chan_pcal_info->pwr[2][3] |= - ((val >> 0) & 0x3) << 2; - - chan_pcal_info->pddac[2][3] = - (val >> 2) & 0x3f; - } + pcinfo->pwr[2][3] |= ((val >> 0) & 0x3) << 2; - for (c = 0; c < pd_gains; c++) { - /* Recreate pwr table for this channel using pwr steps */ - chan_pcal_info->pwr[c][0] += chan_pcal_info->pwr_i[c] * 2; - chan_pcal_info->pwr[c][1] += chan_pcal_info->pwr[c][0]; - chan_pcal_info->pwr[c][2] += chan_pcal_info->pwr[c][1]; - chan_pcal_info->pwr[c][3] += chan_pcal_info->pwr[c][2]; - if (chan_pcal_info->pwr[c][3] == chan_pcal_info->pwr[c][2]) - chan_pcal_info->pwr[c][3] = 0; - - /* Recreate pddac table for this channel using pddac steps */ - chan_pcal_info->pddac[c][0] += chan_pcal_info->pddac_i[c]; - chan_pcal_info->pddac[c][1] += chan_pcal_info->pddac[c][0]; - chan_pcal_info->pddac[c][2] += chan_pcal_info->pddac[c][1]; - chan_pcal_info->pddac[c][3] += chan_pcal_info->pddac[c][2]; - if (chan_pcal_info->pddac[c][3] == chan_pcal_info->pddac[c][2]) - chan_pcal_info->pddac[c][3] = 0; + pcinfo->pddac[2][3] = (val >> 2) & 0x3f; } } - return 0; + return ath5k_eeprom_convert_pcal_info_2413(ah, mode, chinfo); } + /* * Read per rate target power (this is the maximum tx power * supported by the card). This info is used when setting @@ -1154,11 +1404,12 @@ ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode) * * This also works for v5 EEPROMs. 
*/ -static int ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode) +static int +ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; struct ath5k_rate_pcal_info *rate_pcal_info; - u16 *rate_target_pwr_num; + u8 *rate_target_pwr_num; u32 offset; u16 val; int ret, i; @@ -1264,7 +1515,9 @@ ath5k_eeprom_read_pcal_info(struct ath5k_hw *ah) else read_pcal = ath5k_eeprom_read_pcal_info_5111; - for (mode = AR5K_EEPROM_MODE_11A; mode <= AR5K_EEPROM_MODE_11G; mode++) { + + for (mode = AR5K_EEPROM_MODE_11A; mode <= AR5K_EEPROM_MODE_11G; + mode++) { err = read_pcal(ah, mode); if (err) return err; @@ -1277,6 +1530,62 @@ ath5k_eeprom_read_pcal_info(struct ath5k_hw *ah) return 0; } +static int +ath5k_eeprom_free_pcal_info(struct ath5k_hw *ah, int mode) +{ + struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + struct ath5k_chan_pcal_info *chinfo; + u8 pier, pdg; + + switch (mode) { + case AR5K_EEPROM_MODE_11A: + if (!AR5K_EEPROM_HDR_11A(ee->ee_header)) + return 0; + chinfo = ee->ee_pwr_cal_a; + break; + case AR5K_EEPROM_MODE_11B: + if (!AR5K_EEPROM_HDR_11B(ee->ee_header)) + return 0; + chinfo = ee->ee_pwr_cal_b; + break; + case AR5K_EEPROM_MODE_11G: + if (!AR5K_EEPROM_HDR_11G(ee->ee_header)) + return 0; + chinfo = ee->ee_pwr_cal_g; + break; + default: + return -EINVAL; + } + + for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) { + if (!chinfo[pier].pd_curves) + continue; + + for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) { + struct ath5k_pdgain_info *pd = + &chinfo[pier].pd_curves[pdg]; + + if (pd != NULL) { + kfree(pd->pd_step); + kfree(pd->pd_pwr); + } + } + + kfree(chinfo[pier].pd_curves); + } + + return 0; +} + +void +ath5k_eeprom_detach(struct ath5k_hw *ah) +{ + u8 mode; + + for (mode = AR5K_EEPROM_MODE_11A; mode <= AR5K_EEPROM_MODE_11G; mode++) + ath5k_eeprom_free_pcal_info(ah, mode); +} + /* Read conformance test limits used for regulatory control */ static int ath5k_eeprom_read_ctl_info(struct ath5k_hw *ah) @@ -1457,3 +1766,4 @@ bool ath5k_eeprom_is_hb63(struct ath5k_hw *ah) else return false; } + diff --git a/drivers/net/wireless/ath5k/eeprom.h b/drivers/net/wireless/ath5k/eeprom.h index 1deebc0..b0c0606 100644 --- a/drivers/net/wireless/ath5k/eeprom.h +++ b/drivers/net/wireless/ath5k/eeprom.h @@ -173,6 +173,7 @@ #define AR5K_EEPROM_N_5GHZ_CHAN 10 #define AR5K_EEPROM_N_2GHZ_CHAN 3 #define AR5K_EEPROM_N_2GHZ_CHAN_2413 4 +#define AR5K_EEPROM_N_2GHZ_CHAN_MAX 4 #define AR5K_EEPROM_MAX_CHAN 10 #define AR5K_EEPROM_N_PWR_POINTS_5111 11 #define AR5K_EEPROM_N_PCDAC 11 @@ -193,7 +194,7 @@ #define AR5K_EEPROM_SCALE_OC_DELTA(_x) (((_x) * 2) / 10) #define AR5K_EEPROM_N_CTLS(_v) AR5K_EEPROM_OFF(_v, 16, 32) #define AR5K_EEPROM_MAX_CTLS 32 -#define AR5K_EEPROM_N_XPD_PER_CHANNEL 4 +#define AR5K_EEPROM_N_PD_CURVES 4 #define AR5K_EEPROM_N_XPD0_POINTS 4 #define AR5K_EEPROM_N_XPD3_POINTS 3 #define AR5K_EEPROM_N_PD_GAINS 4 @@ -232,7 +233,7 @@ enum ath5k_ctl_mode { AR5K_CTL_11B = 1, AR5K_CTL_11G = 2, AR5K_CTL_TURBO = 3, - AR5K_CTL_108G = 4, + AR5K_CTL_TURBOG = 4, AR5K_CTL_2GHT20 = 5, AR5K_CTL_5GHT20 = 6, AR5K_CTL_2GHT40 = 7, @@ -240,65 +241,114 @@ enum ath5k_ctl_mode { AR5K_CTL_MODE_M = 15, }; +/* Default CTL ids for the 3 main reg domains. + * Atheros only uses these by default but vendors + * can have up to 32 different CTLs for different + * scenarios. 
Note that theese values are ORed with + * the mode id (above) so we can have up to 24 CTL + * datasets out of these 3 main regdomains. That leaves + * 8 ids that can be used by vendors and since 0x20 is + * missing from HAL sources i guess this is the set of + * custom CTLs vendors can use. */ +#define AR5K_CTL_FCC 0x10 +#define AR5K_CTL_CUSTOM 0x20 +#define AR5K_CTL_ETSI 0x30 +#define AR5K_CTL_MKK 0x40 + +/* Indicates a CTL with only mode set and + * no reg domain mapping, such CTLs are used + * for world roaming domains or simply when + * a reg domain is not set */ +#define AR5K_CTL_NO_REGDOMAIN 0xf0 + +/* Indicates an empty (invalid) CTL */ +#define AR5K_CTL_NO_CTL 0xff + /* Per channel calibration data, used for power table setup */ struct ath5k_chan_pcal_info_rf5111 { /* Power levels in half dbm units * for one power curve. */ - u8 pwr[AR5K_EEPROM_N_PWR_POINTS_5111]; + u8 pwr[AR5K_EEPROM_N_PWR_POINTS_5111]; /* PCDAC table steps * for the above values */ - u8 pcdac[AR5K_EEPROM_N_PWR_POINTS_5111]; + u8 pcdac[AR5K_EEPROM_N_PWR_POINTS_5111]; /* Starting PCDAC step */ - u8 pcdac_min; + u8 pcdac_min; /* Final PCDAC step */ - u8 pcdac_max; + u8 pcdac_max; }; struct ath5k_chan_pcal_info_rf5112 { /* Power levels in quarter dBm units * for lower (0) and higher (3) - * level curves */ - s8 pwr_x0[AR5K_EEPROM_N_XPD0_POINTS]; - s8 pwr_x3[AR5K_EEPROM_N_XPD3_POINTS]; + * level curves in 0.25dB units */ + s8 pwr_x0[AR5K_EEPROM_N_XPD0_POINTS]; + s8 pwr_x3[AR5K_EEPROM_N_XPD3_POINTS]; /* PCDAC table steps * for the above values */ - u8 pcdac_x0[AR5K_EEPROM_N_XPD0_POINTS]; - u8 pcdac_x3[AR5K_EEPROM_N_XPD3_POINTS]; + u8 pcdac_x0[AR5K_EEPROM_N_XPD0_POINTS]; + u8 pcdac_x3[AR5K_EEPROM_N_XPD3_POINTS]; }; struct ath5k_chan_pcal_info_rf2413 { /* Starting pwr/pddac values */ - s8 pwr_i[AR5K_EEPROM_N_PD_GAINS]; - u8 pddac_i[AR5K_EEPROM_N_PD_GAINS]; - /* (pwr,pddac) points */ - s8 pwr[AR5K_EEPROM_N_PD_GAINS] - [AR5K_EEPROM_N_PD_POINTS]; - u8 pddac[AR5K_EEPROM_N_PD_GAINS] - [AR5K_EEPROM_N_PD_POINTS]; + s8 pwr_i[AR5K_EEPROM_N_PD_GAINS]; + u8 pddac_i[AR5K_EEPROM_N_PD_GAINS]; + /* (pwr,pddac) points + * power levels in 0.5dB units */ + s8 pwr[AR5K_EEPROM_N_PD_GAINS] + [AR5K_EEPROM_N_PD_POINTS]; + u8 pddac[AR5K_EEPROM_N_PD_GAINS] + [AR5K_EEPROM_N_PD_POINTS]; +}; + +enum ath5k_powertable_type { + AR5K_PWRTABLE_PWR_TO_PCDAC = 0, + AR5K_PWRTABLE_LINEAR_PCDAC = 1, + AR5K_PWRTABLE_PWR_TO_PDADC = 2, +}; + +struct ath5k_pdgain_info { + u8 pd_points; + u8 *pd_step; + /* Power values are in + * 0.25dB units */ + s16 *pd_pwr; }; struct ath5k_chan_pcal_info { /* Frequency */ u16 freq; - /* Max available power */ - s8 max_pwr; + /* Tx power boundaries */ + s16 max_pwr; + s16 min_pwr; union { struct ath5k_chan_pcal_info_rf5111 rf5111_info; struct ath5k_chan_pcal_info_rf5112 rf5112_info; struct ath5k_chan_pcal_info_rf2413 rf2413_info; }; + /* Raw values used by phy code + * Curves are stored in order from lower + * gain to higher gain (max txpower -> min txpower) */ + struct ath5k_pdgain_info *pd_curves; }; -/* Per rate calibration data for each mode, used for power table setup */ +/* Per rate calibration data for each mode, + * used for rate power table setup. 
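
The calibration structures above mix two fixed-point conventions: the per-rate targets and the raw RF2413 (pwr, pddac) points are in 0.5dB units, while the RF5112 pwr_x0/pwr_x3 points and the pd_pwr values are in 0.25dB units. A tiny standalone helper showing the conversions; the 12.5dB sample value is arbitrary.

#include <stdio.h>

static int half_to_quarter_db(int v)    { return v * 2; }
static int quarter_to_half_db(int v)    { return v / 2; }

int main(void)
{
        int half_db_units = 25;         /* 12.5dB expressed in 0.5dB steps */

        printf("12.5dB = %d half-dB units = %d quarter-dB units\n",
               half_db_units, half_to_quarter_db(half_db_units));
        printf("62 quarter-dB units = %d half-dB units = %.2f dB\n",
               quarter_to_half_db(62), 62 / 4.0);
        return 0;
}
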
+ * Note: Values in 0.5dB units */ struct ath5k_rate_pcal_info { u16 freq; /* Frequency */ - /* Power level for 6-24Mbit/s rates */ + /* Power level for 6-24Mbit/s rates or + * 1Mb rate */ u16 target_power_6to24; - /* Power level for 36Mbit rate */ + /* Power level for 36Mbit rate or + * 2Mb rate */ u16 target_power_36; - /* Power level for 48Mbit rate */ + /* Power level for 48Mbit rate or + * 5.5Mbit rate */ u16 target_power_48; - /* Power level for 54Mbit rate */ + /* Power level for 54Mbit rate or + * 11Mbit rate */ u16 target_power_54; }; @@ -330,12 +380,6 @@ struct ath5k_eeprom_info { u16 ee_cck_ofdm_power_delta; u16 ee_scaled_cck_delta; - /* Used for tx thermal adjustment (eeprom_init, rfregs) */ - u16 ee_tx_clip; - u16 ee_pwd_84; - u16 ee_pwd_90; - u16 ee_gain_select; - /* RF Calibration settings (reset, rfregs) */ u16 ee_i_cal[AR5K_EEPROM_N_MODES]; u16 ee_q_cal[AR5K_EEPROM_N_MODES]; @@ -363,23 +407,25 @@ struct ath5k_eeprom_info { /* Power calibration data */ u16 ee_false_detect[AR5K_EEPROM_N_MODES]; - /* Number of pd gain curves per mode (RF2413) */ - u8 ee_pd_gains[AR5K_EEPROM_N_MODES]; + /* Number of pd gain curves per mode */ + u8 ee_pd_gains[AR5K_EEPROM_N_MODES]; + /* Back mapping pdcurve number -> pdcurve index in pd->pd_curves */ + u8 ee_pdc_to_idx[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_PD_GAINS]; - u8 ee_n_piers[AR5K_EEPROM_N_MODES]; + u8 ee_n_piers[AR5K_EEPROM_N_MODES]; struct ath5k_chan_pcal_info ee_pwr_cal_a[AR5K_EEPROM_N_5GHZ_CHAN]; - struct ath5k_chan_pcal_info ee_pwr_cal_b[AR5K_EEPROM_N_2GHZ_CHAN]; - struct ath5k_chan_pcal_info ee_pwr_cal_g[AR5K_EEPROM_N_2GHZ_CHAN]; + struct ath5k_chan_pcal_info ee_pwr_cal_b[AR5K_EEPROM_N_2GHZ_CHAN_MAX]; + struct ath5k_chan_pcal_info ee_pwr_cal_g[AR5K_EEPROM_N_2GHZ_CHAN_MAX]; /* Per rate target power levels */ - u16 ee_rate_target_pwr_num[AR5K_EEPROM_N_MODES]; + u8 ee_rate_target_pwr_num[AR5K_EEPROM_N_MODES]; struct ath5k_rate_pcal_info ee_rate_tpwr_a[AR5K_EEPROM_N_5GHZ_CHAN]; - struct ath5k_rate_pcal_info ee_rate_tpwr_b[AR5K_EEPROM_N_2GHZ_CHAN]; - struct ath5k_rate_pcal_info ee_rate_tpwr_g[AR5K_EEPROM_N_2GHZ_CHAN]; + struct ath5k_rate_pcal_info ee_rate_tpwr_b[AR5K_EEPROM_N_2GHZ_CHAN_MAX]; + struct ath5k_rate_pcal_info ee_rate_tpwr_g[AR5K_EEPROM_N_2GHZ_CHAN_MAX]; /* Conformance test limits (Unused) */ - u16 ee_ctls; - u16 ee_ctl[AR5K_EEPROM_MAX_CTLS]; + u8 ee_ctls; + u8 ee_ctl[AR5K_EEPROM_MAX_CTLS]; struct ath5k_edge_power ee_ctl_pwr[AR5K_EEPROM_N_EDGES * AR5K_EEPROM_MAX_CTLS]; /* Noise Floor Calibration settings */ -- cgit v0.10.2 From 9ca9fb8aa8422595956af9681518cdb8b167055e Mon Sep 17 00:00:00 2001 From: Bob Copeland Date: Mon, 16 Mar 2009 22:34:02 -0400 Subject: ath5k: disable MIB interrupts The MIB interrupt fires whenever counters overflow; however without support for automatic noise immunity, we can sometimes get an interrupt storm. The get_stats() callback reads the counters anyway so we can disable the interrupt for now until ANI is implemented. This fixes the issue reported in http://bugzilla.kernel.org/show_bug.cgi?id=12647. Changes-licensed-under: 3-Clause-BSD Cc: stable@kernel.org Signed-off-by: Bob Copeland Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c index cad3ccf..8eec155 100644 --- a/drivers/net/wireless/ath5k/base.c +++ b/drivers/net/wireless/ath5k/base.c @@ -2305,7 +2305,7 @@ ath5k_init(struct ath5k_softc *sc) sc->curband = &sc->sbands[sc->curchan->band]; sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL | AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL | - AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB; + AR5K_INT_FATAL | AR5K_INT_GLOBAL; ret = ath5k_reset(sc, false, false); if (ret) goto done; -- cgit v0.10.2 From e23a9014fd4d502a419255a83e2479ab804c6f16 Mon Sep 17 00:00:00 2001 From: Bob Copeland Date: Mon, 16 Mar 2009 22:34:03 -0400 Subject: ath5k: remove dummy PCI "retry timeout" fix Remove the PCI retry timeout code, for all the same reasons that Luis Rodriguez removed it for ath9k. Changes-licensed-under: 3-Clause-BSD Cc: Luis Rodriguez Signed-off-by: Bob Copeland Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c index 8eec155..ef9825f 100644 --- a/drivers/net/wireless/ath5k/base.c +++ b/drivers/net/wireless/ath5k/base.c @@ -685,13 +685,6 @@ ath5k_pci_resume(struct pci_dev *pdev) if (err) return err; - /* - * Suspend/Resume resets the PCI configuration space, so we have to - * re-disable the RETRY_TIMEOUT register (0x41) to keep - * PCI Tx retries from interfering with C3 CPU state - */ - pci_write_config_byte(pdev, 0x41, 0); - err = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc); if (err) { ATH5K_ERR(sc, "request_irq failed\n"); -- cgit v0.10.2 From 722f069a6dc95d7c6c2cdfbe3413899a3b768f9c Mon Sep 17 00:00:00 2001 From: Sujith Date: Tue, 17 Mar 2009 08:50:06 +0530 Subject: mac80211: Tear down aggregation sessions for suspend/resume When the driver has been notified with a STA_REMOVE, it tears down the internal ADDBA state. On resume, trying to initiate aggregation would fail because mac80211 has not cleared the operational state for that . This can be fixed by tearing down the existing sessions on a suspend. Also, the driver can initiate a new BA session when suspend is in progress. This is fixed by marking the station as being in suspend state and denying ADDBA requests for such STAs. Signed-off-by: Sujith Acked-by: Johannes Berg Signed-off-by: John W. Linville diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index a95affc..07656d8 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c @@ -197,6 +197,14 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, status = WLAN_STATUS_REQUEST_DECLINED; + if (test_sta_flags(sta, WLAN_STA_SUSPEND)) { +#ifdef CONFIG_MAC80211_HT_DEBUG + printk(KERN_DEBUG "Suspend in progress. " + "Denying ADDBA request\n"); +#endif + goto end_no_lock; + } + /* sanity check for incoming parameters: * check if configuration can support the BA policy * and if buffer size does not exceeds max value */ diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 1df116d..e5776ef 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -257,6 +257,15 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid) goto unlock; } + if (test_sta_flags(sta, WLAN_STA_SUSPEND)) { +#ifdef CONFIG_MAC80211_HT_DEBUG + printk(KERN_DEBUG "Suspend in progress. 
" + "Denying BA session request\n"); +#endif + ret = -EINVAL; + goto unlock; + } + spin_lock_bh(&sta->lock); sdata = sta->sdata; diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c index ef7be1c..1e6152a 100644 --- a/net/mac80211/pm.c +++ b/net/mac80211/pm.c @@ -21,6 +21,19 @@ int __ieee80211_suspend(struct ieee80211_hw *hw) list_for_each_entry(sdata, &local->interfaces, list) ieee80211_disable_keys(sdata); + /* Tear down aggregation sessions */ + + rcu_read_lock(); + + if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) { + list_for_each_entry_rcu(sta, &local->sta_list, list) { + set_sta_flags(sta, WLAN_STA_SUSPEND); + ieee80211_sta_tear_down_BA_sessions(sta); + } + } + + rcu_read_unlock(); + /* remove STAs */ if (local->ops->sta_notify) { spin_lock_irqsave(&local->sta_lock, flags); @@ -102,6 +115,18 @@ int __ieee80211_resume(struct ieee80211_hw *hw) spin_unlock_irqrestore(&local->sta_lock, flags); } + /* Clear Suspend state so that ADDBA requests can be processed */ + + rcu_read_lock(); + + if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) { + list_for_each_entry_rcu(sta, &local->sta_list, list) { + clear_sta_flags(sta, WLAN_STA_SUSPEND); + } + } + + rcu_read_unlock(); + /* add back keys */ list_for_each_entry(sdata, &local->interfaces, list) if (netif_running(sdata->dev)) diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 1f45573..5b223b2 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -35,6 +35,8 @@ * IEEE80211_TX_CTL_CLEAR_PS_FILT control flag) when the next * frame to this station is transmitted. * @WLAN_STA_MFP: Management frame protection is used with this STA. + * @WLAN_STA_SUSPEND: Set/cleared during a suspend/resume cycle. + * Used to deny ADDBA requests (both TX and RX). */ enum ieee80211_sta_info_flags { WLAN_STA_AUTH = 1<<0, @@ -48,6 +50,7 @@ enum ieee80211_sta_info_flags { WLAN_STA_PSPOLL = 1<<8, WLAN_STA_CLEAR_PS_FILT = 1<<9, WLAN_STA_MFP = 1<<10, + WLAN_STA_SUSPEND = 1<<11 }; #define STA_TID_NUM 16 -- cgit v0.10.2 From 6d5eaafa558783a669bb46c3dba902370e8f0ffc Mon Sep 17 00:00:00 2001 From: Ivo van Doorn Date: Tue, 17 Mar 2009 11:29:19 +0100 Subject: rt2x00: Update MAINTAINERS entry: new mailinglist The rt2400-devel mailinglist will be replaced by the new mailinglist rt2x00-users. Signed-off-by: Ivo van Doorn Signed-off-by: John W. Linville diff --git a/MAINTAINERS b/MAINTAINERS index fa7be04..bdadcb3 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3602,7 +3602,7 @@ S: Maintained RALINK RT2X00 WIRELESS LAN DRIVER P: rt2x00 project L: linux-wireless@vger.kernel.org -L: rt2400-devel@lists.sourceforge.net +L: users@rt2x00.serialmonkey.com W: http://rt2x00.serialmonkey.com/ S: Maintained T: git kernel.org:/pub/scm/linux/kernel/git/ivd/rt2x00.git -- cgit v0.10.2 From 8f655dde240293f3b82313cae91c64ffd7b64c50 Mon Sep 17 00:00:00 2001 From: Nick Kossifidis Date: Sun, 15 Mar 2009 22:20:35 +0200 Subject: ath5k: Add tx power calibration support * Add tx power calibration support * Add a few tx power limits * Hardcode default power to 12.5dB * Disable TPC for now v2: Address Jiri's comments Signed-off-by: Nick Kossifidis Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath5k/ath5k.h b/drivers/net/wireless/ath5k/ath5k.h index 0dc2c73..0b616e7 100644 --- a/drivers/net/wireless/ath5k/ath5k.h +++ b/drivers/net/wireless/ath5k/ath5k.h @@ -204,9 +204,9 @@ #define AR5K_TUNE_CWMAX_11B 1023 #define AR5K_TUNE_CWMAX_XR 7 #define AR5K_TUNE_NOISE_FLOOR -72 -#define AR5K_TUNE_MAX_TXPOWER 60 -#define AR5K_TUNE_DEFAULT_TXPOWER 30 -#define AR5K_TUNE_TPC_TXPOWER true +#define AR5K_TUNE_MAX_TXPOWER 63 +#define AR5K_TUNE_DEFAULT_TXPOWER 25 +#define AR5K_TUNE_TPC_TXPOWER false #define AR5K_TUNE_ANT_DIVERSITY true #define AR5K_TUNE_HWTXTRIES 4 @@ -551,11 +551,11 @@ enum ath5k_pkt_type { */ #define AR5K_TXPOWER_OFDM(_r, _v) ( \ ((0 & 1) << ((_v) + 6)) | \ - (((ah->ah_txpower.txp_rates[(_r)]) & 0x3f) << (_v)) \ + (((ah->ah_txpower.txp_rates_power_table[(_r)]) & 0x3f) << (_v)) \ ) #define AR5K_TXPOWER_CCK(_r, _v) ( \ - (ah->ah_txpower.txp_rates[(_r)] & 0x3f) << (_v) \ + (ah->ah_txpower.txp_rates_power_table[(_r)] & 0x3f) << (_v) \ ) /* @@ -1085,13 +1085,25 @@ struct ath5k_hw { struct ath5k_gain ah_gain; u8 ah_offset[AR5K_MAX_RF_BANKS]; + struct { - u16 txp_pcdac[AR5K_EEPROM_POWER_TABLE_SIZE]; - u16 txp_rates[AR5K_MAX_RATES]; - s16 txp_min; - s16 txp_max; + /* Temporary tables used for interpolation */ + u8 tmpL[AR5K_EEPROM_N_PD_GAINS] + [AR5K_EEPROM_POWER_TABLE_SIZE]; + u8 tmpR[AR5K_EEPROM_N_PD_GAINS] + [AR5K_EEPROM_POWER_TABLE_SIZE]; + u8 txp_pd_table[AR5K_EEPROM_POWER_TABLE_SIZE * 2]; + u16 txp_rates_power_table[AR5K_MAX_RATES]; + u8 txp_min_idx; bool txp_tpc; + /* Values in 0.25dB units */ + s16 txp_min_pwr; + s16 txp_max_pwr; + s16 txp_offset; s16 txp_ofdm; + /* Values in dB units */ + s16 txp_cck_ofdm_pwr_delta; + s16 txp_cck_ofdm_gainf_delta; } ah_txpower; struct { @@ -1161,6 +1173,7 @@ extern void ath5k_hw_update_mib_counters(struct ath5k_hw *ah, struct ieee80211_l /* EEPROM access functions */ extern int ath5k_eeprom_init(struct ath5k_hw *ah); +extern void ath5k_eeprom_detach(struct ath5k_hw *ah); extern int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac); extern bool ath5k_eeprom_is_hb63(struct ath5k_hw *ah); @@ -1256,8 +1269,8 @@ extern void ath5k_hw_set_def_antenna(struct ath5k_hw *ah, unsigned int ant); extern unsigned int ath5k_hw_get_def_antenna(struct ath5k_hw *ah); extern int ath5k_hw_phy_disable(struct ath5k_hw *ah); /* TX power setup */ -extern int ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, unsigned int txpower); -extern int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, unsigned int power); +extern int ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, u8 ee_mode, u8 txpower); +extern int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 ee_mode, u8 txpower); /* * Functions used internaly diff --git a/drivers/net/wireless/ath5k/attach.c b/drivers/net/wireless/ath5k/attach.c index 656cb9d..70d376c 100644 --- a/drivers/net/wireless/ath5k/attach.c +++ b/drivers/net/wireless/ath5k/attach.c @@ -341,6 +341,8 @@ void ath5k_hw_detach(struct ath5k_hw *ah) if (ah->ah_rf_banks != NULL) kfree(ah->ah_rf_banks); + ath5k_eeprom_detach(ah); + /* assume interrupts are down */ kfree(ah); } diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c index ef9825f..f28b86c 100644 --- a/drivers/net/wireless/ath5k/base.c +++ b/drivers/net/wireless/ath5k/base.c @@ -1209,6 +1209,9 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf) pktlen = skb->len; + /* FIXME: If we are in g mode and rate is a CCK rate + * subtract 
ah->ah_txpower.txp_cck_ofdm_pwr_delta + * from tx power (value is in dB units already) */ if (info->control.hw_key) { keyidx = info->control.hw_key->hw_key_idx; pktlen += info->control.hw_key->icv_len; @@ -2037,6 +2040,9 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf) antenna = sc->bsent & 4 ? 2 : 1; } + /* FIXME: If we are in g mode and rate is a CCK rate + * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta + * from tx power (value is in dB units already) */ ds->ds_data = bf->skbaddr; ret = ah->ah_setup_tx_desc(ah, ds, skb->len, ieee80211_get_hdrlen_from_skb(skb), @@ -2601,12 +2607,6 @@ ath5k_reset(struct ath5k_softc *sc, bool stop, bool change_channel) goto err; } - /* - * This is needed only to setup initial state - * but it's best done after a reset. - */ - ath5k_hw_set_txpower_limit(sc->ah, 0); - ret = ath5k_rx_start(sc); if (ret) { ATH5K_ERR(sc, "can't start recv logic\n"); diff --git a/drivers/net/wireless/ath5k/desc.c b/drivers/net/wireless/ath5k/desc.c index b40a928..dc30a2b 100644 --- a/drivers/net/wireless/ath5k/desc.c +++ b/drivers/net/wireless/ath5k/desc.c @@ -194,6 +194,10 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah, return -EINVAL; } + tx_power += ah->ah_txpower.txp_offset; + if (tx_power > AR5K_TUNE_MAX_TXPOWER) + tx_power = AR5K_TUNE_MAX_TXPOWER; + /* Clear descriptor */ memset(&desc->ud.ds_tx5212, 0, sizeof(struct ath5k_hw_5212_tx_desc)); diff --git a/drivers/net/wireless/ath5k/phy.c b/drivers/net/wireless/ath5k/phy.c index 81f5beb..9e2faae 100644 --- a/drivers/net/wireless/ath5k/phy.c +++ b/drivers/net/wireless/ath5k/phy.c @@ -4,6 +4,7 @@ * Copyright (c) 2004-2007 Reyk Floeter * Copyright (c) 2006-2009 Nick Kossifidis * Copyright (c) 2007-2008 Jiri Slaby + * Copyright (c) 2008-2009 Felix Fietkau * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -183,7 +184,9 @@ static void ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah) if (ah->ah_gain.g_state != AR5K_RFGAIN_ACTIVE) return; - ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txpower.txp_max, + /* Send the packet with 2dB below max power as + * patent doc suggest */ + ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txpower.txp_max_pwr - 4, AR5K_PHY_PAPD_PROBE_TXPOWER) | AR5K_PHY_PAPD_PROBE_TX_NEXT, AR5K_PHY_PAPD_PROBE); @@ -1433,93 +1436,1120 @@ unsigned int ath5k_hw_get_def_antenna(struct ath5k_hw *ah) return false; /*XXX: What do we return for 5210 ?*/ } + +/****************\ +* TX power setup * +\****************/ + +/* + * Helper functions + */ + +/* + * Do linear interpolation between two given (x, y) points + */ +static s16 +ath5k_get_interpolated_value(s16 target, s16 x_left, s16 x_right, + s16 y_left, s16 y_right) +{ + s16 ratio, result; + + /* Avoid divide by zero and skip interpolation + * if we have the same point */ + if ((x_left == x_right) || (y_left == y_right)) + return y_left; + + /* + * Since we use ints and not fps, we need to scale up in + * order to get a sane ratio value (or else we 'll eg. get + * always 1 instead of 1.25, 1.75 etc). We scale up by 100 + * to have some accuracy both for 0.5 and 0.25 steps. + */ + ratio = ((100 * y_right - 100 * y_left)/(x_right - x_left)); + + /* Now scale down to be in range */ + result = y_left + (ratio * (target - x_left) / 100); + + return result; +} + +/* + * Find vertical boundary (min pwr) for the linear PCDAC curve. 
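
ath5k_get_interpolated_value() above avoids floating point by scaling the slope by 100 before the final division, so fractional ratios such as 1.25 survive integer math. A standalone copy of the idea with one worked data point; the coordinates are invented.

#include <stdio.h>

/* Integer linear interpolation with the slope scaled by 100, as in
 * ath5k_get_interpolated_value() above. */
static short interpolate(short target, short x_left, short x_right,
                         short y_left, short y_right)
{
        short ratio;

        if (x_left == x_right || y_left == y_right)
                return y_left;

        ratio = (100 * y_right - 100 * y_left) / (x_right - x_left);
        return y_left + ratio * (target - x_left) / 100;
}

int main(void)
{
        /* y at x = 2437 on the line through (2412, 10) and (2462, 20). */
        printf("%d\n", interpolate(2437, 2412, 2462, 10, 20)); /* prints 15 */
        return 0;
}
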
+ * + * Since we have the top of the curve and we draw the line below + * until we reach 1 (1 pcdac step) we need to know which point + * (x value) that is so that we don't go below y axis and have negative + * pcdac values when creating the curve, or fill the table with zeroes. + */ +static s16 +ath5k_get_linear_pcdac_min(const u8 *stepL, const u8 *stepR, + const s16 *pwrL, const s16 *pwrR) +{ + s8 tmp; + s16 min_pwrL, min_pwrR; + s16 pwr_i = pwrL[0]; + + do { + pwr_i--; + tmp = (s8) ath5k_get_interpolated_value(pwr_i, + pwrL[0], pwrL[1], + stepL[0], stepL[1]); + + } while (tmp > 1); + + min_pwrL = pwr_i; + + pwr_i = pwrR[0]; + do { + pwr_i--; + tmp = (s8) ath5k_get_interpolated_value(pwr_i, + pwrR[0], pwrR[1], + stepR[0], stepR[1]); + + } while (tmp > 1); + + min_pwrR = pwr_i; + + /* Keep the right boundary so that it works for both curves */ + return max(min_pwrL, min_pwrR); +} + +/* + * Interpolate (pwr,vpd) points to create a Power to PDADC or a + * Power to PCDAC curve. + * + * Each curve has power on x axis (in 0.5dB units) and PCDAC/PDADC + * steps (offsets) on y axis. Power can go up to 31.5dB and max + * PCDAC/PDADC step for each curve is 64 but we can write more than + * one curves on hw so we can go up to 128 (which is the max step we + * can write on the final table). + * + * We write y values (PCDAC/PDADC steps) on hw. + */ +static void +ath5k_create_power_curve(s16 pmin, s16 pmax, + const s16 *pwr, const u8 *vpd, + u8 num_points, + u8 *vpd_table, u8 type) +{ + u8 idx[2] = { 0, 1 }; + s16 pwr_i = 2*pmin; + int i; + + if (num_points < 2) + return; + + /* We want the whole line, so adjust boundaries + * to cover the entire power range. Note that + * power values are already 0.25dB so no need + * to multiply pwr_i by 2 */ + if (type == AR5K_PWRTABLE_LINEAR_PCDAC) { + pwr_i = pmin; + pmin = 0; + pmax = 63; + } + + /* Find surrounding turning points (TPs) + * and interpolate between them */ + for (i = 0; (i <= (u16) (pmax - pmin)) && + (i < AR5K_EEPROM_POWER_TABLE_SIZE); i++) { + + /* We passed the right TP, move to the next set of TPs + * if we pass the last TP, extrapolate above using the last + * two TPs for ratio */ + if ((pwr_i > pwr[idx[1]]) && (idx[1] < num_points - 1)) { + idx[0]++; + idx[1]++; + } + + vpd_table[i] = (u8) ath5k_get_interpolated_value(pwr_i, + pwr[idx[0]], pwr[idx[1]], + vpd[idx[0]], vpd[idx[1]]); + + /* Increase by 0.5dB + * (0.25 dB units) */ + pwr_i += 2; + } +} + +/* + * Get the surrounding per-channel power calibration piers + * for a given frequency so that we can interpolate between + * them and come up with an apropriate dataset for our current + * channel. + */ +static void +ath5k_get_chan_pcal_surrounding_piers(struct ath5k_hw *ah, + struct ieee80211_channel *channel, + struct ath5k_chan_pcal_info **pcinfo_l, + struct ath5k_chan_pcal_info **pcinfo_r) +{ + struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + struct ath5k_chan_pcal_info *pcinfo; + u8 idx_l, idx_r; + u8 mode, max, i; + u32 target = channel->center_freq; + + idx_l = 0; + idx_r = 0; + + if (!(channel->hw_value & CHANNEL_OFDM)) { + pcinfo = ee->ee_pwr_cal_b; + mode = AR5K_EEPROM_MODE_11B; + } else if (channel->hw_value & CHANNEL_2GHZ) { + pcinfo = ee->ee_pwr_cal_g; + mode = AR5K_EEPROM_MODE_11G; + } else { + pcinfo = ee->ee_pwr_cal_a; + mode = AR5K_EEPROM_MODE_11A; + } + max = ee->ee_n_piers[mode] - 1; + + /* Frequency is below our calibrated + * range. 
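
ath5k_create_power_curve() above walks the power axis in 0.5dB (two quarter-dB) increments, tracks the surrounding calibration turning points, and interpolates a PCDAC/PDADC step for each position. A simplified standalone version that stops at the last turning point instead of extrapolating beyond it; the (power, step) points are invented.

#include <stdio.h>

static short interp(short t, short xl, short xr, short yl, short yr)
{
        if (xl == xr || yl == yr)
                return yl;
        return yl + ((100 * yr - 100 * yl) / (xr - xl)) * (t - xl) / 100;
}

int main(void)
{
        /* Turning points: power (0.25dB units) -> PCDAC/PDADC step. */
        short pwr[4] = { 0, 16, 36, 60 };
        unsigned char vpd[4] = { 2, 18, 40, 58 };
        unsigned char curve[64];
        short pwr_i = pwr[0];
        int lo = 0, hi = 1, n = 0, i;

        /* Walk in 0.5dB steps, advancing the turning-point pair as we
         * pass each calibrated point. */
        while (pwr_i <= pwr[3] && n < 64) {
                while (pwr_i > pwr[hi] && hi < 3) {
                        lo++;
                        hi++;
                }
                curve[n++] = (unsigned char)interp(pwr_i, pwr[lo], pwr[hi],
                                                   vpd[lo], vpd[hi]);
                pwr_i += 2;
        }

        for (i = 0; i < n; i++)
                printf("%d ", curve[i]);
        printf("\n");
        return 0;
}
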
Use the lowest power curve + * we have */ + if (target < pcinfo[0].freq) { + idx_l = idx_r = 0; + goto done; + } + + /* Frequency is above our calibrated + * range. Use the highest power curve + * we have */ + if (target > pcinfo[max].freq) { + idx_l = idx_r = max; + goto done; + } + + /* Frequency is inside our calibrated + * channel range. Pick the surrounding + * calibration piers so that we can + * interpolate */ + for (i = 0; i <= max; i++) { + + /* Frequency matches one of our calibration + * piers, no need to interpolate, just use + * that calibration pier */ + if (pcinfo[i].freq == target) { + idx_l = idx_r = i; + goto done; + } + + /* We found a calibration pier that's above + * frequency, use this pier and the previous + * one to interpolate */ + if (target < pcinfo[i].freq) { + idx_r = i; + idx_l = idx_r - 1; + goto done; + } + } + +done: + *pcinfo_l = &pcinfo[idx_l]; + *pcinfo_r = &pcinfo[idx_r]; + + return; +} + +/* + * Get the surrounding per-rate power calibration data + * for a given frequency and interpolate between power + * values to set max target power supported by hw for + * each rate. + */ +static void +ath5k_get_rate_pcal_data(struct ath5k_hw *ah, + struct ieee80211_channel *channel, + struct ath5k_rate_pcal_info *rates) +{ + struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + struct ath5k_rate_pcal_info *rpinfo; + u8 idx_l, idx_r; + u8 mode, max, i; + u32 target = channel->center_freq; + + idx_l = 0; + idx_r = 0; + + if (!(channel->hw_value & CHANNEL_OFDM)) { + rpinfo = ee->ee_rate_tpwr_b; + mode = AR5K_EEPROM_MODE_11B; + } else if (channel->hw_value & CHANNEL_2GHZ) { + rpinfo = ee->ee_rate_tpwr_g; + mode = AR5K_EEPROM_MODE_11G; + } else { + rpinfo = ee->ee_rate_tpwr_a; + mode = AR5K_EEPROM_MODE_11A; + } + max = ee->ee_rate_target_pwr_num[mode] - 1; + + /* Get the surrounding calibration + * piers - same as above */ + if (target < rpinfo[0].freq) { + idx_l = idx_r = 0; + goto done; + } + + if (target > rpinfo[max].freq) { + idx_l = idx_r = max; + goto done; + } + + for (i = 0; i <= max; i++) { + + if (rpinfo[i].freq == target) { + idx_l = idx_r = i; + goto done; + } + + if (target < rpinfo[i].freq) { + idx_r = i; + idx_l = idx_r - 1; + goto done; + } + } + +done: + /* Now interpolate power value, based on the frequency */ + rates->freq = target; + + rates->target_power_6to24 = + ath5k_get_interpolated_value(target, rpinfo[idx_l].freq, + rpinfo[idx_r].freq, + rpinfo[idx_l].target_power_6to24, + rpinfo[idx_r].target_power_6to24); + + rates->target_power_36 = + ath5k_get_interpolated_value(target, rpinfo[idx_l].freq, + rpinfo[idx_r].freq, + rpinfo[idx_l].target_power_36, + rpinfo[idx_r].target_power_36); + + rates->target_power_48 = + ath5k_get_interpolated_value(target, rpinfo[idx_l].freq, + rpinfo[idx_r].freq, + rpinfo[idx_l].target_power_48, + rpinfo[idx_r].target_power_48); + + rates->target_power_54 = + ath5k_get_interpolated_value(target, rpinfo[idx_l].freq, + rpinfo[idx_r].freq, + rpinfo[idx_l].target_power_54, + rpinfo[idx_r].target_power_54); +} + +/* + * Get the max edge power for this channel if + * we have such data from EEPROM's Conformance Test + * Limits (CTL), and limit max power if needed. 
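
Both ath5k_get_chan_pcal_surrounding_piers() and ath5k_get_rate_pcal_data() above use the same pattern: clamp to the first or last calibrated entry when the channel lies outside the calibrated range, otherwise return the two neighbouring entries to interpolate between. A generic standalone version of that bracketing search; the pier frequencies are invented.

#include <stdio.h>

/* Return the indices of the entries surrounding 'target' in a sorted
 * frequency table, clamping at both ends as the code above does. */
static void find_surrounding(const unsigned int *freq, int n,
                             unsigned int target, int *left, int *right)
{
        int i;

        if (target <= freq[0]) {
                *left = *right = 0;
                return;
        }
        if (target >= freq[n - 1]) {
                *left = *right = n - 1;
                return;
        }
        for (i = 0; i < n; i++) {
                if (freq[i] >= target) {
                        *right = i;
                        *left = (freq[i] == target) ? i : i - 1;
                        return;
                }
        }
}

int main(void)
{
        unsigned int piers[] = { 2412, 2437, 2472 };
        int l, r;

        find_surrounding(piers, 3, 2442, &l, &r);
        printf("interpolate between pier %d and pier %d\n", l, r);
        return 0;
}
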
+ * + * FIXME: Only works for world regulatory domains + */ +static void +ath5k_get_max_ctl_power(struct ath5k_hw *ah, + struct ieee80211_channel *channel) +{ + struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + struct ath5k_edge_power *rep = ee->ee_ctl_pwr; + u8 *ctl_val = ee->ee_ctl; + s16 max_chan_pwr = ah->ah_txpower.txp_max_pwr / 4; + s16 edge_pwr = 0; + u8 rep_idx; + u8 i, ctl_mode; + u8 ctl_idx = 0xFF; + u32 target = channel->center_freq; + + /* Find out a CTL for our mode that's not mapped + * on a specific reg domain. + * + * TODO: Map our current reg domain to one of the 3 available + * reg domain ids so that we can support more CTLs. */ + switch (channel->hw_value & CHANNEL_MODES) { + case CHANNEL_A: + ctl_mode = AR5K_CTL_11A | AR5K_CTL_NO_REGDOMAIN; + break; + case CHANNEL_G: + ctl_mode = AR5K_CTL_11G | AR5K_CTL_NO_REGDOMAIN; + break; + case CHANNEL_B: + ctl_mode = AR5K_CTL_11B | AR5K_CTL_NO_REGDOMAIN; + break; + case CHANNEL_T: + ctl_mode = AR5K_CTL_TURBO | AR5K_CTL_NO_REGDOMAIN; + break; + case CHANNEL_TG: + ctl_mode = AR5K_CTL_TURBOG | AR5K_CTL_NO_REGDOMAIN; + break; + case CHANNEL_XR: + /* Fall through */ + default: + return; + } + + for (i = 0; i < ee->ee_ctls; i++) { + if (ctl_val[i] == ctl_mode) { + ctl_idx = i; + break; + } + } + + /* If we have a CTL dataset available grab it and find the + * edge power for our frequency */ + if (ctl_idx == 0xFF) + return; + + /* Edge powers are sorted by frequency from lower + * to higher. Each CTL corresponds to 8 edge power + * measurements. */ + rep_idx = ctl_idx * AR5K_EEPROM_N_EDGES; + + /* Don't do boundaries check because we + * might have more that one bands defined + * for this mode */ + + /* Get the edge power that's closer to our + * frequency */ + for (i = 0; i < AR5K_EEPROM_N_EDGES; i++) { + rep_idx += i; + if (target <= rep[rep_idx].freq) + edge_pwr = (s16) rep[rep_idx].edge; + } + + if (edge_pwr) + ah->ah_txpower.txp_max_pwr = 4*min(edge_pwr, max_chan_pwr); +} + + +/* + * Power to PCDAC table functions + */ + /* - * TX power setup + * Fill Power to PCDAC table on RF5111 + * + * No further processing is needed for RF5111, the only thing we have to + * do is fill the values below and above calibration range since eeprom data + * may not cover the entire PCDAC table. */ +static void +ath5k_fill_pwr_to_pcdac_table(struct ath5k_hw *ah, s16* table_min, + s16 *table_max) +{ + u8 *pcdac_out = ah->ah_txpower.txp_pd_table; + u8 *pcdac_tmp = ah->ah_txpower.tmpL[0]; + u8 pcdac_0, pcdac_n, pcdac_i, pwr_idx, i; + s16 min_pwr, max_pwr; + + /* Get table boundaries */ + min_pwr = table_min[0]; + pcdac_0 = pcdac_tmp[0]; + + max_pwr = table_max[0]; + pcdac_n = pcdac_tmp[table_max[0] - table_min[0]]; + + /* Extrapolate below minimum using pcdac_0 */ + pcdac_i = 0; + for (i = 0; i < min_pwr; i++) + pcdac_out[pcdac_i++] = pcdac_0; + + /* Copy values from pcdac_tmp */ + pwr_idx = min_pwr; + for (i = 0 ; pwr_idx <= max_pwr && + pcdac_i < AR5K_EEPROM_POWER_TABLE_SIZE; i++) { + pcdac_out[pcdac_i++] = pcdac_tmp[i]; + pwr_idx++; + } + + /* Extrapolate above maximum */ + while (pcdac_i < AR5K_EEPROM_POWER_TABLE_SIZE) + pcdac_out[pcdac_i++] = pcdac_n; + +} /* - * Initialize the tx power table (not fully implemented) + * Combine available XPD Curves and fill Linear Power to PCDAC table + * on RF5112 + * + * RFX112 can have up to 2 curves (one for low txpower range and one for + * higher txpower range). We need to put them both on pcdac_out and place + * them in the correct location. 
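
ath5k_fill_pwr_to_pcdac_table() above pads the 64-entry hardware table: below the calibrated minimum it repeats the first PCDAC step, across the calibrated range it copies the curve, and above the maximum it repeats the last step. A standalone sketch of that padding with an invented six-point curve.

#include <stdio.h>

#define TABLE_SIZE 64

/* Pad a power->step table: repeat the first step below min_pwr,
 * copy the calibrated steps, then repeat the last step to the end. */
static void fill_table(unsigned char *out, const unsigned char *steps,
                       int min_pwr, int max_pwr)
{
        int i = 0, j = 0, pwr;

        for (pwr = 0; pwr < min_pwr && i < TABLE_SIZE; pwr++)
                out[i++] = steps[0];
        for (pwr = min_pwr; pwr <= max_pwr && i < TABLE_SIZE; pwr++)
                out[i++] = steps[j++];
        while (i < TABLE_SIZE)
                out[i++] = steps[max_pwr - min_pwr];
}

int main(void)
{
        unsigned char steps[] = { 5, 9, 14, 20, 27, 35 };       /* 6 points */
        unsigned char table[TABLE_SIZE];
        int i;

        fill_table(table, steps, 10, 15);       /* calibrated for pwr 10..15 */
        for (i = 0; i < TABLE_SIZE; i++)
                printf("%d%c", table[i], i % 16 == 15 ? '\n' : ' ');
        return 0;
}
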
In case we only have one curve available + * just fit it on pcdac_out (it's supposed to cover the entire range of + * available pwr levels since it's always the higher power curve). Extrapolate + * below and above final table if needed. */ -static void ath5k_txpower_table(struct ath5k_hw *ah, - struct ieee80211_channel *channel, s16 max_power) +static void +ath5k_combine_linear_pcdac_curves(struct ath5k_hw *ah, s16* table_min, + s16 *table_max, u8 pdcurves) { - unsigned int i, min, max, n; - u16 txpower, *rates; - - rates = ah->ah_txpower.txp_rates; - - txpower = AR5K_TUNE_DEFAULT_TXPOWER * 2; - if (max_power > txpower) - txpower = max_power > AR5K_TUNE_MAX_TXPOWER ? - AR5K_TUNE_MAX_TXPOWER : max_power; - - for (i = 0; i < AR5K_MAX_RATES; i++) - rates[i] = txpower; - - /* XXX setup target powers by rate */ - - ah->ah_txpower.txp_min = rates[7]; - ah->ah_txpower.txp_max = rates[0]; - ah->ah_txpower.txp_ofdm = rates[0]; - - /* Calculate the power table */ - n = ARRAY_SIZE(ah->ah_txpower.txp_pcdac); - min = AR5K_EEPROM_PCDAC_START; - max = AR5K_EEPROM_PCDAC_STOP; - for (i = 0; i < n; i += AR5K_EEPROM_PCDAC_STEP) - ah->ah_txpower.txp_pcdac[i] = -#ifdef notyet - min + ((i * (max - min)) / n); -#else - min; + u8 *pcdac_out = ah->ah_txpower.txp_pd_table; + u8 *pcdac_low_pwr; + u8 *pcdac_high_pwr; + u8 *pcdac_tmp; + u8 pwr; + s16 max_pwr_idx; + s16 min_pwr_idx; + s16 mid_pwr_idx = 0; + /* Edge flag turs on the 7nth bit on the PCDAC + * to delcare the higher power curve (force values + * to be greater than 64). If we only have one curve + * we don't need to set this, if we have 2 curves and + * fill the table backwards this can also be used to + * switch from higher power curve to lower power curve */ + u8 edge_flag; + int i; + + /* When we have only one curve available + * that's the higher power curve. If we have + * two curves the first is the high power curve + * and the next is the low power curve. */ + if (pdcurves > 1) { + pcdac_low_pwr = ah->ah_txpower.tmpL[1]; + pcdac_high_pwr = ah->ah_txpower.tmpL[0]; + mid_pwr_idx = table_max[1] - table_min[1] - 1; + max_pwr_idx = (table_max[0] - table_min[0]) / 2; + + /* If table size goes beyond 31.5dB, keep the + * upper 31.5dB range when setting tx power. + * Note: 126 = 31.5 dB in quarter dB steps */ + if (table_max[0] - table_min[1] > 126) + min_pwr_idx = table_max[0] - 126; + else + min_pwr_idx = table_min[1]; + + /* Since we fill table backwards + * start from high power curve */ + pcdac_tmp = pcdac_high_pwr; + + edge_flag = 0x40; +#if 0 + /* If both min and max power limits are in lower + * power curve's range, only use the low power curve. + * TODO: min/max levels are related to target + * power values requested from driver/user + * XXX: Is this really needed ? 
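
When RF5112 provides two PCDAC curves, the combining code above marks every entry taken from the higher power curve by ORing in the 0x40 edge flag, so those table values end up greater than 64. A heavily simplified standalone illustration of that tagging; the curves, their lengths and the split point are invented, and the real code additionally fills the table backwards and extrapolates at both ends.

#include <stdio.h>

#define EDGE_FLAG 0x40  /* marks entries that belong to the high-power curve */

int main(void)
{
        unsigned char low[4]  = { 2, 8, 15, 22 };       /* low-power curve */
        unsigned char high[4] = { 5, 18, 33, 50 };      /* high-power curve */
        unsigned char out[8];
        int i;

        /* Low-power range first, then high-power entries with the flag set. */
        for (i = 0; i < 4; i++)
                out[i] = low[i];
        for (i = 0; i < 4; i++)
                out[4 + i] = high[i] | EDGE_FLAG;

        for (i = 0; i < 8; i++)
                printf("out[%d] = 0x%02x\n", i, out[i]);
        return 0;
}
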
*/ + if (min_pwr < table_max[1] && + max_pwr < table_max[1]) { + edge_flag = 0; + pcdac_tmp = pcdac_low_pwr; + max_pwr_idx = (table_max[1] - table_min[1])/2; + } #endif + } else { + pcdac_low_pwr = ah->ah_txpower.tmpL[1]; /* Zeroed */ + pcdac_high_pwr = ah->ah_txpower.tmpL[0]; + min_pwr_idx = table_min[0]; + max_pwr_idx = (table_max[0] - table_min[0]) / 2; + pcdac_tmp = pcdac_high_pwr; + edge_flag = 0; + } + + /* This is used when setting tx power*/ + ah->ah_txpower.txp_min_idx = min_pwr_idx/2; + + /* Fill Power to PCDAC table backwards */ + pwr = max_pwr_idx; + for (i = 63; i >= 0; i--) { + /* Entering lower power range, reset + * edge flag and set pcdac_tmp to lower + * power curve.*/ + if (edge_flag == 0x40 && + (2*pwr <= (table_max[1] - table_min[0]) || pwr == 0)) { + edge_flag = 0x00; + pcdac_tmp = pcdac_low_pwr; + pwr = mid_pwr_idx/2; + } + + /* Don't go below 1, extrapolate below if we have + * already swithced to the lower power curve -or + * we only have one curve and edge_flag is zero + * anyway */ + if (pcdac_tmp[pwr] < 1 && (edge_flag == 0x00)) { + while (i >= 0) { + pcdac_out[i] = pcdac_out[i + 1]; + i--; + } + break; + } + + pcdac_out[i] = pcdac_tmp[pwr] | edge_flag; + + /* Extrapolate above if pcdac is greater than + * 126 -this can happen because we OR pcdac_out + * value with edge_flag on high power curve */ + if (pcdac_out[i] > 126) + pcdac_out[i] = 126; + + /* Decrease by a 0.5dB step */ + pwr--; + } } +/* Write PCDAC values on hw */ +static void +ath5k_setup_pcdac_table(struct ath5k_hw *ah) +{ + u8 *pcdac_out = ah->ah_txpower.txp_pd_table; + int i; + + /* + * Write TX power values + */ + for (i = 0; i < (AR5K_EEPROM_POWER_TABLE_SIZE / 2); i++) { + ath5k_hw_reg_write(ah, + (((pcdac_out[2*i + 0] << 8 | 0xff) & 0xffff) << 0) | + (((pcdac_out[2*i + 1] << 8 | 0xff) & 0xffff) << 16), + AR5K_PHY_PCDAC_TXPOWER(i)); + } +} + + /* - * Set transmition power + * Power to PDADC table functions */ -int /*O.K. - txpower_table is unimplemented so this doesn't work*/ -ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, - unsigned int txpower) + +/* + * Set the gain boundaries and create final Power to PDADC table + * + * We can have up to 4 pd curves, we need to do a simmilar process + * as we do for RF5112. This time we don't have an edge_flag but we + * set the gain boundaries on a separate register. + */ +static void +ath5k_combine_pwr_to_pdadc_curves(struct ath5k_hw *ah, + s16 *pwr_min, s16 *pwr_max, u8 pdcurves) { - bool tpc = ah->ah_txpower.txp_tpc; - unsigned int i; + u8 gain_boundaries[AR5K_EEPROM_N_PD_GAINS]; + u8 *pdadc_out = ah->ah_txpower.txp_pd_table; + u8 *pdadc_tmp; + s16 pdadc_0; + u8 pdadc_i, pdadc_n, pwr_step, pdg, max_idx, table_size; + u8 pd_gain_overlap; + + /* Note: Register value is initialized on initvals + * there is no feedback from hw. + * XXX: What about pd_gain_overlap from EEPROM ? */ + pd_gain_overlap = (u8) ath5k_hw_reg_read(ah, AR5K_PHY_TPC_RG5) & + AR5K_PHY_TPC_RG5_PD_GAIN_OVERLAP; + + /* Create final PDADC table */ + for (pdg = 0, pdadc_i = 0; pdg < pdcurves; pdg++) { + pdadc_tmp = ah->ah_txpower.tmpL[pdg]; + + if (pdg == pdcurves - 1) + /* 2 dB boundary stretch for last + * (higher power) curve */ + gain_boundaries[pdg] = pwr_max[pdg] + 4; + else + /* Set gain boundary in the middle + * between this curve and the next one */ + gain_boundaries[pdg] = + (pwr_max[pdg] + pwr_min[pdg + 1]) / 2; + + /* Sanity check in case our 2 db stretch got out of + * range. 
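
The loop above places one gain boundary per PD curve: halfway between a curve's maximum power and the next curve's minimum, with the last (highest power) curve stretched by four steps (the "2 dB boundary stretch" in the comment) and then clamped. A standalone version of just that boundary arithmetic with invented per-curve power ranges.

#include <stdio.h>

#define MAX_TXPOWER 63  /* clamp, mirroring AR5K_TUNE_MAX_TXPOWER */

int main(void)
{
        /* Per-curve min/max power levels (invented). */
        short pwr_min[3] = { 0, 20, 44 };
        short pwr_max[3] = { 24, 48, 62 };
        unsigned char boundary[3];
        int pdg, ncurves = 3;

        for (pdg = 0; pdg < ncurves; pdg++) {
                if (pdg == ncurves - 1)
                        boundary[pdg] = pwr_max[pdg] + 4;  /* last curve: stretch */
                else
                        boundary[pdg] = (pwr_max[pdg] + pwr_min[pdg + 1]) / 2;

                if (boundary[pdg] > MAX_TXPOWER)
                        boundary[pdg] = MAX_TXPOWER;
        }

        for (pdg = 0; pdg < ncurves; pdg++)
                printf("gain boundary %d = %d\n", pdg, boundary[pdg]);
        return 0;
}
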
*/ + if (gain_boundaries[pdg] > AR5K_TUNE_MAX_TXPOWER) + gain_boundaries[pdg] = AR5K_TUNE_MAX_TXPOWER; + + /* For the first curve (lower power) + * start from 0 dB */ + if (pdg == 0) + pdadc_0 = 0; + else + /* For the other curves use the gain overlap */ + pdadc_0 = (gain_boundaries[pdg - 1] - pwr_min[pdg]) - + pd_gain_overlap; - ATH5K_TRACE(ah->ah_sc); - if (txpower > AR5K_TUNE_MAX_TXPOWER) { - ATH5K_ERR(ah->ah_sc, "invalid tx power: %u\n", txpower); - return -EINVAL; + /* Force each power step to be at least 0.5 dB */ + if ((pdadc_tmp[1] - pdadc_tmp[0]) > 1) + pwr_step = pdadc_tmp[1] - pdadc_tmp[0]; + else + pwr_step = 1; + + /* If pdadc_0 is negative, we need to extrapolate + * below this pdgain by a number of pwr_steps */ + while ((pdadc_0 < 0) && (pdadc_i < 128)) { + s16 tmp = pdadc_tmp[0] + pdadc_0 * pwr_step; + pdadc_out[pdadc_i++] = (tmp < 0) ? 0 : (u8) tmp; + pdadc_0++; + } + + /* Set last pwr level, using gain boundaries */ + pdadc_n = gain_boundaries[pdg] + pd_gain_overlap - pwr_min[pdg]; + /* Limit it to be inside pwr range */ + table_size = pwr_max[pdg] - pwr_min[pdg]; + max_idx = (pdadc_n < table_size) ? pdadc_n : table_size; + + /* Fill pdadc_out table */ + while (pdadc_0 < max_idx) + pdadc_out[pdadc_i++] = pdadc_tmp[pdadc_0++]; + + /* Need to extrapolate above this pdgain? */ + if (pdadc_n <= max_idx) + continue; + + /* Force each power step to be at least 0.5 dB */ + if ((pdadc_tmp[table_size - 1] - pdadc_tmp[table_size - 2]) > 1) + pwr_step = pdadc_tmp[table_size - 1] - + pdadc_tmp[table_size - 2]; + else + pwr_step = 1; + + /* Extrapolate above */ + while ((pdadc_0 < (s16) pdadc_n) && + (pdadc_i < AR5K_EEPROM_POWER_TABLE_SIZE * 2)) { + s16 tmp = pdadc_tmp[table_size - 1] + + (pdadc_0 - max_idx) * pwr_step; + pdadc_out[pdadc_i++] = (tmp > 127) ? 127 : (u8) tmp; + pdadc_0++; + } } + while (pdg < AR5K_EEPROM_N_PD_GAINS) { + gain_boundaries[pdg] = gain_boundaries[pdg - 1]; + pdg++; + } + + while (pdadc_i < AR5K_EEPROM_POWER_TABLE_SIZE * 2) { + pdadc_out[pdadc_i] = pdadc_out[pdadc_i - 1]; + pdadc_i++; + } + + /* Set gain boundaries */ + ath5k_hw_reg_write(ah, + AR5K_REG_SM(pd_gain_overlap, + AR5K_PHY_TPC_RG5_PD_GAIN_OVERLAP) | + AR5K_REG_SM(gain_boundaries[0], + AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_1) | + AR5K_REG_SM(gain_boundaries[1], + AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_2) | + AR5K_REG_SM(gain_boundaries[2], + AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_3) | + AR5K_REG_SM(gain_boundaries[3], + AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_4), + AR5K_PHY_TPC_RG5); + + /* Used for setting rate power table */ + ah->ah_txpower.txp_min_idx = pwr_min[0]; + +} + +/* Write PDADC values on hw */ +static void +ath5k_setup_pwr_to_pdadc_table(struct ath5k_hw *ah, + u8 pdcurves, u8 *pdg_to_idx) +{ + u8 *pdadc_out = ah->ah_txpower.txp_pd_table; + u32 reg; + u8 i; + + /* Select the right pdgain curves */ + + /* Clear current settings */ + reg = ath5k_hw_reg_read(ah, AR5K_PHY_TPC_RG1); + reg &= ~(AR5K_PHY_TPC_RG1_PDGAIN_1 | + AR5K_PHY_TPC_RG1_PDGAIN_2 | + AR5K_PHY_TPC_RG1_PDGAIN_3 | + AR5K_PHY_TPC_RG1_NUM_PD_GAIN); + /* - * RF2413 for some reason can't - * transmit anything if we call - * this funtion, so we skip it - * until we fix txpower. + * Use pd_gains curve from eeprom * - * XXX: Assume same for RF2425 - * to be safe. + * This overrides the default setting from initvals + * in case some vendors (e.g. Zcomax) don't use the default + * curves. If we don't honor their settings we 'll get a + * 5dB (1 * gain overlap ?) drop. 
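
ath5k_setup_pwr_to_pdadc_table() above clears the PD-gain fields of AR5K_PHY_TPC_RG1 and re-encodes the number of curves plus each curve's EEPROM index. A standalone sketch of that bit packing, reusing the AR5K_PHY_TPC_RG1 field masks from the reg.h hunk of this same patch and a minimal stand-in for the driver's AR5K_REG_SM shift-into-field macro; the curve count and index mapping are invented.

#include <stdio.h>
#include <stdint.h>

/* Field masks/shifts as they appear in the reg.h hunk below. */
#define RG1_NUM_PD_GAIN         0x0000c000
#define RG1_NUM_PD_GAIN_S       14
#define RG1_PDGAIN_1            0x00030000
#define RG1_PDGAIN_1_S          16
#define RG1_PDGAIN_2            0x000c0000
#define RG1_PDGAIN_2_S          18
#define RG1_PDGAIN_3            0x00300000
#define RG1_PDGAIN_3_S          20

/* Stand-in for AR5K_REG_SM: shift a value into its field. */
#define SM(v, field)    (((v) << field##_S) & (field))

int main(void)
{
        uint32_t reg = 0;               /* pretend RG1 was read and cleared */
        uint8_t pdcurves = 3;
        uint8_t pdg_to_idx[3] = { 2, 1, 0 };    /* invented backmapping */

        reg |= SM(pdcurves, RG1_NUM_PD_GAIN);
        reg |= SM(pdg_to_idx[0], RG1_PDGAIN_1);
        reg |= SM(pdg_to_idx[1], RG1_PDGAIN_2);
        reg |= SM(pdg_to_idx[2], RG1_PDGAIN_3);

        printf("RG1 = 0x%08x\n", reg);
        return 0;
}
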
*/ - if ((ah->ah_radio == AR5K_RF2413) || (ah->ah_radio == AR5K_RF2425)) - return 0; + reg |= AR5K_REG_SM(pdcurves, AR5K_PHY_TPC_RG1_NUM_PD_GAIN); - /* Reset TX power values */ - memset(&ah->ah_txpower, 0, sizeof(ah->ah_txpower)); - ah->ah_txpower.txp_tpc = tpc; - - /* Initialize TX power table */ - ath5k_txpower_table(ah, channel, txpower); + switch (pdcurves) { + case 3: + reg |= AR5K_REG_SM(pdg_to_idx[2], AR5K_PHY_TPC_RG1_PDGAIN_3); + /* Fall through */ + case 2: + reg |= AR5K_REG_SM(pdg_to_idx[1], AR5K_PHY_TPC_RG1_PDGAIN_2); + /* Fall through */ + case 1: + reg |= AR5K_REG_SM(pdg_to_idx[0], AR5K_PHY_TPC_RG1_PDGAIN_1); + break; + } + ath5k_hw_reg_write(ah, reg, AR5K_PHY_TPC_RG1); /* * Write TX power values */ for (i = 0; i < (AR5K_EEPROM_POWER_TABLE_SIZE / 2); i++) { ath5k_hw_reg_write(ah, - ((((ah->ah_txpower.txp_pcdac[(i << 1) + 1] << 8) | 0xff) & 0xffff) << 16) | - (((ah->ah_txpower.txp_pcdac[(i << 1) ] << 8) | 0xff) & 0xffff), - AR5K_PHY_PCDAC_TXPOWER(i)); + ((pdadc_out[4*i + 0] & 0xff) << 0) | + ((pdadc_out[4*i + 1] & 0xff) << 8) | + ((pdadc_out[4*i + 2] & 0xff) << 16) | + ((pdadc_out[4*i + 3] & 0xff) << 24), + AR5K_PHY_PDADC_TXPOWER(i)); + } +} + + +/* + * Common code for PCDAC/PDADC tables + */ + +/* + * This is the main function that uses all of the above + * to set PCDAC/PDADC table on hw for the current channel. + * This table is used for tx power calibration on the basband, + * without it we get weird tx power levels and in some cases + * distorted spectral mask + */ +static int +ath5k_setup_channel_powertable(struct ath5k_hw *ah, + struct ieee80211_channel *channel, + u8 ee_mode, u8 type) +{ + struct ath5k_pdgain_info *pdg_L, *pdg_R; + struct ath5k_chan_pcal_info *pcinfo_L; + struct ath5k_chan_pcal_info *pcinfo_R; + struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + u8 *pdg_curve_to_idx = ee->ee_pdc_to_idx[ee_mode]; + s16 table_min[AR5K_EEPROM_N_PD_GAINS]; + s16 table_max[AR5K_EEPROM_N_PD_GAINS]; + u8 *tmpL; + u8 *tmpR; + u32 target = channel->center_freq; + int pdg, i; + + /* Get surounding freq piers for this channel */ + ath5k_get_chan_pcal_surrounding_piers(ah, channel, + &pcinfo_L, + &pcinfo_R); + + /* Loop over pd gain curves on + * surounding freq piers by index */ + for (pdg = 0; pdg < ee->ee_pd_gains[ee_mode]; pdg++) { + + /* Fill curves in reverse order + * from lower power (max gain) + * to higher power. 
Use curve -> idx + * backmaping we did on eeprom init */ + u8 idx = pdg_curve_to_idx[pdg]; + + /* Grab the needed curves by index */ + pdg_L = &pcinfo_L->pd_curves[idx]; + pdg_R = &pcinfo_R->pd_curves[idx]; + + /* Initialize the temp tables */ + tmpL = ah->ah_txpower.tmpL[pdg]; + tmpR = ah->ah_txpower.tmpR[pdg]; + + /* Set curve's x boundaries and create + * curves so that they cover the same + * range (if we don't do that one table + * will have values on some range and the + * other one won't have any so interpolation + * will fail) */ + table_min[pdg] = min(pdg_L->pd_pwr[0], + pdg_R->pd_pwr[0]) / 2; + + table_max[pdg] = max(pdg_L->pd_pwr[pdg_L->pd_points - 1], + pdg_R->pd_pwr[pdg_R->pd_points - 1]) / 2; + + /* Now create the curves on surrounding channels + * and interpolate if needed to get the final + * curve for this gain on this channel */ + switch (type) { + case AR5K_PWRTABLE_LINEAR_PCDAC: + /* Override min/max so that we don't loose + * accuracy (don't divide by 2) */ + table_min[pdg] = min(pdg_L->pd_pwr[0], + pdg_R->pd_pwr[0]); + + table_max[pdg] = + max(pdg_L->pd_pwr[pdg_L->pd_points - 1], + pdg_R->pd_pwr[pdg_R->pd_points - 1]); + + /* Override minimum so that we don't get + * out of bounds while extrapolating + * below. Don't do this when we have 2 + * curves and we are on the high power curve + * because table_min is ok in this case */ + if (!(ee->ee_pd_gains[ee_mode] > 1 && pdg == 0)) { + + table_min[pdg] = + ath5k_get_linear_pcdac_min(pdg_L->pd_step, + pdg_R->pd_step, + pdg_L->pd_pwr, + pdg_R->pd_pwr); + + /* Don't go too low because we will + * miss the upper part of the curve. + * Note: 126 = 31.5dB (max power supported) + * in 0.25dB units */ + if (table_max[pdg] - table_min[pdg] > 126) + table_min[pdg] = table_max[pdg] - 126; + } + + /* Fall through */ + case AR5K_PWRTABLE_PWR_TO_PCDAC: + case AR5K_PWRTABLE_PWR_TO_PDADC: + + ath5k_create_power_curve(table_min[pdg], + table_max[pdg], + pdg_L->pd_pwr, + pdg_L->pd_step, + pdg_L->pd_points, tmpL, type); + + /* We are in a calibration + * pier, no need to interpolate + * between freq piers */ + if (pcinfo_L == pcinfo_R) + continue; + + ath5k_create_power_curve(table_min[pdg], + table_max[pdg], + pdg_R->pd_pwr, + pdg_R->pd_step, + pdg_R->pd_points, tmpR, type); + break; + default: + return -EINVAL; + } + + /* Interpolate between curves + * of surounding freq piers to + * get the final curve for this + * pd gain. Re-use tmpL for interpolation + * output */ + for (i = 0; (i < (u16) (table_max[pdg] - table_min[pdg])) && + (i < AR5K_EEPROM_POWER_TABLE_SIZE); i++) { + tmpL[i] = (u8) ath5k_get_interpolated_value(target, + (s16) pcinfo_L->freq, + (s16) pcinfo_R->freq, + (s16) tmpL[i], + (s16) tmpR[i]); + } } + /* Now we have a set of curves for this + * channel on tmpL (x range is table_max - table_min + * and y values are tmpL[pdg][]) sorted in the same + * order as EEPROM (because we've used the backmaping). + * So for RF5112 it's from higher power to lower power + * and for RF2413 it's from lower power to higher power. + * For RF5111 we only have one curve. 
*/ + + /* Fill min and max power levels for this + * channel by interpolating the values on + * surounding channels to complete the dataset */ + ah->ah_txpower.txp_min_pwr = ath5k_get_interpolated_value(target, + (s16) pcinfo_L->freq, + (s16) pcinfo_R->freq, + pcinfo_L->min_pwr, pcinfo_R->min_pwr); + + ah->ah_txpower.txp_max_pwr = ath5k_get_interpolated_value(target, + (s16) pcinfo_L->freq, + (s16) pcinfo_R->freq, + pcinfo_L->max_pwr, pcinfo_R->max_pwr); + + /* We are ready to go, fill PCDAC/PDADC + * table and write settings on hardware */ + switch (type) { + case AR5K_PWRTABLE_LINEAR_PCDAC: + /* For RF5112 we can have one or two curves + * and each curve covers a certain power lvl + * range so we need to do some more processing */ + ath5k_combine_linear_pcdac_curves(ah, table_min, table_max, + ee->ee_pd_gains[ee_mode]); + + /* Set txp.offset so that we can + * match max power value with max + * table index */ + ah->ah_txpower.txp_offset = 64 - (table_max[0] / 2); + + /* Write settings on hw */ + ath5k_setup_pcdac_table(ah); + break; + case AR5K_PWRTABLE_PWR_TO_PCDAC: + /* We are done for RF5111 since it has only + * one curve, just fit the curve on the table */ + ath5k_fill_pwr_to_pcdac_table(ah, table_min, table_max); + + /* No rate powertable adjustment for RF5111 */ + ah->ah_txpower.txp_min_idx = 0; + ah->ah_txpower.txp_offset = 0; + + /* Write settings on hw */ + ath5k_setup_pcdac_table(ah); + break; + case AR5K_PWRTABLE_PWR_TO_PDADC: + /* Set PDADC boundaries and fill + * final PDADC table */ + ath5k_combine_pwr_to_pdadc_curves(ah, table_min, table_max, + ee->ee_pd_gains[ee_mode]); + + /* Write settings on hw */ + ath5k_setup_pwr_to_pdadc_table(ah, pdg, pdg_curve_to_idx); + + /* Set txp.offset, note that table_min + * can be negative */ + ah->ah_txpower.txp_offset = table_min[0]; + break; + default: + return -EINVAL; + } + + return 0; +} + + +/* + * Per-rate tx power setting + * + * This is the code that sets the desired tx power (below + * maximum) on hw for each rate (we also have TPC that sets + * power per packet). We do that by providing an index on the + * PCDAC/PDADC table we set up. + */ + +/* + * Set rate power table + * + * For now we only limit txpower based on maximum tx power + * supported by hw (what's inside rate_info). We need to limit + * this even more, based on regulatory domain etc. 
+ * + * Rate power table contains indices to PCDAC/PDADC table (0.5dB steps) + * and is indexed as follows: + * rates[0] - rates[7] -> OFDM rates + * rates[8] - rates[14] -> CCK rates + * rates[15] -> XR rates (they all have the same power) + */ +static void +ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr, + struct ath5k_rate_pcal_info *rate_info, + u8 ee_mode) +{ + unsigned int i; + u16 *rates; + + /* max_pwr is power level we got from driver/user in 0.5dB + * units, switch to 0.25dB units so we can compare */ + max_pwr *= 2; + max_pwr = min(max_pwr, (u16) ah->ah_txpower.txp_max_pwr) / 2; + + /* apply rate limits */ + rates = ah->ah_txpower.txp_rates_power_table; + + /* OFDM rates 6 to 24Mb/s */ + for (i = 0; i < 5; i++) + rates[i] = min(max_pwr, rate_info->target_power_6to24); + + /* Rest OFDM rates */ + rates[5] = min(rates[0], rate_info->target_power_36); + rates[6] = min(rates[0], rate_info->target_power_48); + rates[7] = min(rates[0], rate_info->target_power_54); + + /* CCK rates */ + /* 1L */ + rates[8] = min(rates[0], rate_info->target_power_6to24); + /* 2L */ + rates[9] = min(rates[0], rate_info->target_power_36); + /* 2S */ + rates[10] = min(rates[0], rate_info->target_power_36); + /* 5L */ + rates[11] = min(rates[0], rate_info->target_power_48); + /* 5S */ + rates[12] = min(rates[0], rate_info->target_power_48); + /* 11L */ + rates[13] = min(rates[0], rate_info->target_power_54); + /* 11S */ + rates[14] = min(rates[0], rate_info->target_power_54); + + /* XR rates */ + rates[15] = min(rates[0], rate_info->target_power_6to24); + + /* CCK rates have different peak to average ratio + * so we have to tweak their power so that gainf + * correction works ok. For this we use OFDM to + * CCK delta from eeprom */ + if ((ee_mode == AR5K_EEPROM_MODE_11G) && + (ah->ah_phy_revision < AR5K_SREV_PHY_5212A)) + for (i = 8; i <= 15; i++) + rates[i] -= ah->ah_txpower.txp_cck_ofdm_gainf_delta; + + ah->ah_txpower.txp_min_pwr = rates[7]; + ah->ah_txpower.txp_max_pwr = rates[0]; + ah->ah_txpower.txp_ofdm = rates[7]; +} + + +/* + * Set transmition power + */ +int +ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, + u8 ee_mode, u8 txpower) +{ + struct ath5k_rate_pcal_info rate_info; + u8 type; + int ret; + + ATH5K_TRACE(ah->ah_sc); + if (txpower > AR5K_TUNE_MAX_TXPOWER) { + ATH5K_ERR(ah->ah_sc, "invalid tx power: %u\n", txpower); + return -EINVAL; + } + if (txpower == 0) + txpower = AR5K_TUNE_DEFAULT_TXPOWER; + + /* Reset TX power values */ + memset(&ah->ah_txpower, 0, sizeof(ah->ah_txpower)); + ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER; + ah->ah_txpower.txp_min_pwr = 0; + ah->ah_txpower.txp_max_pwr = AR5K_TUNE_MAX_TXPOWER; + + /* Initialize TX power table */ + switch (ah->ah_radio) { + case AR5K_RF5111: + type = AR5K_PWRTABLE_PWR_TO_PCDAC; + break; + case AR5K_RF5112: + type = AR5K_PWRTABLE_LINEAR_PCDAC; + break; + case AR5K_RF2413: + case AR5K_RF5413: + case AR5K_RF2316: + case AR5K_RF2317: + case AR5K_RF2425: + type = AR5K_PWRTABLE_PWR_TO_PDADC; + break; + default: + return -EINVAL; + } + + /* FIXME: Only on channel/mode change */ + ret = ath5k_setup_channel_powertable(ah, channel, ee_mode, type); + if (ret) + return ret; + + /* Limit max power if we have a CTL available */ + ath5k_get_max_ctl_power(ah, channel); + + /* FIXME: Tx power limit for this regdomain + * XXX: Mac80211/CRDA will do that anyway ? 
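
ath5k_setup_rate_powertable() above clamps the requested power against the hardware maximum, fills the 16-entry rate table from the interpolated per-rate targets, and on pre-5212A PHYs in 11g mode lowers the CCK entries by the OFDM-to-CCK gainF delta. A simplified standalone sketch of that clamping and adjustment; the targets, the delta and the flat CCK mapping are invented, and all values are in 0.5dB units.

#include <stdio.h>

static unsigned short min_u16(unsigned short a, unsigned short b)
{
        return a < b ? a : b;
}

int main(void)
{
        unsigned short max_pwr = 25;            /* requested/allowed maximum */
        unsigned short t6to24 = 28, t36 = 26, t48 = 24, t54 = 22;
        short cck_gainf_delta = 2;
        unsigned short rates[16];
        int i;

        /* OFDM 6-24M share one target, the higher OFDM rates get their own. */
        for (i = 0; i < 5; i++)
                rates[i] = min_u16(max_pwr, t6to24);
        rates[5] = min_u16(rates[0], t36);
        rates[6] = min_u16(rates[0], t48);
        rates[7] = min_u16(rates[0], t54);

        /* CCK and XR entries reuse the base target in this sketch. */
        for (i = 8; i < 16; i++)
                rates[i] = rates[0];

        /* Older PHYs: pull the CCK/XR entries down by the gainF delta. */
        for (i = 8; i < 16; i++)
                rates[i] -= cck_gainf_delta;

        for (i = 0; i < 16; i++)
                printf("rate[%2d] = %d\n", i, rates[i]);
        return 0;
}
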
*/ + + /* FIXME: Antenna reduction stuff */ + + /* FIXME: Limit power on turbo modes */ + + /* FIXME: TPC scale reduction */ + + /* Get surounding channels for per-rate power table + * calibration */ + ath5k_get_rate_pcal_data(ah, channel, &rate_info); + + /* Setup rate power table */ + ath5k_setup_rate_powertable(ah, txpower, &rate_info, ee_mode); + + /* Write rate power table on hw */ ath5k_hw_reg_write(ah, AR5K_TXPOWER_OFDM(3, 24) | AR5K_TXPOWER_OFDM(2, 16) | AR5K_TXPOWER_OFDM(1, 8) | AR5K_TXPOWER_OFDM(0, 0), AR5K_PHY_TXPOWER_RATE1); @@ -1536,26 +2566,34 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, AR5K_TXPOWER_CCK(13, 16) | AR5K_TXPOWER_CCK(12, 8) | AR5K_TXPOWER_CCK(11, 0), AR5K_PHY_TXPOWER_RATE4); - if (ah->ah_txpower.txp_tpc) + /* FIXME: TPC support */ + if (ah->ah_txpower.txp_tpc) { ath5k_hw_reg_write(ah, AR5K_PHY_TXPOWER_RATE_MAX_TPC_ENABLE | AR5K_TUNE_MAX_TXPOWER, AR5K_PHY_TXPOWER_RATE_MAX); - else + + ath5k_hw_reg_write(ah, + AR5K_REG_MS(AR5K_TUNE_MAX_TXPOWER, AR5K_TPC_ACK) | + AR5K_REG_MS(AR5K_TUNE_MAX_TXPOWER, AR5K_TPC_CTS) | + AR5K_REG_MS(AR5K_TUNE_MAX_TXPOWER, AR5K_TPC_CHIRP), + AR5K_TPC); + } else { ath5k_hw_reg_write(ah, AR5K_PHY_TXPOWER_RATE_MAX | AR5K_TUNE_MAX_TXPOWER, AR5K_PHY_TXPOWER_RATE_MAX); + } return 0; } -int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, unsigned int power) +int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 mode, u8 txpower) { /*Just a try M.F.*/ struct ieee80211_channel *channel = &ah->ah_current_channel; ATH5K_TRACE(ah->ah_sc); ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_TXPOWER, - "changing txpower to %d\n", power); + "changing txpower to %d\n", txpower); - return ath5k_hw_txpower(ah, channel, power); + return ath5k_hw_txpower(ah, channel, mode, txpower); } #undef _ATH5K_PHY diff --git a/drivers/net/wireless/ath5k/reg.h b/drivers/net/wireless/ath5k/reg.h index 2dc008e..7070d15 100644 --- a/drivers/net/wireless/ath5k/reg.h +++ b/drivers/net/wireless/ath5k/reg.h @@ -1554,6 +1554,19 @@ /*===5212 Specific PCU registers===*/ /* + * Transmit power control register + */ +#define AR5K_TPC 0x80e8 +#define AR5K_TPC_ACK 0x0000003f /* ack frames */ +#define AR5K_TPC_ACK_S 0 +#define AR5K_TPC_CTS 0x00003f00 /* cts frames */ +#define AR5K_TPC_CTS_S 8 +#define AR5K_TPC_CHIRP 0x003f0000 /* chirp frames */ +#define AR5K_TPC_CHIRP_S 16 +#define AR5K_TPC_DOPPLER 0x0f000000 /* doppler chirp span */ +#define AR5K_TPC_DOPPLER_S 24 + +/* * XR (eXtended Range) mode register */ #define AR5K_XRMODE 0x80c0 /* Register Address */ @@ -2550,6 +2563,12 @@ #define AR5K_PHY_TPC_RG1 0xa258 #define AR5K_PHY_TPC_RG1_NUM_PD_GAIN 0x0000c000 #define AR5K_PHY_TPC_RG1_NUM_PD_GAIN_S 14 +#define AR5K_PHY_TPC_RG1_PDGAIN_1 0x00030000 +#define AR5K_PHY_TPC_RG1_PDGAIN_1_S 16 +#define AR5K_PHY_TPC_RG1_PDGAIN_2 0x000c0000 +#define AR5K_PHY_TPC_RG1_PDGAIN_2_S 18 +#define AR5K_PHY_TPC_RG1_PDGAIN_3 0x00300000 +#define AR5K_PHY_TPC_RG1_PDGAIN_3_S 20 #define AR5K_PHY_TPC_RG5 0xa26C #define AR5K_PHY_TPC_RG5_PD_GAIN_OVERLAP 0x0000000F diff --git a/drivers/net/wireless/ath5k/reset.c b/drivers/net/wireless/ath5k/reset.c index 685dc21..7a17d31 100644 --- a/drivers/net/wireless/ath5k/reset.c +++ b/drivers/net/wireless/ath5k/reset.c @@ -664,29 +664,35 @@ static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah, struct ieee80211_channel *channel, u8 *ant, u8 ee_mode) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + s16 cck_ofdm_pwr_delta; - /* Set CCK to OFDM power delta */ - if (ah->ah_phy_revision >= AR5K_SREV_PHY_5212A) { - int16_t 
cck_ofdm_pwr_delta; - - /* Adjust power delta for channel 14 */ - if (channel->center_freq == 2484) - cck_ofdm_pwr_delta = - ((ee->ee_cck_ofdm_power_delta - - ee->ee_scaled_cck_delta) * 2) / 10; - else - cck_ofdm_pwr_delta = - (ee->ee_cck_ofdm_power_delta * 2) / 10; + /* Adjust power delta for channel 14 */ + if (channel->center_freq == 2484) + cck_ofdm_pwr_delta = + ((ee->ee_cck_ofdm_power_delta - + ee->ee_scaled_cck_delta) * 2) / 10; + else + cck_ofdm_pwr_delta = + (ee->ee_cck_ofdm_power_delta * 2) / 10; + /* Set CCK to OFDM power delta on tx power + * adjustment register */ + if (ah->ah_phy_revision >= AR5K_SREV_PHY_5212A) { if (channel->hw_value == CHANNEL_G) ath5k_hw_reg_write(ah, - AR5K_REG_SM((ee->ee_cck_ofdm_power_delta * -1), + AR5K_REG_SM((ee->ee_cck_ofdm_gain_delta * -1), AR5K_PHY_TX_PWR_ADJ_CCK_GAIN_DELTA) | AR5K_REG_SM((cck_ofdm_pwr_delta * -1), AR5K_PHY_TX_PWR_ADJ_CCK_PCDAC_INDEX), AR5K_PHY_TX_PWR_ADJ); else ath5k_hw_reg_write(ah, 0, AR5K_PHY_TX_PWR_ADJ); + } else { + /* For older revs we scale power on sw during tx power + * setup */ + ah->ah_txpower.txp_cck_ofdm_pwr_delta = cck_ofdm_pwr_delta; + ah->ah_txpower.txp_cck_ofdm_gainf_delta = + ee->ee_cck_ofdm_gain_delta; } /* Set antenna idle switch table */ @@ -994,7 +1000,8 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode, /* * Set TX power (FIXME) */ - ret = ath5k_hw_txpower(ah, channel, AR5K_TUNE_DEFAULT_TXPOWER); + ret = ath5k_hw_txpower(ah, channel, ee_mode, + AR5K_TUNE_DEFAULT_TXPOWER); if (ret) return ret; -- cgit v0.10.2 From 3b85875a252dbbd95c2e04d73639719a0a79634e Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 12 Mar 2009 09:55:09 +0100 Subject: nl80211: rework locking When I added scanning to cfg80211, we got a lock dependency like this: rtnl --> cfg80211_mtx nl80211, on the other hand, has the reverse lock dependency: cfg80211_mtx --> rtnl which clearly is a bad idea. This patch reworks nl80211 to take these two locks in the other order to fix the possible, and easily triggerable, deadlock. Signed-off-by: Johannes Berg Signed-off-by: John W. 
Linville diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 58ee1b1..a3ecf8d 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -602,9 +602,12 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info) memset(¶ms, 0, sizeof(params)); + rtnl_lock(); + err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); if (err) - return err; + goto unlock_rtnl; + ifindex = dev->ifindex; type = dev->ieee80211_ptr->iftype; dev_put(dev); @@ -641,17 +644,17 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info) if (!err) flags = &_flags; } - rtnl_lock(); + err = drv->ops->change_virtual_intf(&drv->wiphy, ifindex, type, flags, ¶ms); dev = __dev_get_by_index(&init_net, ifindex); WARN_ON(!dev || (!err && dev->ieee80211_ptr->iftype != type)); - rtnl_unlock(); - unlock: cfg80211_put_dev(drv); + unlock_rtnl: + rtnl_unlock(); return err; } @@ -674,9 +677,13 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) return -EINVAL; } + rtnl_lock(); + drv = cfg80211_get_dev_from_info(info); - if (IS_ERR(drv)) - return PTR_ERR(drv); + if (IS_ERR(drv)) { + err = PTR_ERR(drv); + goto unlock_rtnl; + } if (!drv->ops->add_virtual_intf || !(drv->wiphy.interface_modes & (1 << type))) { @@ -690,18 +697,17 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) params.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]); } - rtnl_lock(); err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ? info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL, &flags); err = drv->ops->add_virtual_intf(&drv->wiphy, nla_data(info->attrs[NL80211_ATTR_IFNAME]), type, err ? NULL : &flags, ¶ms); - rtnl_unlock(); - unlock: cfg80211_put_dev(drv); + unlock_rtnl: + rtnl_unlock(); return err; } @@ -711,9 +717,11 @@ static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info) int ifindex, err; struct net_device *dev; + rtnl_lock(); + err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); if (err) - return err; + goto unlock_rtnl; ifindex = dev->ifindex; dev_put(dev); @@ -722,12 +730,12 @@ static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info) goto out; } - rtnl_lock(); err = drv->ops->del_virtual_intf(&drv->wiphy, ifindex); - rtnl_unlock(); out: cfg80211_put_dev(drv); + unlock_rtnl: + rtnl_unlock(); return err; } @@ -779,9 +787,11 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info) if (info->attrs[NL80211_ATTR_MAC]) mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); + rtnl_lock(); + err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); if (err) - return err; + goto unlock_rtnl; if (!drv->ops->get_key) { err = -EOPNOTSUPP; @@ -809,10 +819,8 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info) if (mac_addr) NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr); - rtnl_lock(); err = drv->ops->get_key(&drv->wiphy, dev, key_idx, mac_addr, &cookie, get_key_callback); - rtnl_unlock(); if (err) goto out; @@ -830,6 +838,9 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info) out: cfg80211_put_dev(drv); dev_put(dev); + unlock_rtnl: + rtnl_unlock(); + return err; } @@ -858,9 +869,11 @@ static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info) !info->attrs[NL80211_ATTR_KEY_DEFAULT_MGMT]) return -EINVAL; + rtnl_lock(); + err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); if (err) - return err; + goto unlock_rtnl; if (info->attrs[NL80211_ATTR_KEY_DEFAULT]) func = drv->ops->set_default_key; @@ -872,13 
+885,15 @@ static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info) goto out; } - rtnl_lock(); err = func(&drv->wiphy, dev, key_idx); - rtnl_unlock(); out: cfg80211_put_dev(drv); dev_put(dev); + + unlock_rtnl: + rtnl_unlock(); + return err; } @@ -948,22 +963,25 @@ static int nl80211_new_key(struct sk_buff *skb, struct genl_info *info) return -EINVAL; } + rtnl_lock(); + err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); if (err) - return err; + goto unlock_rtnl; if (!drv->ops->add_key) { err = -EOPNOTSUPP; goto out; } - rtnl_lock(); err = drv->ops->add_key(&drv->wiphy, dev, key_idx, mac_addr, ¶ms); - rtnl_unlock(); out: cfg80211_put_dev(drv); dev_put(dev); + unlock_rtnl: + rtnl_unlock(); + return err; } @@ -984,22 +1002,26 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info) if (info->attrs[NL80211_ATTR_MAC]) mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); + rtnl_lock(); + err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); if (err) - return err; + goto unlock_rtnl; if (!drv->ops->del_key) { err = -EOPNOTSUPP; goto out; } - rtnl_lock(); err = drv->ops->del_key(&drv->wiphy, dev, key_idx, mac_addr); - rtnl_unlock(); out: cfg80211_put_dev(drv); dev_put(dev); + + unlock_rtnl: + rtnl_unlock(); + return err; } @@ -1013,9 +1035,11 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info) struct beacon_parameters params; int haveinfo = 0; + rtnl_lock(); + err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); if (err) - return err; + goto unlock_rtnl; switch (info->genlhdr->cmd) { case NL80211_CMD_NEW_BEACON: @@ -1076,13 +1100,14 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info) goto out; } - rtnl_lock(); err = call(&drv->wiphy, dev, ¶ms); - rtnl_unlock(); out: cfg80211_put_dev(drv); dev_put(dev); + unlock_rtnl: + rtnl_unlock(); + return err; } @@ -1092,22 +1117,25 @@ static int nl80211_del_beacon(struct sk_buff *skb, struct genl_info *info) int err; struct net_device *dev; + rtnl_lock(); + err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); if (err) - return err; + goto unlock_rtnl; if (!drv->ops->del_beacon) { err = -EOPNOTSUPP; goto out; } - rtnl_lock(); err = drv->ops->del_beacon(&drv->wiphy, dev); - rtnl_unlock(); out: cfg80211_put_dev(drv); dev_put(dev); + unlock_rtnl: + rtnl_unlock(); + return err; } @@ -1273,14 +1301,18 @@ static int nl80211_dump_station(struct sk_buff *skb, return -EINVAL; } - netdev = dev_get_by_index(&init_net, ifidx); - if (!netdev) - return -ENODEV; + rtnl_lock(); + + netdev = __dev_get_by_index(&init_net, ifidx); + if (!netdev) { + err = -ENODEV; + goto out_rtnl; + } dev = cfg80211_get_dev_from_ifindex(ifidx); if (IS_ERR(dev)) { err = PTR_ERR(dev); - goto out_put_netdev; + goto out_rtnl; } if (!dev->ops->dump_station) { @@ -1288,15 +1320,13 @@ static int nl80211_dump_station(struct sk_buff *skb, goto out_err; } - rtnl_lock(); - while (1) { err = dev->ops->dump_station(&dev->wiphy, netdev, sta_idx, mac_addr, &sinfo); if (err == -ENOENT) break; if (err) - goto out_err_rtnl; + goto out_err; if (nl80211_send_station(skb, NETLINK_CB(cb->skb).pid, @@ -1312,12 +1342,10 @@ static int nl80211_dump_station(struct sk_buff *skb, out: cb->args[1] = sta_idx; err = skb->len; - out_err_rtnl: - rtnl_unlock(); out_err: cfg80211_put_dev(dev); - out_put_netdev: - dev_put(netdev); + out_rtnl: + rtnl_unlock(); return err; } @@ -1338,19 +1366,18 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info) mac_addr = 
nla_data(info->attrs[NL80211_ATTR_MAC]); + rtnl_lock(); + err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); if (err) - return err; + goto out_rtnl; if (!drv->ops->get_station) { err = -EOPNOTSUPP; goto out; } - rtnl_lock(); err = drv->ops->get_station(&drv->wiphy, dev, mac_addr, &sinfo); - rtnl_unlock(); - if (err) goto out; @@ -1367,10 +1394,12 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info) out_free: nlmsg_free(msg); - out: cfg80211_put_dev(drv); dev_put(dev); + out_rtnl: + rtnl_unlock(); + return err; } @@ -1438,9 +1467,11 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info) params.plink_action = nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]); + rtnl_lock(); + err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); if (err) - return err; + goto out_rtnl; err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, ¶ms.vlan); if (err) @@ -1451,15 +1482,16 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info) goto out; } - rtnl_lock(); err = drv->ops->change_station(&drv->wiphy, dev, mac_addr, ¶ms); - rtnl_unlock(); out: if (params.vlan) dev_put(params.vlan); cfg80211_put_dev(drv); dev_put(dev); + out_rtnl: + rtnl_unlock(); + return err; } @@ -1501,9 +1533,11 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) ¶ms.station_flags)) return -EINVAL; + rtnl_lock(); + err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); if (err) - return err; + goto out_rtnl; err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, ¶ms.vlan); if (err) @@ -1514,15 +1548,16 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) goto out; } - rtnl_lock(); err = drv->ops->add_station(&drv->wiphy, dev, mac_addr, ¶ms); - rtnl_unlock(); out: if (params.vlan) dev_put(params.vlan); cfg80211_put_dev(drv); dev_put(dev); + out_rtnl: + rtnl_unlock(); + return err; } @@ -1536,22 +1571,25 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info) if (info->attrs[NL80211_ATTR_MAC]) mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); + rtnl_lock(); + err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); if (err) - return err; + goto out_rtnl; if (!drv->ops->del_station) { err = -EOPNOTSUPP; goto out; } - rtnl_lock(); err = drv->ops->del_station(&drv->wiphy, dev, mac_addr); - rtnl_unlock(); out: cfg80211_put_dev(drv); dev_put(dev); + out_rtnl: + rtnl_unlock(); + return err; } @@ -1632,14 +1670,18 @@ static int nl80211_dump_mpath(struct sk_buff *skb, return -EINVAL; } - netdev = dev_get_by_index(&init_net, ifidx); - if (!netdev) - return -ENODEV; + rtnl_lock(); + + netdev = __dev_get_by_index(&init_net, ifidx); + if (!netdev) { + err = -ENODEV; + goto out_rtnl; + } dev = cfg80211_get_dev_from_ifindex(ifidx); if (IS_ERR(dev)) { err = PTR_ERR(dev); - goto out_put_netdev; + goto out_rtnl; } if (!dev->ops->dump_mpath) { @@ -1647,15 +1689,13 @@ static int nl80211_dump_mpath(struct sk_buff *skb, goto out_err; } - rtnl_lock(); - while (1) { err = dev->ops->dump_mpath(&dev->wiphy, netdev, path_idx, dst, next_hop, &pinfo); if (err == -ENOENT) break; if (err) - goto out_err_rtnl; + goto out_err; if (nl80211_send_mpath(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, NLM_F_MULTI, @@ -1670,12 +1710,10 @@ static int nl80211_dump_mpath(struct sk_buff *skb, out: cb->args[1] = path_idx; err = skb->len; - out_err_rtnl: - rtnl_unlock(); out_err: cfg80211_put_dev(dev); - out_put_netdev: - dev_put(netdev); + out_rtnl: + rtnl_unlock(); return err; } @@ -1697,19 +1735,18 
@@ static int nl80211_get_mpath(struct sk_buff *skb, struct genl_info *info) dst = nla_data(info->attrs[NL80211_ATTR_MAC]); + rtnl_lock(); + err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); if (err) - return err; + goto out_rtnl; if (!drv->ops->get_mpath) { err = -EOPNOTSUPP; goto out; } - rtnl_lock(); err = drv->ops->get_mpath(&drv->wiphy, dev, dst, next_hop, &pinfo); - rtnl_unlock(); - if (err) goto out; @@ -1726,10 +1763,12 @@ static int nl80211_get_mpath(struct sk_buff *skb, struct genl_info *info) out_free: nlmsg_free(msg); - out: cfg80211_put_dev(drv); dev_put(dev); + out_rtnl: + rtnl_unlock(); + return err; } @@ -1750,22 +1789,25 @@ static int nl80211_set_mpath(struct sk_buff *skb, struct genl_info *info) dst = nla_data(info->attrs[NL80211_ATTR_MAC]); next_hop = nla_data(info->attrs[NL80211_ATTR_MPATH_NEXT_HOP]); + rtnl_lock(); + err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); if (err) - return err; + goto out_rtnl; if (!drv->ops->change_mpath) { err = -EOPNOTSUPP; goto out; } - rtnl_lock(); err = drv->ops->change_mpath(&drv->wiphy, dev, dst, next_hop); - rtnl_unlock(); out: cfg80211_put_dev(drv); dev_put(dev); + out_rtnl: + rtnl_unlock(); + return err; } static int nl80211_new_mpath(struct sk_buff *skb, struct genl_info *info) @@ -1785,22 +1827,25 @@ static int nl80211_new_mpath(struct sk_buff *skb, struct genl_info *info) dst = nla_data(info->attrs[NL80211_ATTR_MAC]); next_hop = nla_data(info->attrs[NL80211_ATTR_MPATH_NEXT_HOP]); + rtnl_lock(); + err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); if (err) - return err; + goto out_rtnl; if (!drv->ops->add_mpath) { err = -EOPNOTSUPP; goto out; } - rtnl_lock(); err = drv->ops->add_mpath(&drv->wiphy, dev, dst, next_hop); - rtnl_unlock(); out: cfg80211_put_dev(drv); dev_put(dev); + out_rtnl: + rtnl_unlock(); + return err; } @@ -1814,22 +1859,25 @@ static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info) if (info->attrs[NL80211_ATTR_MAC]) dst = nla_data(info->attrs[NL80211_ATTR_MAC]); + rtnl_lock(); + err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); if (err) - return err; + goto out_rtnl; if (!drv->ops->del_mpath) { err = -EOPNOTSUPP; goto out; } - rtnl_lock(); err = drv->ops->del_mpath(&drv->wiphy, dev, dst); - rtnl_unlock(); out: cfg80211_put_dev(drv); dev_put(dev); + out_rtnl: + rtnl_unlock(); + return err; } @@ -1862,22 +1910,25 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info) nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]); } + rtnl_lock(); + err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); if (err) - return err; + goto out_rtnl; if (!drv->ops->change_bss) { err = -EOPNOTSUPP; goto out; } - rtnl_lock(); err = drv->ops->change_bss(&drv->wiphy, dev, ¶ms); - rtnl_unlock(); out: cfg80211_put_dev(drv); dev_put(dev); + out_rtnl: + rtnl_unlock(); + return err; } @@ -1972,10 +2023,12 @@ static int nl80211_get_mesh_params(struct sk_buff *skb, struct nlattr *pinfoattr; struct sk_buff *msg; + rtnl_lock(); + /* Look up our device */ err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); if (err) - return err; + goto out_rtnl; if (!drv->ops->get_mesh_params) { err = -EOPNOTSUPP; @@ -1983,9 +2036,7 @@ static int nl80211_get_mesh_params(struct sk_buff *skb, } /* Get the mesh params */ - rtnl_lock(); err = drv->ops->get_mesh_params(&drv->wiphy, dev, &cur_params); - rtnl_unlock(); if (err) goto out; @@ -2034,13 +2085,16 @@ static int nl80211_get_mesh_params(struct sk_buff *skb, err = genlmsg_unicast(msg, info->snd_pid); goto out; 
-nla_put_failure: + nla_put_failure: genlmsg_cancel(msg, hdr); err = -EMSGSIZE; -out: + out: /* Cleanup */ cfg80211_put_dev(drv); dev_put(dev); + out_rtnl: + rtnl_unlock(); + return err; } @@ -2087,9 +2141,11 @@ static int nl80211_set_mesh_params(struct sk_buff *skb, struct genl_info *info) parent_attr, nl80211_meshconf_params_policy)) return -EINVAL; + rtnl_lock(); + err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); if (err) - return err; + goto out_rtnl; if (!drv->ops->set_mesh_params) { err = -EOPNOTSUPP; @@ -2136,14 +2192,15 @@ static int nl80211_set_mesh_params(struct sk_buff *skb, struct genl_info *info) nla_get_u16); /* Apply changes */ - rtnl_lock(); err = drv->ops->set_mesh_params(&drv->wiphy, dev, &cfg, mask); - rtnl_unlock(); out: /* cleanup */ cfg80211_put_dev(drv); dev_put(dev); + out_rtnl: + rtnl_unlock(); + return err; } @@ -2310,19 +2367,22 @@ static int nl80211_set_mgmt_extra_ie(struct sk_buff *skb, params.ies_len = nla_len(info->attrs[NL80211_ATTR_IE]); } + rtnl_lock(); + err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); if (err) - return err; + goto out_rtnl; - if (drv->ops->set_mgmt_extra_ie) { - rtnl_lock(); + if (drv->ops->set_mgmt_extra_ie) err = drv->ops->set_mgmt_extra_ie(&drv->wiphy, dev, ¶ms); - rtnl_unlock(); - } else + else err = -EOPNOTSUPP; cfg80211_put_dev(drv); dev_put(dev); + out_rtnl: + rtnl_unlock(); + return err; } @@ -2339,9 +2399,11 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) enum ieee80211_band band; size_t ie_len; + rtnl_lock(); + err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); if (err) - return err; + goto out_rtnl; wiphy = &drv->wiphy; @@ -2350,11 +2412,9 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) goto out; } - rtnl_lock(); - if (drv->scan_req) { err = -EBUSY; - goto out_unlock; + goto out; } if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) { @@ -2362,7 +2422,7 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) n_channels++; if (!n_channels) { err = -EINVAL; - goto out_unlock; + goto out; } } else { for (band = 0; band < IEEE80211_NUM_BANDS; band++) @@ -2376,7 +2436,7 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) if (n_ssids > wiphy->max_scan_ssids) { err = -EINVAL; - goto out_unlock; + goto out; } if (info->attrs[NL80211_ATTR_IE]) @@ -2390,7 +2450,7 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) + ie_len, GFP_KERNEL); if (!request) { err = -ENOMEM; - goto out_unlock; + goto out; } request->channels = (void *)((char *)request + sizeof(*request)); @@ -2461,11 +2521,12 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) drv->scan_req = NULL; kfree(request); } - out_unlock: - rtnl_unlock(); out: cfg80211_put_dev(drv); dev_put(dev); + out_rtnl: + rtnl_unlock(); + return err; } -- cgit v0.10.2 From 019fb97d47896c0ead4a77f55e5350c2750f675f Mon Sep 17 00:00:00 2001 From: Mohamed Abbas Date: Tue, 17 Mar 2009 21:59:18 -0700 Subject: iwlagn: use changed in mac_config In function iwl_mac_config use changed flag to call only the affected functions. This patch also allow user to cache channel, txpower and power value when the interface is not ready and apply the changes once the interface ready. Signed-off-by: Mohamed Abbas Signed-off-by: Reinette Chatre Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index 0db3bc0..4d2ed52 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -1567,9 +1567,8 @@ static void iwl_alive_start(struct iwl_priv *priv) if (iwl_is_associated(priv)) { struct iwl_rxon_cmd *active_rxon = (struct iwl_rxon_cmd *)&priv->active_rxon; - - memcpy(&priv->staging_rxon, &priv->active_rxon, - sizeof(priv->staging_rxon)); + /* apply any changes in staging */ + priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; } else { /* Initialize our rx_config data */ @@ -2184,110 +2183,112 @@ static int iwl_mac_config(struct ieee80211_hw *hw, u32 changed) struct iwl_priv *priv = hw->priv; const struct iwl_channel_info *ch_info; struct ieee80211_conf *conf = &hw->conf; - unsigned long flags; + unsigned long flags = 0; int ret = 0; - u16 channel; + u16 ch; + int scan_active = 0; mutex_lock(&priv->mutex); - IWL_DEBUG_MAC80211(priv, "enter to channel %d\n", conf->channel->hw_value); - - priv->current_ht_config.is_ht = conf_is_ht(conf); + IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n", + conf->channel->hw_value, changed); - if (conf->radio_enabled && iwl_radio_kill_sw_enable_radio(priv)) { - IWL_DEBUG_MAC80211(priv, "leave - RF-KILL - waiting for uCode\n"); - goto out; + if (unlikely(!priv->cfg->mod_params->disable_hw_scan && + test_bit(STATUS_SCANNING, &priv->status))) { + scan_active = 1; + IWL_DEBUG_MAC80211(priv, "leave - scanning\n"); } - if (!conf->radio_enabled) - iwl_radio_kill_sw_disable_radio(priv); - if (!iwl_is_ready(priv)) { - IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); - ret = -EIO; - goto out; - } + /* during scanning mac80211 will delay channel setting until + * scan finish with changed = 0 + */ + if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) { + if (scan_active) + goto set_ch_out; + + ch = ieee80211_frequency_to_channel(conf->channel->center_freq); + ch_info = iwl_get_channel_info(priv, conf->channel->band, ch); + if (!is_channel_valid(ch_info)) { + IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n"); + ret = -EINVAL; + goto set_ch_out; + } - if (unlikely(!priv->cfg->mod_params->disable_hw_scan && - test_bit(STATUS_SCANNING, &priv->status))) { - IWL_DEBUG_MAC80211(priv, "leave - scanning\n"); - mutex_unlock(&priv->mutex); - return 0; - } + if (priv->iw_mode == NL80211_IFTYPE_ADHOC && + !is_channel_ibss(ch_info)) { + IWL_ERR(priv, "channel %d in band %d not " + "IBSS channel\n", + conf->channel->hw_value, conf->channel->band); + ret = -EINVAL; + goto set_ch_out; + } - channel = ieee80211_frequency_to_channel(conf->channel->center_freq); - ch_info = iwl_get_channel_info(priv, conf->channel->band, channel); - if (!is_channel_valid(ch_info)) { - IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n"); - ret = -EINVAL; - goto out; - } + priv->current_ht_config.is_ht = conf_is_ht(conf); - if (priv->iw_mode == NL80211_IFTYPE_ADHOC && - !is_channel_ibss(ch_info)) { - IWL_ERR(priv, "channel %d in band %d not IBSS channel\n", - conf->channel->hw_value, conf->channel->band); - ret = -EINVAL; - goto out; - } + spin_lock_irqsave(&priv->lock, flags); - spin_lock_irqsave(&priv->lock, flags); + /* if we are switching from ht to 2.4 clear flags + * from any ht related info since 2.4 does not + * support ht */ + if ((le16_to_cpu(priv->staging_rxon.channel) != ch)) + priv->staging_rxon.flags = 0; - /* if we are switching from ht to 2.4 clear flags - * from 
any ht related info since 2.4 does not - * support ht */ - if ((le16_to_cpu(priv->staging_rxon.channel) != channel) -#ifdef IEEE80211_CONF_CHANNEL_SWITCH - && !(conf->flags & IEEE80211_CONF_CHANNEL_SWITCH) -#endif - ) - priv->staging_rxon.flags = 0; + iwl_set_rxon_channel(priv, conf->channel); + + iwl_set_flags_for_band(priv, conf->channel->band); + spin_unlock_irqrestore(&priv->lock, flags); + set_ch_out: + /* The list of supported rates and rate mask can be different + * for each band; since the band may have changed, reset + * the rate mask to what mac80211 lists */ + iwl_set_rate(priv); + } - iwl_set_rxon_channel(priv, conf->channel); + if (changed & IEEE80211_CONF_CHANGE_PS) { + if (conf->flags & IEEE80211_CONF_PS) + ret = iwl_power_set_user_mode(priv, IWL_POWER_INDEX_3); + else + ret = iwl_power_set_user_mode(priv, IWL_POWER_MODE_CAM); + if (ret) + IWL_DEBUG_MAC80211(priv, "Error setting power level\n"); - iwl_set_flags_for_band(priv, conf->channel->band); + } - /* The list of supported rates and rate mask can be different - * for each band; since the band may have changed, reset - * the rate mask to what mac80211 lists */ - iwl_set_rate(priv); + if (changed & IEEE80211_CONF_CHANGE_POWER) { + IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n", + priv->tx_power_user_lmt, conf->power_level); - spin_unlock_irqrestore(&priv->lock, flags); + iwl_set_tx_power(priv, conf->power_level, false); + } -#ifdef IEEE80211_CONF_CHANNEL_SWITCH - if (conf->flags & IEEE80211_CONF_CHANNEL_SWITCH) { - iwl_hw_channel_switch(priv, conf->channel); - goto out; + /* call to ensure that 4965 rx_chain is set properly in monitor mode */ + iwl_set_rxon_chain(priv); + + if (changed & IEEE80211_CONF_CHANGE_RADIO_ENABLED) { + if (conf->radio_enabled && + iwl_radio_kill_sw_enable_radio(priv)) { + IWL_DEBUG_MAC80211(priv, "leave - RF-KILL - " + "waiting for uCode\n"); + goto out; + } + + if (!conf->radio_enabled) + iwl_radio_kill_sw_disable_radio(priv); } -#endif if (!conf->radio_enabled) { IWL_DEBUG_MAC80211(priv, "leave - radio disabled\n"); goto out; } - if (iwl_is_rfkill(priv)) { - IWL_DEBUG_MAC80211(priv, "leave - RF kill\n"); - ret = -EIO; + if (!iwl_is_ready(priv)) { + IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); goto out; } - if (conf->flags & IEEE80211_CONF_PS) - ret = iwl_power_set_user_mode(priv, IWL_POWER_INDEX_3); - else - ret = iwl_power_set_user_mode(priv, IWL_POWER_MODE_CAM); - if (ret) - IWL_DEBUG_MAC80211(priv, "Error setting power level\n"); - - IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n", - priv->tx_power_user_lmt, conf->power_level); - - iwl_set_tx_power(priv, conf->power_level, false); - - iwl_set_rate(priv); - - /* call to ensure that 4965 rx_chain is set properly in monitor mode */ - iwl_set_rxon_chain(priv); + if (scan_active) + goto out; if (memcmp(&priv->active_rxon, &priv->staging_rxon, sizeof(priv->staging_rxon))) @@ -2295,9 +2296,9 @@ static int iwl_mac_config(struct ieee80211_hw *hw, u32 changed) else IWL_DEBUG_INFO(priv, "No re-sending same RXON configuration.\n"); - IWL_DEBUG_MAC80211(priv, "leave\n"); out: + IWL_DEBUG_MAC80211(priv, "leave\n"); mutex_unlock(&priv->mutex); return ret; } diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c index 085e9cf..bcdecb1 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.c +++ b/drivers/net/wireless/iwlwifi/iwl-core.c @@ -1437,6 +1437,10 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force) priv->tx_power_user_lmt = tx_power; + /* if nic is not up don't send command */ + if 
(!iwl_is_ready_rf(priv)) + return ret; + if (force && priv->cfg->ops->lib->send_tx_power) ret = priv->cfg->ops->lib->send_tx_power(priv); -- cgit v0.10.2 From 37fec3846a5a8b098e35c44ee858407bab0df43f Mon Sep 17 00:00:00 2001 From: Mohamed Abbas Date: Tue, 17 Mar 2009 21:51:42 -0700 Subject: iwl3945: use changed in iwl3945_mac_config In function iwl3945_mac_config use changed flag to call only the affected functions. Signed-off-by: Mohamed Abbas Signed-off-by: Reinette Chatre Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index 4465320..16ecf03 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c @@ -3773,15 +3773,19 @@ static int iwl3945_mac_config(struct ieee80211_hw *hw, u32 changed) } #endif - if (conf->radio_enabled && iwl_radio_kill_sw_enable_radio(priv)) { - IWL_DEBUG_MAC80211(priv, "leave - RF-KILL - waiting for uCode\n"); - goto out; - } + if (changed & IEEE80211_CONF_CHANGE_RADIO_ENABLED) { + if (conf->radio_enabled && + iwl_radio_kill_sw_enable_radio(priv)) { + IWL_DEBUG_MAC80211(priv, "leave - RF-KILL - " + "waiting for uCode\n"); + goto out; + } - if (!conf->radio_enabled) { - iwl_radio_kill_sw_disable_radio(priv); - IWL_DEBUG_MAC80211(priv, "leave - radio disabled\n"); - goto out; + if (!conf->radio_enabled) { + iwl_radio_kill_sw_disable_radio(priv); + IWL_DEBUG_MAC80211(priv, "leave - radio disabled\n"); + goto out; + } } if (iwl_is_rfkill(priv)) { -- cgit v0.10.2 From 5c2207c64209be2fe0d6b43ada2e41b28a948015 Mon Sep 17 00:00:00 2001 From: Wey-Yi Guy Date: Tue, 17 Mar 2009 21:51:43 -0700 Subject: iwlwifi: return 0 for AMPDU_TX/RX_STOP request if NIC is going down When receive IEEE80211_AMPDU_RX_STOP or IEEE80211_AMPDU_TX_STOP request in iwl_mac_ampdu_action() from mac80211; check STATUS_EXIT_PENDING bit, if NIC is on the way out, then return 0 back to mac80211, this can prevent mac80211 report HW error incorrectly. Signed-off-by: Wey-Yi Guy Signed-off-by: Reinette Chatre Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index 4d2ed52..c148285 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -2683,6 +2683,7 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u16 tid, u16 *ssn) { struct iwl_priv *priv = hw->priv; + int ret; IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n", sta->addr, tid); @@ -2696,13 +2697,21 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw, return iwl_sta_rx_agg_start(priv, sta->addr, tid, *ssn); case IEEE80211_AMPDU_RX_STOP: IWL_DEBUG_HT(priv, "stop Rx\n"); - return iwl_sta_rx_agg_stop(priv, sta->addr, tid); + ret = iwl_sta_rx_agg_stop(priv, sta->addr, tid); + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return 0; + else + return ret; case IEEE80211_AMPDU_TX_START: IWL_DEBUG_HT(priv, "start Tx\n"); return iwl_tx_agg_start(priv, sta->addr, tid, ssn); case IEEE80211_AMPDU_TX_STOP: IWL_DEBUG_HT(priv, "stop Tx\n"); - return iwl_tx_agg_stop(priv, sta->addr, tid); + ret = iwl_tx_agg_stop(priv, sta->addr, tid); + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return 0; + else + return ret; default: IWL_DEBUG_HT(priv, "unknown\n"); return -EINVAL; -- cgit v0.10.2 From 4f01ac01539d83709d6ae314fc172da7b7e70456 Mon Sep 17 00:00:00 2001 From: Mohamed Abbas Date: Tue, 17 Mar 2009 21:51:44 -0700 Subject: iwlagn: allow power level setting all the times allow user to set power level at all times Signed-off-by: Mohamed Abbas Signed-off-by: Reinette Chatre Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index c148285..663dc83 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -3093,11 +3093,6 @@ static ssize_t store_power_level(struct device *d, mutex_lock(&priv->mutex); - if (!iwl_is_ready(priv)) { - ret = -EAGAIN; - goto out; - } - ret = strict_strtoul(buf, 10, &mode); if (ret) goto out; diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index 16ecf03..bd59ed4 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c @@ -4550,11 +4550,6 @@ static ssize_t store_power_level(struct device *d, mutex_lock(&priv->mutex); - if (!iwl_is_ready(priv)) { - ret = -EAGAIN; - goto out; - } - ret = strict_strtoul(buf, 10, &mode); if (ret) goto out; -- cgit v0.10.2 From 28c608750f5f72e3c4139f7a51358eccd58c80a9 Mon Sep 17 00:00:00 2001 From: Mohamed Abbas Date: Tue, 17 Mar 2009 21:51:45 -0700 Subject: iwlcore: dont commit power command if interface is not up If user set new power level, accept the new power level and only send command to host if the interface is up and radio on. Signed-off-by: Mohamed Abbas Signed-off-by: Reinette Chatre Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c index 18b7e41..47c8945 100644 --- a/drivers/net/wireless/iwlwifi/iwl-power.c +++ b/drivers/net/wireless/iwlwifi/iwl-power.c @@ -273,7 +273,7 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force) if (priv->iw_mode != NL80211_IFTYPE_STATION) final_mode = IWL_POWER_MODE_CAM; - if (!iwl_is_rfkill(priv) && !setting->power_disabled && + if (iwl_is_ready_rf(priv) && !setting->power_disabled && ((setting->power_mode != final_mode) || force)) { struct iwl_powertable_cmd cmd; -- cgit v0.10.2 From b1c6019bc0fe829309258d888f47d9ae54353039 Mon Sep 17 00:00:00 2001 From: Mohamed Abbas Date: Tue, 17 Mar 2009 21:51:47 -0700 Subject: iwlwifi: support 11h Set IEEE80211_HW_SPECTRUM_MGMT bit in hw->flags, this tell mac80211 we support spectrum mgmt. Signed-off-by: Mohamed Abbas Signed-off-by: Reinette Chatre Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c index bcdecb1..4b1298c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.c +++ b/drivers/net/wireless/iwlwifi/iwl-core.c @@ -1298,6 +1298,7 @@ int iwl_setup_mac(struct iwl_priv *priv) hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_NOISE_DBM | IEEE80211_HW_AMPDU_AGGREGATION | + IEEE80211_HW_SPECTRUM_MGMT | IEEE80211_HW_SUPPORTS_PS; hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index bd59ed4..ec44645 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c @@ -4904,7 +4904,8 @@ static int iwl3945_setup_mac(struct iwl_priv *priv) /* Tell mac80211 our characteristics */ hw->flags = IEEE80211_HW_SIGNAL_DBM | - IEEE80211_HW_NOISE_DBM; + IEEE80211_HW_NOISE_DBM | + IEEE80211_HW_SPECTRUM_MGMT; hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | -- cgit v0.10.2 From 21c02a1ab2d4b4a439461140e1ac355db32c3f2b Mon Sep 17 00:00:00 2001 From: Abhijeet Kolekar Date: Tue, 17 Mar 2009 21:51:48 -0700 Subject: iwl3945: set TFD_QUEUE_MAX to correct value Total number of queues is 8 but only 7 of them are TX queues. 4 AC(Data) queue ,1 CMD and 2 HCCA. The HCCA queues are not used. max_txq_num is set to maximum usable TX queues. Signed-off-by: Abhijeet Kolekar Signed-off-by: Reinette Chatre Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h index 205603d..73f93a0 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h +++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h @@ -233,7 +233,7 @@ struct iwl3945_eeprom { #define PCI_CFG_REV_ID_BIT_RTP (0x80) /* bit 7 */ #define TFD_QUEUE_MIN 0 -#define TFD_QUEUE_MAX 6 +#define TFD_QUEUE_MAX 5 /* 4 DATA + 1 CMD */ #define IWL_NUM_SCAN_RATES (2) diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c index ba7e720..99bb48e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c @@ -1046,7 +1046,7 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv) goto error; /* Tx queue(s) */ - for (txq_id = 0; txq_id < TFD_QUEUE_MAX; txq_id++) { + for (txq_id = 0; txq_id <= priv->hw_params.max_txq_num; txq_id++) { slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ? 
TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; rc = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num, @@ -1239,7 +1239,7 @@ void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv) int txq_id; /* Tx queues */ - for (txq_id = 0; txq_id < TFD_QUEUE_MAX; txq_id++) + for (txq_id = 0; txq_id <= priv->hw_params.max_txq_num; txq_id++) iwl_tx_queue_free(priv, txq_id); } @@ -1259,7 +1259,7 @@ void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv) iwl_write_prph(priv, ALM_SCD_MODE_REG, 0); /* reset TFD queues */ - for (txq_id = 0; txq_id < TFD_QUEUE_MAX; txq_id++) { + for (txq_id = 0; txq_id <= priv->hw_params.max_txq_num; txq_id++) { iwl_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0); iwl_poll_direct_bit(priv, FH39_TSSR_TX_STATUS, FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id), @@ -2488,6 +2488,9 @@ int iwl3945_hw_set_hw_params(struct iwl_priv *priv) return -ENOMEM; } + /* Assign number of Usable TX queues */ + priv->hw_params.max_txq_num = TFD_QUEUE_MAX; + priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd); priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_3K; priv->hw_params.max_pkt_size = 2342; -- cgit v0.10.2 From 3e5d238fa75783e1080e7413c7e36dd5203950eb Mon Sep 17 00:00:00 2001 From: Abhijeet Kolekar Date: Tue, 17 Mar 2009 21:51:49 -0700 Subject: iwl3945: use iwl_cmd_queue_free iwl_cmd_queue_free needs to be used to free up the cmd_queue, as TFD slots for cmd_queue and tx_queue are different. Signed-off-by: Abhijeet Kolekar Signed-off-by: Reinette Chatre Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c index 99bb48e..50c61ed 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c @@ -747,11 +747,6 @@ void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq) int i; int counter; - /* classify bd */ - if (txq->q.id == IWL_CMD_QUEUE_NUM) - /* nothing to cleanup after for host commands */ - return; - /* sanity check */ counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags)); if (counter > NUM_TFD_CHUNKS) { @@ -1240,7 +1235,11 @@ void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv) /* Tx queues */ for (txq_id = 0; txq_id <= priv->hw_params.max_txq_num; txq_id++) - iwl_tx_queue_free(priv, txq_id); + if (txq_id == IWL_CMD_QUEUE_NUM) + iwl_cmd_queue_free(priv); + else + iwl_tx_queue_free(priv, txq_id); + } void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv) diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h index 27310fe..a8eac8c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.h +++ b/drivers/net/wireless/iwlwifi/iwl-core.h @@ -264,6 +264,7 @@ void iwl_rx_reply_error(struct iwl_priv *priv, * RX ******************************************************/ void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq); +void iwl_cmd_queue_free(struct iwl_priv *priv); int iwl_rx_queue_alloc(struct iwl_priv *priv); void iwl_rx_handle(struct iwl_priv *priv); int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c index dff60fb..9e83ee2 100644 --- a/drivers/net/wireless/iwlwifi/iwl-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c @@ -174,7 +174,7 @@ EXPORT_SYMBOL(iwl_tx_queue_free); * Free all buffers. * 0-fill, but do not free "txq" descriptor structure. 
*/ -static void iwl_cmd_queue_free(struct iwl_priv *priv) +void iwl_cmd_queue_free(struct iwl_priv *priv) { struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM]; struct iwl_queue *q = &txq->q; @@ -193,12 +193,14 @@ static void iwl_cmd_queue_free(struct iwl_priv *priv) /* De-alloc circular buffer of TFDs */ if (txq->q.n_bd) - pci_free_consistent(dev, sizeof(struct iwl_tfd) * + pci_free_consistent(dev, priv->hw_params.tfd_size * txq->q.n_bd, txq->tfds, txq->q.dma_addr); /* 0-fill queue descriptor structure */ memset(txq, 0, sizeof(*txq)); } +EXPORT_SYMBOL(iwl_cmd_queue_free); + /*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** * DMA services * -- cgit v0.10.2 From 1e680233e7edfd081ebf9ec54e118547d5de7a8c Mon Sep 17 00:00:00 2001 From: Abhijeet Kolekar Date: Tue, 17 Mar 2009 21:51:50 -0700 Subject: iwl3945: fix checkpatch.pl errors Patch fixes two checkpatch.pl errors. Signed-off-by: Abhijeet Kolekar Signed-off-by: Reinette Chatre Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c index 50c61ed..d03f553 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c @@ -1179,7 +1179,7 @@ int iwl3945_hw_nic_init(struct iwl_priv *priv) IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id); rc = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN); - if(rc) + if (rc) return rc; priv->cfg->ops->lib->apm_ops.config(priv); diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index ec44645..d61f9a0 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c @@ -560,7 +560,7 @@ static int iwl3945_set_dynamic_key(struct iwl_priv *priv, ret = iwl3945_set_wep_dynamic_key_info(priv, keyconf, sta_id); break; default: - IWL_ERR(priv,"Unknown alg: %s alg = %d\n", __func__, keyconf->alg); + IWL_ERR(priv, "Unknown alg: %s alg = %d\n", __func__, keyconf->alg); ret = -EINVAL; } -- cgit v0.10.2 From 82127493a656f6293ffb1566410b5753f29991ef Mon Sep 17 00:00:00 2001 From: Abhijeet Kolekar Date: Tue, 17 Mar 2009 21:51:51 -0700 Subject: iwl3945: control rate decrease Control the rate decrease. Do not decrease the rate fast. Use success_ratio for rate scaling :) Signed-off-by: Abhijeet Kolekar Signed-off-by: Reinette Chatre Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c index f65c308..af6b9d4 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c @@ -124,7 +124,7 @@ static struct iwl3945_tpt_entry iwl3945_tpt_table_g[] = { #define IWL39_RATE_HIGH_TH 11520 #define IWL_SUCCESS_UP_TH 8960 #define IWL_SUCCESS_DOWN_TH 10880 -#define IWL_RATE_MIN_FAILURE_TH 8 +#define IWL_RATE_MIN_FAILURE_TH 6 #define IWL_RATE_MIN_SUCCESS_TH 8 #define IWL_RATE_DECREASE_TH 1920 #define IWL_RATE_RETRY_TH 15 @@ -488,7 +488,7 @@ static void rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband IWL_DEBUG_RATE(priv, "enter\n"); - retries = info->status.rates[0].count - 1; + retries = info->status.rates[0].count; /* Sanity Check for retries */ if (retries > IWL_RATE_RETRY_TH) retries = IWL_RATE_RETRY_TH; @@ -791,16 +791,15 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, if ((window->success_ratio < IWL_RATE_DECREASE_TH) || !current_tpt) { IWL_DEBUG_RATE(priv, "decrease rate because of low success_ratio\n"); scale_action = -1; - /* No throughput measured yet for adjacent rates, * try increase */ } else if ((low_tpt == IWL_INVALID_VALUE) && (high_tpt == IWL_INVALID_VALUE)) { - if (high != IWL_RATE_INVALID && window->success_counter >= IWL_RATE_INCREASE_TH) + if (high != IWL_RATE_INVALID && window->success_ratio >= IWL_RATE_INCREASE_TH) scale_action = 1; else if (low != IWL_RATE_INVALID) - scale_action = -1; + scale_action = 0; /* Both adjacent throughputs are measured, but neither one has * better throughput; we're using the best rate, don't change @@ -826,14 +825,14 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, else { IWL_DEBUG_RATE(priv, "decrease rate because of high tpt\n"); - scale_action = -1; + scale_action = 0; } } else if (low_tpt != IWL_INVALID_VALUE) { if (low_tpt > current_tpt) { IWL_DEBUG_RATE(priv, "decrease rate because of low tpt\n"); scale_action = -1; - } else if (window->success_counter >= IWL_RATE_INCREASE_TH) { + } else if (window->success_ratio >= IWL_RATE_INCREASE_TH) { /* Lower rate has better * throughput,decrease rate */ scale_action = 1; -- cgit v0.10.2 From a2f1cbebdccc866d6c7da9eb655d35b5c60d33a0 Mon Sep 17 00:00:00 2001 From: Wey-Yi Guy Date: Tue, 17 Mar 2009 21:51:52 -0700 Subject: iwlwifi: report error when detect failure during stop agg queue This fix related to bug 1921 at http://www.intellinuxwireless.org/bugzilla/show_bug.cgi?id=1921 when detect error during stopping tx aggregation queue, report the error to help identify the problem. Signed-off-by: Wey-Yi Guy Signed-off-by: Reinette Chatre Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index 08c19be..a3d9a95 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c @@ -1077,7 +1077,7 @@ static int iwl5000_txq_agg_disable(struct iwl_priv *priv, u16 txq_id, if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) || (IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES <= txq_id)) { - IWL_WARN(priv, + IWL_ERR(priv, "queue number out of range: %d, must be %d to %d\n", txq_id, IWL50_FIRST_AMPDU_QUEUE, IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES - 1); diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c index 1684490..5798fe4 100644 --- a/drivers/net/wireless/iwlwifi/iwl-sta.c +++ b/drivers/net/wireless/iwlwifi/iwl-sta.c @@ -1138,8 +1138,10 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, const u8 *addr, int tid) int sta_id; sta_id = iwl_find_station(priv, addr); - if (sta_id == IWL_INVALID_STATION) + if (sta_id == IWL_INVALID_STATION) { + IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid); return -ENXIO; + } spin_lock_irqsave(&priv->sta_lock, flags); priv->stations[sta_id].sta.station_flags_msk = 0; diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c index 9e83ee2..b13862a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c @@ -1223,8 +1223,10 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid) sta_id = iwl_find_station(priv, ra); - if (sta_id == IWL_INVALID_STATION) + if (sta_id == IWL_INVALID_STATION) { + IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid); return -ENXIO; + } if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON) IWL_WARN(priv, "Stopping AGG while state not IWL_AGG_ON\n"); -- cgit v0.10.2 From 43da9192326a4499b5faf737c3636f25b56b53e0 Mon Sep 17 00:00:00 2001 From: Abhijeet Kolekar Date: Tue, 17 Mar 2009 21:51:53 -0700 Subject: iwl3945: replace stations with stations_39 A *leftover* stations is replaced with stations_39. Signed-off-by: Abhijeet Kolekar Signed-off-by: Reinette Chatre Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index d61f9a0..ede29b6 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c @@ -485,14 +485,14 @@ static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv, memcpy(priv->stations_39[sta_id].sta.key.key, keyconf->key, keyconf->keylen); - if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) + if ((priv->stations_39[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC) - priv->stations[sta_id].sta.key.key_offset = + priv->stations_39[sta_id].sta.key.key_offset = iwl_get_free_ucode_key_index(priv); /* else, we are overriding an existing key => no need to allocated room * in uCode. */ - WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, + WARN(priv->stations_39[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, "no space for a new key"); priv->stations_39[sta_id].sta.key.key_flags = key_flags; -- cgit v0.10.2 From a9a6ffffd05f97e6acbdeafc595e269855829751 Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Wed, 18 Mar 2009 14:06:44 +0200 Subject: mac80211: don't drop nullfunc frames during software scan ieee80211_tx_h_check_assoc() was dropping everything else than probe requests during software scan. 
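As a rough stand-alone sketch of that check (stand-in frame-type names and a simplified signature; the real code is the ieee80211_tx_h_check_assoc() hunk in net/mac80211/tx.c further down in this patch):

    #include <stdbool.h>

    enum frame_type { FRAME_OTHER, FRAME_PROBE_REQ, FRAME_NULLFUNC };
    enum tx_action  { TX_CONTINUE, TX_DROP };

    /* Pre-patch behaviour during a software scan: anything that is not a
     * probe request gets dropped.  The hunk below relaxes this so that
     * nullfunc frames are also allowed through. */
    static enum tx_action check_assoc_during_scan(bool sw_scanning,
                                                  enum frame_type type)
    {
            if (sw_scanning && type != FRAME_PROBE_REQ)
                    return TX_DROP;
            return TX_CONTINUE;
    }
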
So the nullfunc frame with the power save bit was dropped and AP never received it. This meant that AP never buffered any frames for the station during software scan. Fix this by allowing to transmit both probe request and nullfunc frames during software scan. Tested with stlc45xx. Signed-off-by: Kalle Valo Acked-by: Johannes Berg Signed-off-by: John W. Linville diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 5030a3c..46f35dc 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -409,6 +409,19 @@ int ieee80211_start_scan(struct ieee80211_sub_if_data *scan_sdata, return 0; } + /* + * Hardware/driver doesn't support hw_scan, so use software + * scanning instead. First send a nullfunc frame with power save + * bit on so that AP will buffer the frames for us while we are not + * listening, then send probe requests to each channel and wait for + * the responses. After all channels are scanned, tune back to the + * original channel and send a nullfunc frame with power save bit + * off to trigger the AP to send us all the buffered frames. + * + * Note that while local->sw_scanning is true everything else but + * nullfunc frames and probe requests will be dropped in + * ieee80211_tx_h_check_assoc(). + */ local->sw_scanning = true; if (local->ops->sw_scan_start) local->ops->sw_scan_start(local_to_hw(local)); diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 038460b..f3f240c 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -193,7 +193,19 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) return TX_CONTINUE; if (unlikely(tx->local->sw_scanning) && - !ieee80211_is_probe_req(hdr->frame_control)) + !ieee80211_is_probe_req(hdr->frame_control) && + !ieee80211_is_nullfunc(hdr->frame_control)) + /* + * When software scanning only nullfunc frames (to notify + * the sleep state to the AP) and probe requests (for the + * active scan) are allowed, all other frames should not be + * sent and we should not get here, but if we do + * nonetheless, drop them to avoid sending them + * off-channel. See the link below and + * ieee80211_start_scan() for more. + * + * http://article.gmane.org/gmane.linux.kernel.wireless.general/30089 + */ return TX_DROP; if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT) -- cgit v0.10.2 From b3a902850a8f5bc11a660051faae707f928d4bd6 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 18 Mar 2009 14:29:38 +0100 Subject: mac80211: kill IEEE80211_CONF_SHORT_SLOT_TIME No drivers use it any more, so it can now be removed safely. Signed-off-by: Johannes Berg Signed-off-by: John W. Linville diff --git a/include/net/mac80211.h b/include/net/mac80211.h index a38a82c..daa539a 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -520,12 +520,6 @@ enum ieee80211_conf_flags { IEEE80211_CONF_PS = (1<<1), }; -/* XXX: remove all this once drivers stop trying to use it */ -static inline int __deprecated __IEEE80211_CONF_SHORT_SLOT_TIME(void) -{ - return 0; -} -#define IEEE80211_CONF_SHORT_SLOT_TIME (__IEEE80211_CONF_SHORT_SLOT_TIME()) /** * enum ieee80211_conf_changed - denotes which configuration changed -- cgit v0.10.2 From 0934af2340caf3c9f247ae650bf0c6faa4203dba Mon Sep 17 00:00:00 2001 From: Vasanthakumar Thiagarajan Date: Wed, 18 Mar 2009 20:22:00 +0530 Subject: ath9k: Fix rate control update for aggregated frames We will miss rate control update if first A-MPDU of an aggregation is not Block Acked as we always tell if the rate control needs to updated through update_rc of first A-MPDU. 
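A stand-alone sketch of the intended bookkeeping (stand-in types and a stub reporter; the actual change is the ath_tx_complete_aggr() hunk in the diff below):

    #include <stdbool.h>
    #include <stddef.h>

    enum subframe_status { SUB_ACKED, SUB_FAILED, SUB_PENDING };

    /* Stand-in for ath_tx_rc_status(): hand one subframe's outcome, plus
     * the number of bad subframes, to the rate-control code. */
    void report_rc_status(size_t index, int nbad);

    /* Walk the subframes of one aggregate and report rate-control status
     * exactly once: for the first subframe whose outcome (acked or
     * definitely failed) is known, instead of unconditionally for
     * subframe 0. */
    static void complete_aggr(const enum subframe_status *sub, size_t n,
                              int nbad)
    {
            bool rc_update = true;
            int acked_cnt = 0, txfail_cnt = 0;

            for (size_t i = 0; i < n; i++) {
                    if (sub[i] == SUB_ACKED)
                            acked_cnt++;
                    else if (sub[i] == SUB_FAILED)
                            txfail_cnt++;
                    else
                            continue;   /* still pending, will be retried */

                    if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
                            report_rc_status(i, nbad);
                            rc_update = false;
                    }
            }
    }
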
This patch does rate control update for the first A-MPDU which notifies it's tx status (which is not necessarily the first A-MPDU of an aggregation) to mac80211. Signed-off-by: Vasanthakumar Thiagarajan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath9k/xmit.c index 8968abe..f287719 100644 --- a/drivers/net/wireless/ath9k/xmit.c +++ b/drivers/net/wireless/ath9k/xmit.c @@ -64,6 +64,10 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, struct list_head *head); static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf); +static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf, + int txok); +static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds, + int nbad, int txok); /*********************/ /* Aggregation logic */ @@ -274,9 +278,10 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf_next, *bf_last = bf->bf_lastbf; struct ath_desc *ds = bf_last->bf_desc; struct list_head bf_head, bf_pending; - u16 seq_st = 0; + u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0; u32 ba[WME_BA_BMP_SIZE >> 5]; - int isaggr, txfail, txpending, sendbar = 0, needreset = 0; + int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0; + bool rc_update = true; skb = (struct sk_buff *)bf->bf_mpdu; hdr = (struct ieee80211_hdr *)skb->data; @@ -316,6 +321,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, INIT_LIST_HEAD(&bf_pending); INIT_LIST_HEAD(&bf_head); + nbad = ath_tx_num_badfrms(sc, bf, txok); while (bf) { txfail = txpending = 0; bf_next = bf->bf_next; @@ -323,8 +329,10 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) { /* transmit completion, subframe is * acked by block ack */ + acked_cnt++; } else if (!isaggr && txok) { /* transmit completion */ + acked_cnt++; } else { if (!(tid->state & AGGR_CLEANUP) && ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) { @@ -335,6 +343,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, bf->bf_state.bf_type |= BUF_XRETRY; txfail = 1; sendbar = 1; + txfail_cnt++; } } else { /* @@ -361,6 +370,11 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, ath_tx_update_baw(sc, tid, bf->bf_seqno); spin_unlock_bh(&txq->axq_lock); + if (rc_update) + if (acked_cnt == 1 || txfail_cnt == 1) { + ath_tx_rc_status(bf, ds, nbad, txok); + rc_update = false; + } ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar); } else { /* retry the un-acked ones */ @@ -1901,7 +1915,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) struct ath_buf *bf, *lastbf, *bf_held = NULL; struct list_head bf_head; struct ath_desc *ds; - int txok, nbad = 0; + int txok; int status; DPRINTF(sc, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n", @@ -1995,13 +2009,9 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) bf->bf_retries = ds->ds_txstat.ts_longretry; if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY) bf->bf_state.bf_type |= BUF_XRETRY; - nbad = 0; - } else { - nbad = ath_tx_num_badfrms(sc, bf, txok); + ath_tx_rc_status(bf, ds, 0, txok); } - ath_tx_rc_status(bf, ds, nbad, txok); - if (bf_isampdu(bf)) ath_tx_complete_aggr(sc, txq, bf, &bf_head, txok); else -- cgit v0.10.2 From 4b4698c443c9db62b220c41a1793872d6ebe82e1 Mon Sep 17 00:00:00 2001 From: Jouni Malinen 
Date: Thu, 19 Mar 2009 13:39:19 +0200 Subject: mac80211: Fix a typo in assoc vs. reassoc check Signed-off-by: Jouni Malinen Signed-off-by: John W. Linville diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index a558796..4c753bb 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -103,7 +103,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) u32 rates = 0; size_t e_ies_len; - if (ifmgd->flags & IEEE80211_IBSS_PREV_BSSID_SET) { + if (ifmgd->flags & IEEE80211_STA_PREV_BSSID_SET) { e_ies = sdata->u.mgd.ie_reassocreq; e_ies_len = sdata->u.mgd.ie_reassocreq_len; } else { -- cgit v0.10.2 From a299542e97ec1939fdca7db6d3d82c0aa9bf8b9a Mon Sep 17 00:00:00 2001 From: Jouni Malinen Date: Thu, 19 Mar 2009 13:39:20 +0200 Subject: mac80211: Fix reassociation by not clearing previous BSSID We must not clear the previous BSSID when roaming to another AP within the same ESS for reassociation to be used properly. It is fine to clear this when the SSID changes, so let's move the code into ieee80211_sta_set_ssid(). Signed-off-by: Jouni Malinen Signed-off-by: John W. Linville diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 4c753bb..1f49b63 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1888,8 +1888,6 @@ int ieee80211_sta_commit(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; - ifmgd->flags &= ~IEEE80211_STA_PREV_BSSID_SET; - if (ifmgd->ssid_len) ifmgd->flags |= IEEE80211_STA_SSID_SET; else @@ -1908,6 +1906,10 @@ int ieee80211_sta_set_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size ifmgd = &sdata->u.mgd; if (ifmgd->ssid_len != len || memcmp(ifmgd->ssid, ssid, len) != 0) { + /* + * Do not use reassociation if SSID is changed (different ESS). + */ + ifmgd->flags &= ~IEEE80211_STA_PREV_BSSID_SET; memset(ifmgd->ssid, 0, sizeof(ifmgd->ssid)); memcpy(ifmgd->ssid, ssid, len); ifmgd->ssid_len = len; -- cgit v0.10.2 From 6039f6d23fe792d615da5449e9fa1c6b43caacf6 Mon Sep 17 00:00:00 2001 From: Jouni Malinen Date: Thu, 19 Mar 2009 13:39:21 +0200 Subject: nl80211: Event notifications for MLME events Add new nl80211 event notifications (and a new multicast group, "mlme") for informing user space about received and processed Authentication, (Re)Association Response, Deauthentication, and Disassociation frames in station and IBSS modes (i.e., MLME SAP interface primitives MLME-AUTHENTICATE.confirm, MLME-ASSOCIATE.confirm, MLME-REASSOCIATE.confirm, MLME-DEAUTHENTICATE.indicate, and MLME-DISASSOCIATE.indication). The event data is encapsulated as the 802.11 management frame since we already have the frame in that format and it includes all the needed information. This is the initial step in providing MLME SAP interface for authentication and association with nl80211. In other words, kernel code will act as the MLME and a user space application can control it as the SME. Signed-off-by: Jouni Malinen Signed-off-by: John W. Linville diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h index 3700d92..5ce68ae 100644 --- a/include/linux/nl80211.h +++ b/include/linux/nl80211.h @@ -161,6 +161,25 @@ * %NL80211_REG_TYPE_COUNTRY the alpha2 to which we have moved on * to (%NL80211_ATTR_REG_ALPHA2). * + * @NL80211_CMD_AUTHENTICATE: authentication notification (on the "mlme" + * multicast group). This event reports reception of an Authentication + * frame in station and IBSS modes when the local MLME processed the + * frame, i.e., it was for the local STA and was received in correct + * state. 
This is similar to MLME-AUTHENTICATE.confirm primitive in the + * MLME SAP interface (kernel providing MLME, userspace SME). The + * included NL80211_ATTR_FRAME attribute contains the management frame + * (including both the header and frame body, but not FCS). + * @NL80211_CMD_ASSOCIATE: association notification; like + * NL80211_CMD_AUTHENTICATE but for Association Response and Reassociation + * Response frames (similar to MLME-ASSOCIATE.confirm or + * MLME-REASSOCIATE.confirm primitives). + * @NL80211_CMD_DEAUTHENTICATE: deauthentication notification; like + * NL80211_CMD_AUTHENTICATE but for Deauthentication frames (similar to + * MLME-DEAUTHENTICATE.indication primitive). + * @NL80211_CMD_DISASSOCIATE: disassociation notification; like + * NL80211_CMD_AUTHENTICATE but for Disassociation frames (similar to + * MLME-DISASSOCIATE.indication primitive). + * * @NL80211_CMD_MAX: highest used command number * @__NL80211_CMD_AFTER_LAST: internal use */ @@ -217,6 +236,11 @@ enum nl80211_commands { NL80211_CMD_REG_CHANGE, + NL80211_CMD_AUTHENTICATE, + NL80211_CMD_ASSOCIATE, + NL80211_CMD_DEAUTHENTICATE, + NL80211_CMD_DISASSOCIATE, + /* add new commands above here */ /* used to define NL80211_CMD_MAX below */ @@ -230,8 +254,11 @@ enum nl80211_commands { */ #define NL80211_CMD_SET_BSS NL80211_CMD_SET_BSS #define NL80211_CMD_SET_MGMT_EXTRA_IE NL80211_CMD_SET_MGMT_EXTRA_IE - #define NL80211_CMD_REG_CHANGE NL80211_CMD_REG_CHANGE +#define NL80211_CMD_AUTHENTICATE NL80211_CMD_AUTHENTICATE +#define NL80211_CMD_ASSOCIATE NL80211_CMD_ASSOCIATE +#define NL80211_CMD_DEAUTHENTICATE NL80211_CMD_DEAUTHENTICATE +#define NL80211_CMD_DISASSOCIATE NL80211_CMD_DISASSOCIATE /** * enum nl80211_attrs - nl80211 netlink attributes @@ -353,6 +380,10 @@ enum nl80211_commands { * an array of command numbers (i.e. a mapping index to command number) * that the driver for the given wiphy supports. * + * @NL80211_ATTR_FRAME: frame data (binary attribute), including frame header + * and body, but not FCS; used, e.g., with NL80211_CMD_AUTHENTICATE and + * NL80211_CMD_ASSOCIATE events + * * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use */ @@ -432,6 +463,8 @@ enum nl80211_attrs { NL80211_ATTR_SUPPORTED_COMMANDS, + NL80211_ATTR_FRAME, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -451,6 +484,7 @@ enum nl80211_attrs { #define NL80211_ATTR_IE NL80211_ATTR_IE #define NL80211_ATTR_REG_INITIATOR NL80211_ATTR_REG_INITIATOR #define NL80211_ATTR_REG_TYPE NL80211_ATTR_REG_TYPE +#define NL80211_ATTR_FRAME NL80211_ATTR_FRAME #define NL80211_MAX_SUPP_RATES 32 #define NL80211_MAX_SUPP_REG_RULES 32 diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 50f3fd9..ad44016 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -807,4 +807,50 @@ void cfg80211_put_bss(struct cfg80211_bss *bss); */ void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *bss); +/** + * cfg80211_send_rx_auth - notification of processed authentication + * @dev: network device + * @buf: authentication frame (header + body) + * @len: length of the frame data + * + * This function is called whenever an authentication has been processed in + * station mode. 
+ */ +void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len); + +/** + * cfg80211_send_rx_assoc - notification of processed association + * @dev: network device + * @buf: (re)association response frame (header + body) + * @len: length of the frame data + * + * This function is called whenever a (re)association response has been + * processed in station mode. + */ +void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len); + +/** + * cfg80211_send_rx_deauth - notification of processed deauthentication + * @dev: network device + * @buf: deauthentication frame (header + body) + * @len: length of the frame data + * + * This function is called whenever deauthentication has been processed in + * station mode. + */ +void cfg80211_send_rx_deauth(struct net_device *dev, const u8 *buf, + size_t len); + +/** + * cfg80211_send_rx_disassoc - notification of processed disassociation + * @dev: network device + * @buf: disassociation response frame (header + body) + * @len: length of the frame data + * + * This function is called whenever disassociation has been processed in + * station mode. + */ +void cfg80211_send_rx_disassoc(struct net_device *dev, const u8 *buf, + size_t len); + #endif /* __NET_CFG80211_H */ diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 1f49b63..6dc7a61 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1085,11 +1085,13 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, case WLAN_AUTH_OPEN: case WLAN_AUTH_LEAP: ieee80211_auth_completed(sdata); + cfg80211_send_rx_auth(sdata->dev, (u8 *) mgmt, len); break; case WLAN_AUTH_SHARED_KEY: - if (ifmgd->auth_transaction == 4) + if (ifmgd->auth_transaction == 4) { ieee80211_auth_completed(sdata); - else + cfg80211_send_rx_auth(sdata->dev, (u8 *) mgmt, len); + } else ieee80211_auth_challenge(sdata, mgmt, len); break; } @@ -1125,6 +1127,7 @@ static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, ieee80211_set_disassoc(sdata, true, false, 0); ifmgd->flags &= ~IEEE80211_STA_AUTHENTICATED; + cfg80211_send_rx_deauth(sdata->dev, (u8 *) mgmt, len); } @@ -1154,6 +1157,7 @@ static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata, } ieee80211_set_disassoc(sdata, false, false, reason_code); + cfg80211_send_rx_disassoc(sdata->dev, (u8 *) mgmt, len); } @@ -1370,6 +1374,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, ieee80211_set_associated(sdata, changed); ieee80211_associated(sdata); + cfg80211_send_rx_assoc(sdata->dev, (u8 *) mgmt, len); } diff --git a/net/wireless/Makefile b/net/wireless/Makefile index c157b4d..6d1e7b2 100644 --- a/net/wireless/Makefile +++ b/net/wireless/Makefile @@ -5,7 +5,7 @@ obj-$(CONFIG_LIB80211_CRYPT_WEP) += lib80211_crypt_wep.o obj-$(CONFIG_LIB80211_CRYPT_CCMP) += lib80211_crypt_ccmp.o obj-$(CONFIG_LIB80211_CRYPT_TKIP) += lib80211_crypt_tkip.o -cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o +cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o mlme.o cfg80211-$(CONFIG_WIRELESS_EXT) += wext-compat.o ccflags-y += -D__CHECK_ENDIAN__ diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c new file mode 100644 index 0000000..bec5721 --- /dev/null +++ b/net/wireless/mlme.c @@ -0,0 +1,46 @@ +/* + * cfg80211 MLME SAP interface + * + * Copyright (c) 2009, Jouni Malinen + */ + +#include +#include +#include +#include +#include +#include "core.h" +#include "nl80211.h" + +void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, 
size_t len) +{ + struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + nl80211_send_rx_auth(rdev, dev, buf, len); +} +EXPORT_SYMBOL(cfg80211_send_rx_auth); + +void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len) +{ + struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + nl80211_send_rx_assoc(rdev, dev, buf, len); +} +EXPORT_SYMBOL(cfg80211_send_rx_assoc); + +void cfg80211_send_rx_deauth(struct net_device *dev, const u8 *buf, size_t len) +{ + struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + nl80211_send_rx_deauth(rdev, dev, buf, len); +} +EXPORT_SYMBOL(cfg80211_send_rx_deauth); + +void cfg80211_send_rx_disassoc(struct net_device *dev, const u8 *buf, + size_t len) +{ + struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + nl80211_send_rx_disassoc(rdev, dev, buf, len); +} +EXPORT_SYMBOL(cfg80211_send_rx_disassoc); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index a3ecf8d..c034c24 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -2830,6 +2830,9 @@ static struct genl_ops nl80211_ops[] = { .dumpit = nl80211_dump_scan, }, }; +static struct genl_multicast_group nl80211_mlme_mcgrp = { + .name = "mlme", +}; /* multicast groups */ static struct genl_multicast_group nl80211_config_mcgrp = { @@ -2975,6 +2978,71 @@ nla_put_failure: nlmsg_free(msg); } +static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev, + struct net_device *netdev, + const u8 *buf, size_t len, + enum nl80211_commands cmd) +{ + struct sk_buff *msg; + void *hdr; + + msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!msg) + return; + + hdr = nl80211hdr_put(msg, 0, 0, 0, cmd); + if (!hdr) { + nlmsg_free(msg); + return; + } + + NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); + NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); + NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf); + + if (genlmsg_end(msg, hdr) < 0) { + nlmsg_free(msg); + return; + } + + genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, GFP_KERNEL); + return; + + nla_put_failure: + genlmsg_cancel(msg, hdr); + nlmsg_free(msg); +} + +void nl80211_send_rx_auth(struct cfg80211_registered_device *rdev, + struct net_device *netdev, const u8 *buf, size_t len) +{ + nl80211_send_mlme_event(rdev, netdev, buf, len, + NL80211_CMD_AUTHENTICATE); +} + +void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev, + struct net_device *netdev, const u8 *buf, + size_t len) +{ + nl80211_send_mlme_event(rdev, netdev, buf, len, NL80211_CMD_ASSOCIATE); +} + +void nl80211_send_rx_deauth(struct cfg80211_registered_device *rdev, + struct net_device *netdev, const u8 *buf, + size_t len) +{ + nl80211_send_mlme_event(rdev, netdev, buf, len, + NL80211_CMD_DEAUTHENTICATE); +} + +void nl80211_send_rx_disassoc(struct cfg80211_registered_device *rdev, + struct net_device *netdev, const u8 *buf, + size_t len) +{ + nl80211_send_mlme_event(rdev, netdev, buf, len, + NL80211_CMD_DISASSOCIATE); +} + /* initialisation/exit functions */ int nl80211_init(void) @@ -3003,6 +3071,10 @@ int nl80211_init(void) if (err) goto err_out; + err = genl_register_mc_group(&nl80211_fam, &nl80211_mlme_mcgrp); + if (err) + goto err_out; + return 0; err_out: genl_unregister_family(&nl80211_fam); diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h index 5b5fe13..b77af4a 100644 --- 
a/net/wireless/nl80211.h +++ b/net/wireless/nl80211.h @@ -11,5 +11,17 @@ extern void nl80211_send_scan_done(struct cfg80211_registered_device *rdev, extern void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev, struct net_device *netdev); extern void nl80211_send_reg_change_event(struct regulatory_request *request); +extern void nl80211_send_rx_auth(struct cfg80211_registered_device *rdev, + struct net_device *netdev, + const u8 *buf, size_t len); +extern void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev, + struct net_device *netdev, + const u8 *buf, size_t len); +extern void nl80211_send_rx_deauth(struct cfg80211_registered_device *rdev, + struct net_device *netdev, + const u8 *buf, size_t len); +extern void nl80211_send_rx_disassoc(struct cfg80211_registered_device *rdev, + struct net_device *netdev, + const u8 *buf, size_t len); #endif /* __NET_WIRELESS_NL80211_H */ -- cgit v0.10.2 From 636a5d3625993c5ca59abc81794b9ded93cdb740 Mon Sep 17 00:00:00 2001 From: Jouni Malinen Date: Thu, 19 Mar 2009 13:39:22 +0200 Subject: nl80211: Add MLME primitives to support external SME This patch adds new nl80211 commands to allow user space to request authentication and association (and also deauthentication and disassociation). The commands are structured to allow separate authentication and association steps, i.e., the interface between kernel and user space is similar to the MLME SAP interface in the IEEE 802.11 standard, and a user space application takes the role of the SME. The patch introduces MLME-AUTHENTICATE.request, MLME-{,RE}ASSOCIATE.request, MLME-DEAUTHENTICATE.request, and MLME-DISASSOCIATE.request primitives. The authentication and association commands request the actual operations in two steps (assuming the driver supports this; if not, the separate authentication step is skipped; this could end up being a separate "connect" command). The initial implementation for mac80211 uses the current net/mac80211/mlme.c for actual sending and processing of management frames, and the new nl80211 commands will just stop the current state machine from moving automatically from authentication to association. Future cleanup may move more of the MLME operations into cfg80211. The goal of this design is to provide more control of the authentication and association process to user space without having to move the full MLME implementation. This should be enough to allow the IEEE 802.11r FT protocol and 802.11s SAE authentication to be implemented. Obviously, this will also bring the extra benefit of not having to use WEXT for association requests with mac80211. An example implementation of a user space SME using the new nl80211 commands is available for wpa_supplicant. This patch is enough to get the IEEE 802.11r FT protocol working with the over-the-air mechanism (over-the-DS will need additional MLME primitives for handling the FT Action frames). Signed-off-by: Jouni Malinen Signed-off-by: John W.
Linville diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 382387e..4b501b4 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -867,6 +867,7 @@ struct ieee80211_ht_info { /* Authentication algorithms */ #define WLAN_AUTH_OPEN 0 #define WLAN_AUTH_SHARED_KEY 1 +#define WLAN_AUTH_FT 2 #define WLAN_AUTH_LEAP 128 #define WLAN_AUTH_CHALLENGE_LEN 128 diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h index 5ce68ae..9685eaa 100644 --- a/include/linux/nl80211.h +++ b/include/linux/nl80211.h @@ -161,24 +161,37 @@ * %NL80211_REG_TYPE_COUNTRY the alpha2 to which we have moved on * to (%NL80211_ATTR_REG_ALPHA2). * - * @NL80211_CMD_AUTHENTICATE: authentication notification (on the "mlme" - * multicast group). This event reports reception of an Authentication + * @NL80211_CMD_AUTHENTICATE: authentication request and notification. + * This command is used both as a command (request to authenticate) and + * as an event on the "mlme" multicast group indicating completion of the + * authentication process. + * When used as a command, %NL80211_ATTR_IFINDEX is used to identify the + * interface. %NL80211_ATTR_MAC is used to specify PeerSTAAddress (and + * BSSID in case of station mode). %NL80211_ATTR_SSID is used to specify + * the SSID (mainly for association, but is included in authentication + * request, too, to help BSS selection. %NL80211_ATTR_WIPHY_FREQ is used + * to specify the frequence of the channel in MHz. %NL80211_ATTR_AUTH_TYPE + * is used to specify the authentication type. %NL80211_ATTR_IE is used to + * define IEs (VendorSpecificInfo, but also including RSN IE and FT IEs) + * to be added to the frame. + * When used as an event, this reports reception of an Authentication * frame in station and IBSS modes when the local MLME processed the * frame, i.e., it was for the local STA and was received in correct * state. This is similar to MLME-AUTHENTICATE.confirm primitive in the * MLME SAP interface (kernel providing MLME, userspace SME). The * included NL80211_ATTR_FRAME attribute contains the management frame * (including both the header and frame body, but not FCS). - * @NL80211_CMD_ASSOCIATE: association notification; like - * NL80211_CMD_AUTHENTICATE but for Association Response and Reassociation - * Response frames (similar to MLME-ASSOCIATE.confirm or - * MLME-REASSOCIATE.confirm primitives). - * @NL80211_CMD_DEAUTHENTICATE: deauthentication notification; like + * @NL80211_CMD_ASSOCIATE: association request and notification; like + * NL80211_CMD_AUTHENTICATE but for Association and Reassociation + * (similar to MLME-ASSOCIATE.request, MLME-REASSOCIATE.request, + * MLME-ASSOCIATE.confirm or MLME-REASSOCIATE.confirm primitives). + * @NL80211_CMD_DEAUTHENTICATE: deauthentication request and notification; like * NL80211_CMD_AUTHENTICATE but for Deauthentication frames (similar to - * MLME-DEAUTHENTICATE.indication primitive). - * @NL80211_CMD_DISASSOCIATE: disassociation notification; like + * MLME-DEAUTHENTICATION.request and MLME-DEAUTHENTICATE.indication + * primitives). + * @NL80211_CMD_DISASSOCIATE: disassociation request and notification; like * NL80211_CMD_AUTHENTICATE but for Disassociation frames (similar to - * MLME-DISASSOCIATE.indication primitive). + * MLME-DISASSOCIATE.request and MLME-DISASSOCIATE.indication primitives). 
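As a rough illustration (not part of this patch), an external user space SME could issue the authenticate request described above through generic netlink roughly as follows. This sketch assumes the libnl genl API; the helper name, interface name, channel and SSID are made up, and error handling is omitted. On completion, the kernel reports the result as an NL80211_CMD_AUTHENTICATE event carrying NL80211_ATTR_FRAME on the "mlme" multicast group, after which the SME would issue NL80211_CMD_ASSOCIATE.

    #include <netlink/netlink.h>
    #include <netlink/genl/genl.h>
    #include <netlink/genl/ctrl.h>
    #include <linux/nl80211.h>
    #include <linux/if_ether.h>
    #include <net/if.h>

    /* Hypothetical SME helper: open system authentication to one BSS. */
    static int sme_authenticate(const char *ifname, const unsigned char *bssid)
    {
            struct nl_sock *sk = nl_socket_alloc();
            struct nl_msg *msg;
            int family;

            genl_connect(sk);
            family = genl_ctrl_resolve(sk, "nl80211");

            msg = nlmsg_alloc();
            genlmsg_put(msg, 0, 0, family, 0, 0, NL80211_CMD_AUTHENTICATE, 0);
            nla_put_u32(msg, NL80211_ATTR_IFINDEX, if_nametoindex(ifname));
            nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid);   /* PeerSTAAddress/BSSID */
            nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, 2412);   /* channel 1, assumed */
            nla_put(msg, NL80211_ATTR_SSID, 7, "example");     /* assumed SSID */
            nla_put_u32(msg, NL80211_ATTR_AUTH_TYPE, NL80211_AUTHTYPE_OPEN_SYSTEM);

            nl_send_auto_complete(sk, msg);
            nlmsg_free(msg);
            nl_socket_free(sk);
            return 0;
    }
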
* * @NL80211_CMD_MAX: highest used command number * @__NL80211_CMD_AFTER_LAST: internal use @@ -383,6 +396,11 @@ enum nl80211_commands { * @NL80211_ATTR_FRAME: frame data (binary attribute), including frame header * and body, but not FCS; used, e.g., with NL80211_CMD_AUTHENTICATE and * NL80211_CMD_ASSOCIATE events + * @NL80211_ATTR_SSID: SSID (binary attribute, 0..32 octets) + * @NL80211_ATTR_AUTH_TYPE: AuthenticationType, see &enum nl80211_auth_type, + * represented as a u32 + * @NL80211_ATTR_REASON_CODE: ReasonCode for %NL80211_CMD_DEAUTHENTICATE and + * %NL80211_CMD_DISASSOCIATE, u16 * * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use @@ -464,6 +482,9 @@ enum nl80211_attrs { NL80211_ATTR_SUPPORTED_COMMANDS, NL80211_ATTR_FRAME, + NL80211_ATTR_SSID, + NL80211_ATTR_AUTH_TYPE, + NL80211_ATTR_REASON_CODE, /* add attributes here, update the policy in nl80211.c */ @@ -485,6 +506,9 @@ enum nl80211_attrs { #define NL80211_ATTR_REG_INITIATOR NL80211_ATTR_REG_INITIATOR #define NL80211_ATTR_REG_TYPE NL80211_ATTR_REG_TYPE #define NL80211_ATTR_FRAME NL80211_ATTR_FRAME +#define NL80211_ATTR_SSID NL80211_ATTR_SSID +#define NL80211_ATTR_AUTH_TYPE NL80211_ATTR_AUTH_TYPE +#define NL80211_ATTR_REASON_CODE NL80211_ATTR_REASON_CODE #define NL80211_MAX_SUPP_RATES 32 #define NL80211_MAX_SUPP_REG_RULES 32 @@ -1018,4 +1042,18 @@ enum nl80211_bss { NL80211_BSS_MAX = __NL80211_BSS_AFTER_LAST - 1 }; +/** + * enum nl80211_auth_type - AuthenticationType + * + * @NL80211_AUTHTYPE_OPEN_SYSTEM: Open System authentication + * @NL80211_AUTHTYPE_SHARED_KEY: Shared Key authentication (WEP only) + * @NL80211_AUTHTYPE_FT: Fast BSS Transition (IEEE 802.11r) + * @NL80211_AUTHTYPE_NETWORK_EAP: Network EAP (some Cisco APs and mainly LEAP) + */ +enum nl80211_auth_type { + NL80211_AUTHTYPE_OPEN_SYSTEM, + NL80211_AUTHTYPE_SHARED_KEY, + NL80211_AUTHTYPE_FT, + NL80211_AUTHTYPE_NETWORK_EAP, +}; #endif /* __LINUX_NL80211_H */ diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index ad44016..0da9a55 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -579,6 +579,105 @@ struct cfg80211_bss { }; /** + * struct cfg80211_auth_request - Authentication request data + * + * This structure provides information needed to complete IEEE 802.11 + * authentication. + * NOTE: This structure will likely change when more code from mac80211 is + * moved into cfg80211 so that non-mac80211 drivers can benefit from it, too. + * Before using this in a driver that does not use mac80211, it would be better + * to check the status of that work and better yet, volunteer to work on it. 
+ * + * @chan: The channel to use or %NULL if not specified (auto-select based on + * scan results) + * @peer_addr: The address of the peer STA (AP BSSID in infrastructure case); + * this field is required to be present; if the driver wants to help with + * BSS selection, it should use (yet to be added) MLME event to allow user + * space SME to be notified of roaming candidate, so that the SME can then + * use the authentication request with the recommended BSSID and whatever + * other data may be needed for authentication/association + * @ssid: SSID or %NULL if not yet available + * @ssid_len: Length of ssid in octets + * @auth_type: Authentication type (algorithm) + * @ie: Extra IEs to add to Authentication frame or %NULL + * @ie_len: Length of ie buffer in octets + */ +struct cfg80211_auth_request { + struct ieee80211_channel *chan; + u8 *peer_addr; + const u8 *ssid; + size_t ssid_len; + enum nl80211_auth_type auth_type; + const u8 *ie; + size_t ie_len; +}; + +/** + * struct cfg80211_assoc_request - (Re)Association request data + * + * This structure provides information needed to complete IEEE 802.11 + * (re)association. + * NOTE: This structure will likely change when more code from mac80211 is + * moved into cfg80211 so that non-mac80211 drivers can benefit from it, too. + * Before using this in a driver that does not use mac80211, it would be better + * to check the status of that work and better yet, volunteer to work on it. + * + * @chan: The channel to use or %NULL if not specified (auto-select based on + * scan results) + * @peer_addr: The address of the peer STA (AP BSSID); this field is required + * to be present and the STA must be in State 2 (authenticated) with the + * peer STA + * @ssid: SSID + * @ssid_len: Length of ssid in octets + * @ie: Extra IEs to add to (Re)Association Request frame or %NULL + * @ie_len: Length of ie buffer in octets + */ +struct cfg80211_assoc_request { + struct ieee80211_channel *chan; + u8 *peer_addr; + const u8 *ssid; + size_t ssid_len; + const u8 *ie; + size_t ie_len; +}; + +/** + * struct cfg80211_deauth_request - Deauthentication request data + * + * This structure provides information needed to complete IEEE 802.11 + * deauthentication. + * + * @peer_addr: The address of the peer STA (AP BSSID); this field is required + * to be present and the STA must be authenticated with the peer STA + * @ie: Extra IEs to add to Deauthentication frame or %NULL + * @ie_len: Length of ie buffer in octets + */ +struct cfg80211_deauth_request { + u8 *peer_addr; + u16 reason_code; + const u8 *ie; + size_t ie_len; +}; + +/** + * struct cfg80211_disassoc_request - Disassociation request data + * + * This structure provides information needed to complete IEEE 802.11 + * disassocation. + * + * @peer_addr: The address of the peer STA (AP BSSID); this field is required + * to be present and the STA must be associated with the peer STA + * @ie: Extra IEs to add to Disassociation frame or %NULL + * @ie_len: Length of ie buffer in octets + */ +struct cfg80211_disassoc_request { + u8 *peer_addr; + u16 reason_code; + const u8 *ie; + size_t ie_len; +}; + +/** * struct cfg80211_ops - backend description for wireless configuration * * This struct is registered by fullmac card drivers and/or wireless stacks @@ -650,6 +749,11 @@ struct cfg80211_bss { * the driver, and will be valid until passed to cfg80211_scan_done(). * For scan results, call cfg80211_inform_bss(); you can call this outside * the scan/scan_done bracket too. 
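For a non-mac80211 (full-MAC) driver, hooking the new MLME entry points would look roughly like the sketch below; the driver and handler names are hypothetical, only the auth hook is shown, and mac80211's net/mac80211/cfg.c change later in this patch is the real in-tree user.

    #include <net/cfg80211.h>

    /* Hypothetical full-MAC driver: forward the request to firmware.
     * Returning 0 only means the request was accepted; the outcome is
     * reported later via cfg80211_send_rx_auth() and friends. */
    static int mydrv_auth(struct wiphy *wiphy, struct net_device *dev,
                          struct cfg80211_auth_request *req)
    {
            /* use req->peer_addr, req->auth_type, req->ssid/ssid_len,
             * req->ie/ie_len and (optionally) req->chan */
            return 0;
    }

    static struct cfg80211_ops mydrv_cfg_ops = {
            .auth = mydrv_auth,
            /* .assoc, .deauth and .disassoc are wired up the same way */
    };
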
+ * + * @auth: Request to authenticate with the specified peer + * @assoc: Request to (re)associate with the specified peer + * @deauth: Request to deauthenticate from the specified peer + * @disassoc: Request to disassociate from the specified peer */ struct cfg80211_ops { int (*suspend)(struct wiphy *wiphy); @@ -730,6 +834,15 @@ struct cfg80211_ops { int (*scan)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_scan_request *request); + + int (*auth)(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_auth_request *req); + int (*assoc)(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_assoc_request *req); + int (*deauth)(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_deauth_request *req); + int (*disassoc)(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_disassoc_request *req); }; /* temporary wext handlers */ diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 58693e5..223e536 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -1300,6 +1300,142 @@ static int ieee80211_scan(struct wiphy *wiphy, return ieee80211_request_scan(sdata, req); } +static int ieee80211_auth(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_auth_request *req) +{ + struct ieee80211_sub_if_data *sdata; + + if (!netif_running(dev)) + return -ENETDOWN; + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + if (sdata->vif.type != NL80211_IFTYPE_STATION) + return -EOPNOTSUPP; + + switch (req->auth_type) { + case NL80211_AUTHTYPE_OPEN_SYSTEM: + sdata->u.mgd.auth_algs = IEEE80211_AUTH_ALG_OPEN; + break; + case NL80211_AUTHTYPE_SHARED_KEY: + sdata->u.mgd.auth_algs = IEEE80211_AUTH_ALG_SHARED_KEY; + break; + case NL80211_AUTHTYPE_FT: + sdata->u.mgd.auth_algs = IEEE80211_AUTH_ALG_FT; + break; + case NL80211_AUTHTYPE_NETWORK_EAP: + sdata->u.mgd.auth_algs = IEEE80211_AUTH_ALG_LEAP; + break; + default: + return -EOPNOTSUPP; + } + + memcpy(sdata->u.mgd.bssid, req->peer_addr, ETH_ALEN); + sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL; + sdata->u.mgd.flags |= IEEE80211_STA_BSSID_SET; + + /* TODO: req->chan */ + sdata->u.mgd.flags |= IEEE80211_STA_AUTO_CHANNEL_SEL; + + if (req->ssid) { + sdata->u.mgd.flags |= IEEE80211_STA_SSID_SET; + memcpy(sdata->u.mgd.ssid, req->ssid, req->ssid_len); + sdata->u.mgd.ssid_len = req->ssid_len; + sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_SSID_SEL; + } + + kfree(sdata->u.mgd.sme_auth_ie); + sdata->u.mgd.sme_auth_ie = NULL; + sdata->u.mgd.sme_auth_ie_len = 0; + if (req->ie) { + sdata->u.mgd.sme_auth_ie = kmalloc(req->ie_len, GFP_KERNEL); + if (sdata->u.mgd.sme_auth_ie == NULL) + return -ENOMEM; + memcpy(sdata->u.mgd.sme_auth_ie, req->ie, req->ie_len); + sdata->u.mgd.sme_auth_ie_len = req->ie_len; + } + + sdata->u.mgd.flags |= IEEE80211_STA_EXT_SME; + sdata->u.mgd.state = IEEE80211_STA_MLME_DIRECT_PROBE; + ieee80211_sta_req_auth(sdata); + return 0; +} + +static int ieee80211_assoc(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_assoc_request *req) +{ + struct ieee80211_sub_if_data *sdata; + int ret; + + if (!netif_running(dev)) + return -ENETDOWN; + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + if (sdata->vif.type != NL80211_IFTYPE_STATION) + return -EOPNOTSUPP; + + if (memcmp(sdata->u.mgd.bssid, req->peer_addr, ETH_ALEN) != 0 || + !(sdata->u.mgd.flags & IEEE80211_STA_AUTHENTICATED)) + return -ENOLINK; /* not authenticated */ + + sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL; + sdata->u.mgd.flags |= IEEE80211_STA_BSSID_SET; + + /* TODO: req->chan */ + sdata->u.mgd.flags |= 
IEEE80211_STA_AUTO_CHANNEL_SEL; + + if (req->ssid) { + sdata->u.mgd.flags |= IEEE80211_STA_SSID_SET; + memcpy(sdata->u.mgd.ssid, req->ssid, req->ssid_len); + sdata->u.mgd.ssid_len = req->ssid_len; + sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_SSID_SEL; + } else + sdata->u.mgd.flags |= IEEE80211_STA_AUTO_SSID_SEL; + + ret = ieee80211_sta_set_extra_ie(sdata, req->ie, req->ie_len); + if (ret) + return ret; + + sdata->u.mgd.flags |= IEEE80211_STA_EXT_SME; + sdata->u.mgd.state = IEEE80211_STA_MLME_ASSOCIATE; + ieee80211_sta_req_auth(sdata); + return 0; +} + +static int ieee80211_deauth(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_deauth_request *req) +{ + struct ieee80211_sub_if_data *sdata; + + if (!netif_running(dev)) + return -ENETDOWN; + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + if (sdata->vif.type != NL80211_IFTYPE_STATION) + return -EOPNOTSUPP; + + /* TODO: req->ie */ + return ieee80211_sta_deauthenticate(sdata, req->reason_code); +} + +static int ieee80211_disassoc(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_disassoc_request *req) +{ + struct ieee80211_sub_if_data *sdata; + + if (!netif_running(dev)) + return -ENETDOWN; + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + if (sdata->vif.type != NL80211_IFTYPE_STATION) + return -EOPNOTSUPP; + + /* TODO: req->ie */ + return ieee80211_sta_disassociate(sdata, req->reason_code); +} + struct cfg80211_ops mac80211_config_ops = { .add_virtual_intf = ieee80211_add_iface, .del_virtual_intf = ieee80211_del_iface, @@ -1333,4 +1469,8 @@ struct cfg80211_ops mac80211_config_ops = { .suspend = ieee80211_suspend, .resume = ieee80211_resume, .scan = ieee80211_scan, + .auth = ieee80211_auth, + .assoc = ieee80211_assoc, + .deauth = ieee80211_deauth, + .disassoc = ieee80211_disassoc, }; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index ad12c2a..7b96d95 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -256,6 +256,7 @@ struct mesh_preq_queue { #define IEEE80211_STA_TKIP_WEP_USED BIT(14) #define IEEE80211_STA_CSA_RECEIVED BIT(15) #define IEEE80211_STA_MFP_ENABLED BIT(16) +#define IEEE80211_STA_EXT_SME BIT(17) /* flags for MLME request */ #define IEEE80211_STA_REQ_SCAN 0 #define IEEE80211_STA_REQ_DIRECT_PROBE 1 @@ -266,6 +267,7 @@ struct mesh_preq_queue { #define IEEE80211_AUTH_ALG_OPEN BIT(0) #define IEEE80211_AUTH_ALG_SHARED_KEY BIT(1) #define IEEE80211_AUTH_ALG_LEAP BIT(2) +#define IEEE80211_AUTH_ALG_FT BIT(3) struct ieee80211_if_managed { struct timer_list timer; @@ -335,6 +337,9 @@ struct ieee80211_if_managed { size_t ie_deauth_len; u8 *ie_disassoc; size_t ie_disassoc_len; + + u8 *sme_auth_ie; + size_t sme_auth_ie_len; }; enum ieee80211_ibss_flags { @@ -970,7 +975,7 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, struct ieee80211_rx_status *rx_status); int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata, - char *ie, size_t len); + const char *ie, size_t len); void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local); void ieee80211_scan_failed(struct ieee80211_local *local); diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 6dc7a61..d1bcc84 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -730,6 +730,8 @@ static void ieee80211_authenticate(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_local *local = sdata->local; + u8 *ies; + size_t ies_len; ifmgd->auth_tries++; if (ifmgd->auth_tries > IEEE80211_AUTH_MAX_TRIES) { @@ -755,7 +757,14 @@ 
static void ieee80211_authenticate(struct ieee80211_sub_if_data *sdata) printk(KERN_DEBUG "%s: authenticate with AP %pM\n", sdata->dev->name, ifmgd->bssid); - ieee80211_send_auth(sdata, 1, ifmgd->auth_alg, NULL, 0, + if (ifmgd->flags & IEEE80211_STA_EXT_SME) { + ies = ifmgd->sme_auth_ie; + ies_len = ifmgd->sme_auth_ie_len; + } else { + ies = NULL; + ies_len = 0; + } + ieee80211_send_auth(sdata, 1, ifmgd->auth_alg, ies, ies_len, ifmgd->bssid, 0); ifmgd->auth_transaction = 2; @@ -870,7 +879,8 @@ static int ieee80211_privacy_mismatch(struct ieee80211_sub_if_data *sdata) int wep_privacy; int privacy_invoked; - if (!ifmgd || (ifmgd->flags & IEEE80211_STA_MIXED_CELL)) + if (!ifmgd || (ifmgd->flags & (IEEE80211_STA_MIXED_CELL | + IEEE80211_STA_EXT_SME))) return 0; bss = ieee80211_rx_bss_get(local, ifmgd->bssid, @@ -998,7 +1008,11 @@ static void ieee80211_auth_completed(struct ieee80211_sub_if_data *sdata) printk(KERN_DEBUG "%s: authenticated\n", sdata->dev->name); ifmgd->flags |= IEEE80211_STA_AUTHENTICATED; - ieee80211_associate(sdata); + if (ifmgd->flags & IEEE80211_STA_EXT_SME) { + /* Wait for SME to request association */ + ifmgd->state = IEEE80211_STA_MLME_DISABLED; + } else + ieee80211_associate(sdata); } @@ -1084,6 +1098,7 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, switch (ifmgd->auth_alg) { case WLAN_AUTH_OPEN: case WLAN_AUTH_LEAP: + case WLAN_AUTH_FT: ieee80211_auth_completed(sdata); cfg80211_send_rx_auth(sdata->dev, (u8 *) mgmt, len); break; @@ -1117,9 +1132,10 @@ static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, printk(KERN_DEBUG "%s: deauthenticated (Reason: %u)\n", sdata->dev->name, reason_code); - if (ifmgd->state == IEEE80211_STA_MLME_AUTHENTICATE || - ifmgd->state == IEEE80211_STA_MLME_ASSOCIATE || - ifmgd->state == IEEE80211_STA_MLME_ASSOCIATED) { + if (!(ifmgd->flags & IEEE80211_STA_EXT_SME) && + (ifmgd->state == IEEE80211_STA_MLME_AUTHENTICATE || + ifmgd->state == IEEE80211_STA_MLME_ASSOCIATE || + ifmgd->state == IEEE80211_STA_MLME_ASSOCIATED)) { ifmgd->state = IEEE80211_STA_MLME_DIRECT_PROBE; mod_timer(&ifmgd->timer, jiffies + IEEE80211_RETRY_AUTH_INTERVAL); @@ -1150,7 +1166,8 @@ static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata, printk(KERN_DEBUG "%s: disassociated (Reason: %u)\n", sdata->dev->name, reason_code); - if (ifmgd->state == IEEE80211_STA_MLME_ASSOCIATED) { + if (!(ifmgd->flags & IEEE80211_STA_EXT_SME) && + ifmgd->state == IEEE80211_STA_MLME_ASSOCIATED) { ifmgd->state = IEEE80211_STA_MLME_ASSOCIATE; mod_timer(&ifmgd->timer, jiffies + IEEE80211_RETRY_AUTH_INTERVAL); @@ -1664,6 +1681,8 @@ static void ieee80211_sta_reset_auth(struct ieee80211_sub_if_data *sdata) ifmgd->auth_alg = WLAN_AUTH_SHARED_KEY; else if (ifmgd->auth_algs & IEEE80211_AUTH_ALG_LEAP) ifmgd->auth_alg = WLAN_AUTH_LEAP; + else if (ifmgd->auth_algs & IEEE80211_AUTH_ALG_FT) + ifmgd->auth_alg = WLAN_AUTH_FT; else ifmgd->auth_alg = WLAN_AUTH_OPEN; ifmgd->auth_transaction = -1; @@ -1687,7 +1706,8 @@ static int ieee80211_sta_config_auth(struct ieee80211_sub_if_data *sdata) u16 capa_val = WLAN_CAPABILITY_ESS; struct ieee80211_channel *chan = local->oper_channel; - if (ifmgd->flags & (IEEE80211_STA_AUTO_SSID_SEL | + if (!(ifmgd->flags & IEEE80211_STA_EXT_SME) && + ifmgd->flags & (IEEE80211_STA_AUTO_SSID_SEL | IEEE80211_STA_AUTO_BSSID_SEL | IEEE80211_STA_AUTO_CHANNEL_SEL)) { capa_mask |= WLAN_CAPABILITY_PRIVACY; @@ -1884,7 +1904,11 @@ void ieee80211_sta_req_auth(struct ieee80211_sub_if_data *sdata) ieee80211_set_disassoc(sdata, 
true, true, WLAN_REASON_DEAUTH_LEAVING); - set_bit(IEEE80211_STA_REQ_AUTH, &ifmgd->request); + if (!(ifmgd->flags & IEEE80211_STA_EXT_SME) || + ifmgd->state != IEEE80211_STA_MLME_ASSOCIATE) + set_bit(IEEE80211_STA_REQ_AUTH, &ifmgd->request); + else if (ifmgd->flags & IEEE80211_STA_EXT_SME) + set_bit(IEEE80211_STA_REQ_RUN, &ifmgd->request); queue_work(local->hw.workqueue, &ifmgd->work); } } @@ -1953,7 +1977,8 @@ int ieee80211_sta_set_bssid(struct ieee80211_sub_if_data *sdata, u8 *bssid) return ieee80211_sta_commit(sdata); } -int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata, char *ie, size_t len) +int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata, + const char *ie, size_t len) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c index e55d283..ce21d66 100644 --- a/net/mac80211/wext.c +++ b/net/mac80211/wext.c @@ -137,6 +137,7 @@ static int ieee80211_ioctl_siwgenie(struct net_device *dev, if (ret) return ret; sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL; + sdata->u.mgd.flags &= ~IEEE80211_STA_EXT_SME; ieee80211_sta_req_auth(sdata); return 0; } @@ -224,6 +225,7 @@ static int ieee80211_ioctl_siwessid(struct net_device *dev, if (ret) return ret; + sdata->u.mgd.flags &= ~IEEE80211_STA_EXT_SME; ieee80211_sta_req_auth(sdata); return 0; } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) @@ -287,6 +289,7 @@ static int ieee80211_ioctl_siwap(struct net_device *dev, ret = ieee80211_sta_set_bssid(sdata, (u8 *) &ap_addr->sa_data); if (ret) return ret; + sdata->u.mgd.flags &= ~IEEE80211_STA_EXT_SME; ieee80211_sta_req_auth(sdata); return 0; } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index c034c24..9e1318d 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -111,6 +111,11 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = { .len = IEEE80211_MAX_DATA_LEN }, [NL80211_ATTR_SCAN_FREQUENCIES] = { .type = NLA_NESTED }, [NL80211_ATTR_SCAN_SSIDS] = { .type = NLA_NESTED }, + + [NL80211_ATTR_SSID] = { .type = NLA_BINARY, + .len = IEEE80211_MAX_SSID_LEN }, + [NL80211_ATTR_AUTH_TYPE] = { .type = NLA_U32 }, + [NL80211_ATTR_REASON_CODE] = { .type = NLA_U16 }, }; /* message building helper */ @@ -265,6 +270,10 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, CMD(set_mesh_params, SET_MESH_PARAMS); CMD(change_bss, SET_BSS); CMD(set_mgmt_extra_ie, SET_MGMT_EXTRA_IE); + CMD(auth, AUTHENTICATE); + CMD(assoc, ASSOCIATE); + CMD(deauth, DEAUTHENTICATE); + CMD(disassoc, DISASSOCIATE); #undef CMD nla_nest_end(msg, nl_cmds); @@ -2646,6 +2655,228 @@ static int nl80211_dump_scan(struct sk_buff *skb, return err; } +static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *drv; + struct net_device *dev; + struct cfg80211_auth_request req; + struct wiphy *wiphy; + int err; + + rtnl_lock(); + + err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); + if (err) + goto unlock_rtnl; + + if (!drv->ops->auth) { + err = -EOPNOTSUPP; + goto out; + } + + if (!info->attrs[NL80211_ATTR_MAC]) { + err = -EINVAL; + goto out; + } + + wiphy = &drv->wiphy; + memset(&req, 0, sizeof(req)); + + req.peer_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); + + if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { + req.chan = ieee80211_get_channel( + wiphy, + nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ])); + if (!req.chan) { + err = -EINVAL; + goto out; + } + } + 
+ if (info->attrs[NL80211_ATTR_SSID]) { + req.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]); + req.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); + } + + if (info->attrs[NL80211_ATTR_IE]) { + req.ie = nla_data(info->attrs[NL80211_ATTR_IE]); + req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); + } + + if (info->attrs[NL80211_ATTR_AUTH_TYPE]) { + req.auth_type = + nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]); + } + + err = drv->ops->auth(&drv->wiphy, dev, &req); + +out: + cfg80211_put_dev(drv); + dev_put(dev); +unlock_rtnl: + rtnl_unlock(); + return err; +} + +static int nl80211_associate(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *drv; + struct net_device *dev; + struct cfg80211_assoc_request req; + struct wiphy *wiphy; + int err; + + rtnl_lock(); + + err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); + if (err) + goto unlock_rtnl; + + if (!drv->ops->assoc) { + err = -EOPNOTSUPP; + goto out; + } + + if (!info->attrs[NL80211_ATTR_MAC] || + !info->attrs[NL80211_ATTR_SSID]) { + err = -EINVAL; + goto out; + } + + wiphy = &drv->wiphy; + memset(&req, 0, sizeof(req)); + + req.peer_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); + + if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { + req.chan = ieee80211_get_channel( + wiphy, + nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ])); + if (!req.chan) { + err = -EINVAL; + goto out; + } + } + + if (nla_len(info->attrs[NL80211_ATTR_SSID]) > IEEE80211_MAX_SSID_LEN) { + err = -EINVAL; + goto out; + } + req.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]); + req.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); + + if (info->attrs[NL80211_ATTR_IE]) { + req.ie = nla_data(info->attrs[NL80211_ATTR_IE]); + req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); + } + + err = drv->ops->assoc(&drv->wiphy, dev, &req); + +out: + cfg80211_put_dev(drv); + dev_put(dev); +unlock_rtnl: + rtnl_unlock(); + return err; +} + +static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *drv; + struct net_device *dev; + struct cfg80211_deauth_request req; + struct wiphy *wiphy; + int err; + + rtnl_lock(); + + err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); + if (err) + goto unlock_rtnl; + + if (!drv->ops->deauth) { + err = -EOPNOTSUPP; + goto out; + } + + if (!info->attrs[NL80211_ATTR_MAC]) { + err = -EINVAL; + goto out; + } + + wiphy = &drv->wiphy; + memset(&req, 0, sizeof(req)); + + req.peer_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); + + if (info->attrs[NL80211_ATTR_REASON_CODE]) + req.reason_code = + nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]); + + if (info->attrs[NL80211_ATTR_IE]) { + req.ie = nla_data(info->attrs[NL80211_ATTR_IE]); + req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); + } + + err = drv->ops->deauth(&drv->wiphy, dev, &req); + +out: + cfg80211_put_dev(drv); + dev_put(dev); +unlock_rtnl: + rtnl_unlock(); + return err; +} + +static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *drv; + struct net_device *dev; + struct cfg80211_disassoc_request req; + struct wiphy *wiphy; + int err; + + rtnl_lock(); + + err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); + if (err) + goto unlock_rtnl; + + if (!drv->ops->disassoc) { + err = -EOPNOTSUPP; + goto out; + } + + if (!info->attrs[NL80211_ATTR_MAC]) { + err = -EINVAL; + goto out; + } + + wiphy = &drv->wiphy; + memset(&req, 0, sizeof(req)); + + req.peer_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); + + if 
(info->attrs[NL80211_ATTR_REASON_CODE]) + req.reason_code = + nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]); + + if (info->attrs[NL80211_ATTR_IE]) { + req.ie = nla_data(info->attrs[NL80211_ATTR_IE]); + req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); + } + + err = drv->ops->disassoc(&drv->wiphy, dev, &req); + +out: + cfg80211_put_dev(drv); + dev_put(dev); +unlock_rtnl: + rtnl_unlock(); + return err; +} + static struct genl_ops nl80211_ops[] = { { .cmd = NL80211_CMD_GET_WIPHY, @@ -2829,6 +3060,30 @@ static struct genl_ops nl80211_ops[] = { .policy = nl80211_policy, .dumpit = nl80211_dump_scan, }, + { + .cmd = NL80211_CMD_AUTHENTICATE, + .doit = nl80211_authenticate, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_ASSOCIATE, + .doit = nl80211_associate, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_DEAUTHENTICATE, + .doit = nl80211_deauthenticate, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_DISASSOCIATE, + .doit = nl80211_disassociate, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, }; static struct genl_multicast_group nl80211_mlme_mcgrp = { .name = "mlme", -- cgit v0.10.2 From 1bf68e5cda40eaa26b186f043340fd283a4fb718 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 19 Mar 2009 13:33:52 +0100 Subject: wireless/p54: P54_SPI should depend on GENERIC_HARDIRQS m68k allmodconfig: | drivers/net/wireless/p54/p54spi.c: In function 'p54spi_probe': | drivers/net/wireless/p54/p54spi.c:675: error: implicit declaration of function | 'set_irq_type' | make[4]: *** [drivers/net/wireless/p54/p54spi.o] Error 1 Signed-off-by: Geert Uytterhoeven Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/p54/Kconfig b/drivers/net/wireless/p54/Kconfig index cfc5f41a..0e344ac 100644 --- a/drivers/net/wireless/p54/Kconfig +++ b/drivers/net/wireless/p54/Kconfig @@ -64,7 +64,7 @@ config P54_PCI config P54_SPI tristate "Prism54 SPI (stlc45xx) support" - depends on P54_COMMON && SPI_MASTER + depends on P54_COMMON && SPI_MASTER && GENERIC_HARDIRQS ---help--- This driver is for stlc4550 or stlc4560 based wireless chips. This driver is experimental, untested and will probably only work on -- cgit v0.10.2 From 3e3ccb3d9b8d5a1b65b34e1be2decf213ba3bebb Mon Sep 17 00:00:00 2001 From: Michael Buesch Date: Thu, 19 Mar 2009 19:27:21 +0100 Subject: b43: Mask PHY TX error interrupt, if not debugging This masks the PHY TX error interrupt, if debugging is disabled. Currently we have a bug somewhere which triggers this interrupt once in a while. (Depends on the network noise/quality). While this is nonfatal, it scares the hell out of users and we frequently receive bugreports that incorrectly identify this error message as the reason. There's another problem with this. The PHY TX error interrupt is protected with a watchdog that will restart the device if it keeps triggering very often. This is used to fix interrupt storms from completely broken devices. However, this watchdog might trigger in completely normal operation. If the TX capacity of the card is saturated, the likeliness of the watchdog triggering increases, as more TX errors occur. The current threshold for the watchdog is 1000 errors in 15 seconds. This patch adds a workaround for the issue by just enabling the interrupt if debugging is disabled (by Kconfig or by modparam). This has the downside that real fatal PHY TX errors are not caught anymore. 
But this is nonfatal due to the following reasons: * If the card is not able to transmit anymore, MLME will notice anyway. * I did _never_ see a real fatal PHY TX error in a mainline b43 driver. * It does _not_ result in interrupt storms or something like that. It will simply result in a stalled card. It can be debugged by enabling the debugging module parameter. Signed-off-by: Michael Buesch Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index b72ef3f..4896e08 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -3993,6 +3993,8 @@ static void setup_struct_wldev_for_init(struct b43_wldev *dev) dev->irq_reason = 0; memset(dev->dma_reason, 0, sizeof(dev->dma_reason)); dev->irq_savedstate = B43_IRQ_MASKTEMPLATE; + if (b43_modparam_verbose < B43_VERBOSITY_DEBUG) + dev->irq_savedstate &= ~B43_IRQ_PHY_TXERR; dev->mac_suspended = 1; -- cgit v0.10.2 From 827b1fb44b7e41377a5498b9d070a11dfae2c283 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 13 Mar 2009 11:44:18 +0100 Subject: mac80211: resume properly, add suspend/resume test When mac80211 resumes, it currently doesn't reconfigure the interfaces entirely and also doesn't reconfigure BSS information -- fix this. Also, to be able to test this, add a debugfs file that just calls the suspend/resume code to see what happens when we go through that, without needing the time-consuming suspend/resume cycle. (Original version broke the build for CONFIG_PM=n. Define alternative functions for that situation. -- JWL) Signed-off-by: Johannes Berg Signed-off-by: John W. Linville diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index e37f557..210b9b6 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c @@ -40,6 +40,10 @@ static const struct file_operations name## _ops = { \ local->debugfs.name = debugfs_create_file(#name, 0400, phyd, \ local, &name## _ops); +#define DEBUGFS_ADD_MODE(name, mode) \ + local->debugfs.name = debugfs_create_file(#name, mode, phyd, \ + local, &name## _ops); + #define DEBUGFS_DEL(name) \ debugfs_remove(local->debugfs.name); \ local->debugfs.name = NULL; @@ -113,6 +117,24 @@ static const struct file_operations tsf_ops = { .open = mac80211_open_file_generic }; +static ssize_t reset_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_local *local = file->private_data; + + rtnl_lock(); + __ieee80211_suspend(&local->hw); + __ieee80211_resume(&local->hw); + rtnl_unlock(); + + return count; +} + +static const struct file_operations reset_ops = { + .write = reset_write, + .open = mac80211_open_file_generic, +}; + /* statistics stuff */ #define DEBUGFS_STATS_FILE(name, buflen, fmt, value...) 
\ @@ -254,6 +276,7 @@ void debugfs_hw_add(struct ieee80211_local *local) DEBUGFS_ADD(total_ps_buffered); DEBUGFS_ADD(wep_iv); DEBUGFS_ADD(tsf); + DEBUGFS_ADD_MODE(reset, 0200); statsd = debugfs_create_dir("statistics", phyd); local->debugfs.statistics = statsd; @@ -308,6 +331,7 @@ void debugfs_hw_del(struct ieee80211_local *local) DEBUGFS_DEL(total_ps_buffered); DEBUGFS_DEL(wep_iv); DEBUGFS_DEL(tsf); + DEBUGFS_DEL(reset); DEBUGFS_STATS_DEL(transmitted_fragment_count); DEBUGFS_STATS_DEL(multicast_transmitted_frame_count); diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 7b96d95..547cfac 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -780,6 +780,7 @@ struct ieee80211_local { struct dentry *total_ps_buffered; struct dentry *wep_iv; struct dentry *tsf; + struct dentry *reset; struct dentry *statistics; struct local_debugfsdentries_statsdentries { struct dentry *transmitted_fragment_count; @@ -1059,8 +1060,19 @@ void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata, u8 pwr_constr_elem_len); /* Suspend/resume */ +#ifdef CONFIG_PM int __ieee80211_suspend(struct ieee80211_hw *hw); int __ieee80211_resume(struct ieee80211_hw *hw); +#else +static inline int __ieee80211_suspend(struct ieee80211_hw *hw) +{ + return 0; +} +static inline int __ieee80211_resume(struct ieee80211_hw *hw) +{ + return 0; +} +#endif /* utility functions/constants */ extern void *mac80211_wiphy_privid; /* for wiphy privid */ diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c index 1e6152a..0273023 100644 --- a/net/mac80211/pm.c +++ b/net/mac80211/pm.c @@ -143,6 +143,35 @@ int __ieee80211_resume(struct ieee80211_hw *hw) ieee80211_configure_filter(local); netif_addr_unlock_bh(local->mdev); + /* Finally also reconfigure all the BSS information */ + list_for_each_entry(sdata, &local->interfaces, list) { + u32 changed = ~0; + if (!netif_running(sdata->dev)) + continue; + switch (sdata->vif.type) { + case NL80211_IFTYPE_STATION: + /* disable beacon change bits */ + changed &= ~IEEE80211_IFCC_BEACON; + /* fall through */ + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_MESH_POINT: + WARN_ON(ieee80211_if_config(sdata, changed)); + ieee80211_bss_info_change_notify(sdata, ~0); + break; + case NL80211_IFTYPE_WDS: + break; + case NL80211_IFTYPE_AP_VLAN: + case NL80211_IFTYPE_MONITOR: + /* ignore virtual */ + break; + case NL80211_IFTYPE_UNSPECIFIED: + case __NL80211_IFTYPE_AFTER_LAST: + WARN_ON(1); + break; + } + } + ieee80211_wake_queues_by_reason(hw, IEEE80211_QUEUE_STOP_REASON_SUSPEND); -- cgit v0.10.2 From 8a92e2ee02dee127d309c73969aeb2a56567c9a0 Mon Sep 17 00:00:00 2001 From: Vasanthakumar Thiagarajan Date: Fri, 20 Mar 2009 15:27:49 +0530 Subject: ath9k: Fix bug in reporting status of tx rate This patch updates count of every hw tried rate with appropriate tries before reporting tx status of a frame. Signed-off-by: Vasanthakumar Thiagarajan Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath9k/xmit.c index f287719..21e90bc 100644 --- a/drivers/net/wireless/ath9k/xmit.c +++ b/drivers/net/wireless/ath9k/xmit.c @@ -67,7 +67,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf); static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf, int txok); static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds, - int nbad, int txok); + int nbad, int txok, bool update_rc); /*********************/ /* Aggregation logic */ @@ -370,11 +370,13 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, ath_tx_update_baw(sc, tid, bf->bf_seqno); spin_unlock_bh(&txq->axq_lock); - if (rc_update) - if (acked_cnt == 1 || txfail_cnt == 1) { - ath_tx_rc_status(bf, ds, nbad, txok); - rc_update = false; - } + if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) { + ath_tx_rc_status(bf, ds, nbad, txok, true); + rc_update = false; + } else { + ath_tx_rc_status(bf, ds, nbad, txok, false); + } + ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar); } else { /* retry the un-acked ones */ @@ -1779,8 +1781,6 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, tx_info->flags |= IEEE80211_TX_STAT_ACK; } - tx_info->status.rates[0].count = tx_status->retries + 1; - hdrlen = ieee80211_get_hdrlen_from_skb(skb); padsize = hdrlen & 3; if (padsize && hdrlen >= 24) { @@ -1867,30 +1867,39 @@ static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf, } static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds, - int nbad, int txok) + int nbad, int txok, bool update_rc) { struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info); + struct ieee80211_hw *hw = tx_info_priv->aphy->hw; + u8 i, tx_rateindex; if (txok) tx_info->status.ack_signal = ds->ds_txstat.ts_rssi; - tx_info_priv->update_rc = false; + tx_rateindex = ds->ds_txstat.ts_rateindex; + WARN_ON(tx_rateindex >= hw->max_rates); + + tx_info_priv->update_rc = update_rc; if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED; if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 && - (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) { + (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) { if (ieee80211_is_data(hdr->frame_control)) { memcpy(&tx_info_priv->tx, &ds->ds_txstat, sizeof(tx_info_priv->tx)); tx_info_priv->n_frames = bf->bf_nframes; tx_info_priv->n_bad_frames = nbad; - tx_info_priv->update_rc = true; } } + + for (i = tx_rateindex + 1; i < hw->max_rates; i++) + tx_info->status.rates[i].count = 0; + + tx_info->status.rates[tx_rateindex].count = bf->bf_retries + 1; } static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq) @@ -2009,7 +2018,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) bf->bf_retries = ds->ds_txstat.ts_longretry; if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY) bf->bf_state.bf_type |= BUF_XRETRY; - ath_tx_rc_status(bf, ds, 0, txok); + ath_tx_rc_status(bf, ds, 0, txok, true); } if (bf_isampdu(bf)) -- cgit v0.10.2 From 6b2c40326f9569283444d483448bcaadeca903e9 Mon Sep 17 00:00:00 2001 From: Vasanthakumar Thiagarajan Date: Fri, 20 Mar 2009 15:27:50 +0530 Subject: ath9k: Nuke struct ath_xmit_status Signed-off-by: Vasanthakumar Thiagarajan Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath9k/ath9k.h b/drivers/net/wireless/ath9k/ath9k.h index 2b02564..2689a08a 100644 --- a/drivers/net/wireless/ath9k/ath9k.h +++ b/drivers/net/wireless/ath9k/ath9k.h @@ -295,13 +295,9 @@ struct ath_tx_control { enum ath9k_internal_frame_type frame_type; }; -struct ath_xmit_status { - int retries; - int flags; #define ATH_TX_ERROR 0x01 #define ATH_TX_XRETRY 0x02 #define ATH_TX_BAR 0x04 -}; /* All RSSI values are noise floor adjusted */ struct ath_tx_stat { diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath9k/xmit.c index 21e90bc..689bdbf 100644 --- a/drivers/net/wireless/ath9k/xmit.c +++ b/drivers/net/wireless/ath9k/xmit.c @@ -1750,7 +1750,7 @@ exit: /*****************/ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, - struct ath_xmit_status *tx_status) + int tx_flags) { struct ieee80211_hw *hw = sc->hw; struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); @@ -1771,12 +1771,10 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, tx_info->rate_driver_data[0] = NULL; } - if (tx_status->flags & ATH_TX_BAR) { + if (tx_flags & ATH_TX_BAR) tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; - tx_status->flags &= ~ATH_TX_BAR; - } - if (!(tx_status->flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) { + if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) { /* Frame was ACKed */ tx_info->flags |= IEEE80211_TX_STAT_ACK; } @@ -1803,29 +1801,22 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, int txok, int sendbar) { struct sk_buff *skb = bf->bf_mpdu; - struct ath_xmit_status tx_status; unsigned long flags; + int tx_flags = 0; - /* - * Set retry information. - * NB: Don't use the information in the descriptor, because the frame - * could be software retried. - */ - tx_status.retries = bf->bf_retries; - tx_status.flags = 0; if (sendbar) - tx_status.flags = ATH_TX_BAR; + tx_flags = ATH_TX_BAR; if (!txok) { - tx_status.flags |= ATH_TX_ERROR; + tx_flags |= ATH_TX_ERROR; if (bf_isxretried(bf)) - tx_status.flags |= ATH_TX_XRETRY; + tx_flags |= ATH_TX_XRETRY; } dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE); - ath_tx_complete(sc, skb, &tx_status); + ath_tx_complete(sc, skb, tx_flags); /* * Return the list of ath_buf of this mpdu to free queue -- cgit v0.10.2 From d7873cb9abb5d8b4b9f7f5749af06e4e03798733 Mon Sep 17 00:00:00 2001 From: Jouni Malinen Date: Fri, 20 Mar 2009 15:53:16 +0200 Subject: mac80211: Fix memleak in nl80211 authentication on deinit This file was forgotten from the quilt patch that added MLME primitives, so the kfree on interface removal is missing. Fix this potential memleak by freeing the temporary Authentication frame IEs from SME when the interface is being removed. Signed-off-by: Jouni Malinen Signed-off-by: John W. Linville diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index f9f27b9..6b56dc2 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -660,6 +660,7 @@ static void ieee80211_teardown_sdata(struct net_device *dev) kfree(sdata->u.mgd.ie_reassocreq); kfree(sdata->u.mgd.ie_deauth); kfree(sdata->u.mgd.ie_disassoc); + kfree(sdata->u.mgd.sme_auth_ie); break; case NL80211_IFTYPE_WDS: case NL80211_IFTYPE_AP_VLAN: -- cgit v0.10.2 From feeb44454996cf5b375fad21697bf6202fe30dd2 Mon Sep 17 00:00:00 2001 From: Michael Buesch Date: Fri, 20 Mar 2009 16:43:20 +0100 Subject: ssb: remove EXPERIMENTAL dependencies. ssb is not experimental anymore. Signed-off-by: Michael Buesch Signed-off-by: John W. 
Linville diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig index b1b947e..540a294 100644 --- a/drivers/ssb/Kconfig +++ b/drivers/ssb/Kconfig @@ -53,11 +53,11 @@ config SSB_B43_PCI_BRIDGE config SSB_PCMCIAHOST_POSSIBLE bool - depends on SSB && (PCMCIA = y || PCMCIA = SSB) && EXPERIMENTAL + depends on SSB && (PCMCIA = y || PCMCIA = SSB) default y config SSB_PCMCIAHOST - bool "Support for SSB on PCMCIA-bus host (EXPERIMENTAL)" + bool "Support for SSB on PCMCIA-bus host" depends on SSB_PCMCIAHOST_POSSIBLE select SSB_SPROM help @@ -107,14 +107,14 @@ config SSB_DRIVER_PCICORE If unsure, say Y config SSB_PCICORE_HOSTMODE - bool "Hostmode support for SSB PCI core (EXPERIMENTAL)" - depends on SSB_DRIVER_PCICORE && SSB_DRIVER_MIPS && EXPERIMENTAL + bool "Hostmode support for SSB PCI core" + depends on SSB_DRIVER_PCICORE && SSB_DRIVER_MIPS help PCIcore hostmode operation (external PCI bus). config SSB_DRIVER_MIPS - bool "SSB Broadcom MIPS core driver (EXPERIMENTAL)" - depends on SSB && MIPS && EXPERIMENTAL + bool "SSB Broadcom MIPS core driver" + depends on SSB && MIPS select SSB_SERIAL help Driver for the Sonics Silicon Backplane attached @@ -129,8 +129,8 @@ config SSB_EMBEDDED default y config SSB_DRIVER_EXTIF - bool "SSB Broadcom EXTIF core driver (EXPERIMENTAL)" - depends on SSB_DRIVER_MIPS && EXPERIMENTAL + bool "SSB Broadcom EXTIF core driver" + depends on SSB_DRIVER_MIPS help Driver for the Sonics Silicon Backplane attached Broadcom EXTIF core. -- cgit v0.10.2 From 65fc73ac4a310945dfeceac961726c2765ad2ec0 Mon Sep 17 00:00:00 2001 From: Jouni Malinen Date: Fri, 20 Mar 2009 21:21:16 +0200 Subject: nl80211: Remove NL80211_CMD_SET_MGMT_EXTRA_IE The functionality that NL80211_CMD_SET_MGMT_EXTRA_IE provided can now be achieved with cleaner design by adding IE(s) into NL80211_CMD_TRIGGER_SCAN, NL80211_CMD_AUTHENTICATE, NL80211_CMD_ASSOCIATE, NL80211_CMD_DEAUTHENTICATE, and NL80211_CMD_DISASSOCIATE. Since this is a very recently added command and there are no known (or known planned) applications using NL80211_CMD_SET_MGMT_EXTRA_IE and taken into account how much extra complexity it adds to the IE processing we have now (and need to add in the future to fix IE order in couple of frames), it looks like the best option is to just remove the implementation of this command for now. The enum values themselves are left to avoid changing the nl80211 command or attribute numbers. Signed-off-by: Jouni Malinen Signed-off-by: John W. Linville diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h index 9685eaa..cbe8ce3 100644 --- a/include/linux/nl80211.h +++ b/include/linux/nl80211.h @@ -142,6 +142,12 @@ * %NL80211_ATTR_IE. If the command succeeds, the requested data will be * added to all specified management frames generated by * kernel/firmware/driver. + * Note: This command has been removed and it is only reserved at this + * point to avoid re-using existing command number. The functionality this + * command was planned for has been provided with cleaner design with the + * option to specify additional IEs in NL80211_CMD_TRIGGER_SCAN, + * NL80211_CMD_AUTHENTICATE, NL80211_CMD_ASSOCIATE, + * NL80211_CMD_DEAUTHENTICATE, and NL80211_CMD_DISASSOCIATE. 
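To make the replacement concrete (this snippet is not part of the patch): instead of a separate SET_MGMT_EXTRA_IE call, a user space SME now attaches any extra IEs directly to the individual MLME command, for example a deauthentication request. The fragment below assumes the same libnl socket/family setup as the earlier authenticate sketch, with sk, nl80211_id, ifindex, bssid and the IE buffer supplied by the caller.

    struct nl_msg *msg = nlmsg_alloc();

    genlmsg_put(msg, 0, 0, nl80211_id, 0, 0, NL80211_CMD_DEAUTHENTICATE, 0);
    nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid);
    nla_put_u16(msg, NL80211_ATTR_REASON_CODE, 3);  /* 3 = deauth because leaving */
    if (extra_ies_len)
            nla_put(msg, NL80211_ATTR_IE, extra_ies_len, extra_ies);  /* optional IEs */
    nl_send_auto_complete(sk, msg);
    nlmsg_free(msg);
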
* * @NL80211_CMD_GET_SCAN: get scan results * @NL80211_CMD_TRIGGER_SCAN: trigger a new scan with the given parameters @@ -238,7 +244,7 @@ enum nl80211_commands { NL80211_CMD_GET_MESH_PARAMS, NL80211_CMD_SET_MESH_PARAMS, - NL80211_CMD_SET_MGMT_EXTRA_IE, + NL80211_CMD_SET_MGMT_EXTRA_IE /* reserved; not used */, NL80211_CMD_GET_REG, diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 0da9a55..dca4a6b 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -471,26 +471,6 @@ struct ieee80211_txq_params { u8 aifs; }; -/** - * struct mgmt_extra_ie_params - Extra management frame IE parameters - * - * Used to add extra IE(s) into management frames. If the driver cannot add the - * requested data into all management frames of the specified subtype that are - * generated in kernel or firmware/hardware, it must reject the configuration - * call. The IE data buffer is added to the end of the specified management - * frame body after all other IEs. This addition is not applied to frames that - * are injected through a monitor interface. - * - * @subtype: Management frame subtype - * @ies: IE data buffer or %NULL to remove previous data - * @ies_len: Length of @ies in octets - */ -struct mgmt_extra_ie_params { - u8 subtype; - u8 *ies; - int ies_len; -}; - /* from net/wireless.h */ struct wiphy; @@ -743,8 +723,6 @@ struct cfg80211_disassoc_request { * * @set_channel: Set channel * - * @set_mgmt_extra_ie: Set extra IE data for management frames - * * @scan: Request to do a scan. If returning zero, the scan request is given * the driver, and will be valid until passed to cfg80211_scan_done(). * For scan results, call cfg80211_inform_bss(); you can call this outside @@ -828,10 +806,6 @@ struct cfg80211_ops { struct ieee80211_channel *chan, enum nl80211_channel_type channel_type); - int (*set_mgmt_extra_ie)(struct wiphy *wiphy, - struct net_device *dev, - struct mgmt_extra_ie_params *params); - int (*scan)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_scan_request *request); diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 223e536..f5c15c9 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -1181,91 +1181,6 @@ static int ieee80211_set_channel(struct wiphy *wiphy, return ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); } -static int set_mgmt_extra_ie_sta(struct ieee80211_sub_if_data *sdata, - u8 subtype, u8 *ies, size_t ies_len) -{ - struct ieee80211_local *local = sdata->local; - struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; - - switch (subtype) { - case IEEE80211_STYPE_PROBE_REQ >> 4: - if (local->ops->hw_scan) - break; - kfree(ifmgd->ie_probereq); - ifmgd->ie_probereq = ies; - ifmgd->ie_probereq_len = ies_len; - return 0; - case IEEE80211_STYPE_PROBE_RESP >> 4: - kfree(ifmgd->ie_proberesp); - ifmgd->ie_proberesp = ies; - ifmgd->ie_proberesp_len = ies_len; - return 0; - case IEEE80211_STYPE_AUTH >> 4: - kfree(ifmgd->ie_auth); - ifmgd->ie_auth = ies; - ifmgd->ie_auth_len = ies_len; - return 0; - case IEEE80211_STYPE_ASSOC_REQ >> 4: - kfree(ifmgd->ie_assocreq); - ifmgd->ie_assocreq = ies; - ifmgd->ie_assocreq_len = ies_len; - return 0; - case IEEE80211_STYPE_REASSOC_REQ >> 4: - kfree(ifmgd->ie_reassocreq); - ifmgd->ie_reassocreq = ies; - ifmgd->ie_reassocreq_len = ies_len; - return 0; - case IEEE80211_STYPE_DEAUTH >> 4: - kfree(ifmgd->ie_deauth); - ifmgd->ie_deauth = ies; - ifmgd->ie_deauth_len = ies_len; - return 0; - case IEEE80211_STYPE_DISASSOC >> 4: - kfree(ifmgd->ie_disassoc); - ifmgd->ie_disassoc = ies; - ifmgd->ie_disassoc_len 
= ies_len; - return 0; - } - - return -EOPNOTSUPP; -} - -static int ieee80211_set_mgmt_extra_ie(struct wiphy *wiphy, - struct net_device *dev, - struct mgmt_extra_ie_params *params) -{ - struct ieee80211_sub_if_data *sdata; - u8 *ies; - size_t ies_len; - int ret = -EOPNOTSUPP; - - if (params->ies) { - ies = kmemdup(params->ies, params->ies_len, GFP_KERNEL); - if (ies == NULL) - return -ENOMEM; - ies_len = params->ies_len; - } else { - ies = NULL; - ies_len = 0; - } - - sdata = IEEE80211_DEV_TO_SUB_IF(dev); - - switch (sdata->vif.type) { - case NL80211_IFTYPE_STATION: - ret = set_mgmt_extra_ie_sta(sdata, params->subtype, - ies, ies_len); - break; - default: - ret = -EOPNOTSUPP; - break; - } - - if (ret) - kfree(ies); - return ret; -} - #ifdef CONFIG_PM static int ieee80211_suspend(struct wiphy *wiphy) { @@ -1465,7 +1380,6 @@ struct cfg80211_ops mac80211_config_ops = { .change_bss = ieee80211_change_bss, .set_txq_params = ieee80211_set_txq_params, .set_channel = ieee80211_set_channel, - .set_mgmt_extra_ie = ieee80211_set_mgmt_extra_ie, .suspend = ieee80211_suspend, .resume = ieee80211_resume, .scan = ieee80211_scan, diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 547cfac..f69e84a 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -323,21 +323,6 @@ struct ieee80211_if_managed { int wmm_last_param_set; /* Extra IE data for management frames */ - u8 *ie_probereq; - size_t ie_probereq_len; - u8 *ie_proberesp; - size_t ie_proberesp_len; - u8 *ie_auth; - size_t ie_auth_len; - u8 *ie_assocreq; - size_t ie_assocreq_len; - u8 *ie_reassocreq; - size_t ie_reassocreq_len; - u8 *ie_deauth; - size_t ie_deauth_len; - u8 *ie_disassoc; - size_t ie_disassoc_len; - u8 *sme_auth_ie; size_t sme_auth_ie_len; }; diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 6b56dc2..34f4798 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -653,13 +653,6 @@ static void ieee80211_teardown_sdata(struct net_device *dev) kfree(sdata->u.mgd.extra_ie); kfree(sdata->u.mgd.assocreq_ies); kfree(sdata->u.mgd.assocresp_ies); - kfree(sdata->u.mgd.ie_probereq); - kfree(sdata->u.mgd.ie_proberesp); - kfree(sdata->u.mgd.ie_auth); - kfree(sdata->u.mgd.ie_assocreq); - kfree(sdata->u.mgd.ie_reassocreq); - kfree(sdata->u.mgd.ie_deauth); - kfree(sdata->u.mgd.ie_disassoc); kfree(sdata->u.mgd.sme_auth_ie); break; case NL80211_IFTYPE_WDS: diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index d1bcc84..b0808ef 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -82,38 +82,23 @@ static int ieee80211_compatible_rates(struct ieee80211_bss *bss, /* frame sending functions */ -static void add_extra_ies(struct sk_buff *skb, u8 *ies, size_t ies_len) -{ - if (ies) - memcpy(skb_put(skb, ies_len), ies, ies_len); -} - static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_local *local = sdata->local; struct sk_buff *skb; struct ieee80211_mgmt *mgmt; - u8 *pos, *ies, *ht_ie, *e_ies; + u8 *pos, *ies, *ht_ie; int i, len, count, rates_len, supp_rates_len; u16 capab; struct ieee80211_bss *bss; int wmm = 0; struct ieee80211_supported_band *sband; u32 rates = 0; - size_t e_ies_len; - - if (ifmgd->flags & IEEE80211_STA_PREV_BSSID_SET) { - e_ies = sdata->u.mgd.ie_reassocreq; - e_ies_len = sdata->u.mgd.ie_reassocreq_len; - } else { - e_ies = sdata->u.mgd.ie_assocreq; - e_ies_len = sdata->u.mgd.ie_assocreq_len; - } skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + 200 + 
ifmgd->extra_ie_len + - ifmgd->ssid_len + e_ies_len); + ifmgd->ssid_len); if (!skb) { printk(KERN_DEBUG "%s: failed to allocate buffer for assoc " "frame\n", sdata->dev->name); @@ -304,8 +289,6 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs)); } - add_extra_ies(skb, e_ies, e_ies_len); - kfree(ifmgd->assocreq_ies); ifmgd->assocreq_ies_len = (skb->data + skb->len) - ies; ifmgd->assocreq_ies = kmalloc(ifmgd->assocreq_ies_len, GFP_KERNEL); @@ -323,19 +306,8 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct sk_buff *skb; struct ieee80211_mgmt *mgmt; - u8 *ies; - size_t ies_len; - if (stype == IEEE80211_STYPE_DEAUTH) { - ies = sdata->u.mgd.ie_deauth; - ies_len = sdata->u.mgd.ie_deauth_len; - } else { - ies = sdata->u.mgd.ie_disassoc; - ies_len = sdata->u.mgd.ie_disassoc_len; - } - - skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + - ies_len); + skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt)); if (!skb) { printk(KERN_DEBUG "%s: failed to allocate buffer for " "deauth/disassoc frame\n", sdata->dev->name); @@ -353,8 +325,6 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, /* u.deauth.reason_code == u.disassoc.reason_code */ mgmt->u.deauth.reason_code = cpu_to_le16(reason); - add_extra_ies(skb, ies, ies_len); - ieee80211_tx_skb(sdata, skb, ifmgd->flags & IEEE80211_STA_MFP_ENABLED); } diff --git a/net/mac80211/util.c b/net/mac80211/util.c index e0431a1..444bb14 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -846,16 +846,9 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, struct ieee80211_local *local = sdata->local; struct sk_buff *skb; struct ieee80211_mgmt *mgmt; - const u8 *ie_auth = NULL; - int ie_auth_len = 0; - - if (sdata->vif.type == NL80211_IFTYPE_STATION) { - ie_auth_len = sdata->u.mgd.ie_auth_len; - ie_auth = sdata->u.mgd.ie_auth; - } skb = dev_alloc_skb(local->hw.extra_tx_headroom + - sizeof(*mgmt) + 6 + extra_len + ie_auth_len); + sizeof(*mgmt) + 6 + extra_len); if (!skb) { printk(KERN_DEBUG "%s: failed to allocate buffer for auth " "frame\n", sdata->dev->name); @@ -877,8 +870,6 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, mgmt->u.auth.status_code = cpu_to_le16(0); if (extra) memcpy(skb_put(skb, extra_len), extra, extra_len); - if (ie_auth) - memcpy(skb_put(skb, ie_auth_len), ie_auth, ie_auth_len); ieee80211_tx_skb(sdata, skb, encrypt); } @@ -891,20 +882,11 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, struct ieee80211_supported_band *sband; struct sk_buff *skb; struct ieee80211_mgmt *mgmt; - u8 *pos, *supp_rates, *esupp_rates = NULL, *extra_preq_ie = NULL; - int i, extra_preq_ie_len = 0; - - switch (sdata->vif.type) { - case NL80211_IFTYPE_STATION: - extra_preq_ie_len = sdata->u.mgd.ie_probereq_len; - extra_preq_ie = sdata->u.mgd.ie_probereq; - break; - default: - break; - } + u8 *pos, *supp_rates, *esupp_rates = NULL; + int i; skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + 200 + - ie_len + extra_preq_ie_len); + ie_len); if (!skb) { printk(KERN_DEBUG "%s: failed to allocate buffer for probe " "request\n", sdata->dev->name); @@ -953,9 +935,6 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, if (ie) memcpy(skb_put(skb, ie_len), ie, ie_len); - if (extra_preq_ie) - memcpy(skb_put(skb, extra_preq_ie_len), extra_preq_ie, - 
extra_preq_ie_len); ieee80211_tx_skb(sdata, skb, 0); } diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 9e1318d..44c7997 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -269,7 +269,6 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, CMD(add_mpath, NEW_MPATH); CMD(set_mesh_params, SET_MESH_PARAMS); CMD(change_bss, SET_BSS); - CMD(set_mgmt_extra_ie, SET_MGMT_EXTRA_IE); CMD(auth, AUTHENTICATE); CMD(assoc, ASSOCIATE); CMD(deauth, DEAUTHENTICATE); @@ -2355,46 +2354,6 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info) return -EINVAL; } -static int nl80211_set_mgmt_extra_ie(struct sk_buff *skb, - struct genl_info *info) -{ - struct cfg80211_registered_device *drv; - int err; - struct net_device *dev; - struct mgmt_extra_ie_params params; - - memset(&params, 0, sizeof(params)); - - if (!info->attrs[NL80211_ATTR_MGMT_SUBTYPE]) - return -EINVAL; - params.subtype = nla_get_u8(info->attrs[NL80211_ATTR_MGMT_SUBTYPE]); - if (params.subtype > 15) - return -EINVAL; /* FC Subtype field is 4 bits (0..15) */ - - if (info->attrs[NL80211_ATTR_IE]) { - params.ies = nla_data(info->attrs[NL80211_ATTR_IE]); - params.ies_len = nla_len(info->attrs[NL80211_ATTR_IE]); - } - - rtnl_lock(); - - err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); - if (err) - goto out_rtnl; - - if (drv->ops->set_mgmt_extra_ie) - err = drv->ops->set_mgmt_extra_ie(&drv->wiphy, dev, &params); - else - err = -EOPNOTSUPP; - - cfg80211_put_dev(drv); - dev_put(dev); - out_rtnl: - rtnl_unlock(); - - return err; -} - static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *drv; @@ -3044,12 +3003,6 @@ static struct genl_ops nl80211_ops[] = { .flags = GENL_ADMIN_PERM, }, { - .cmd = NL80211_CMD_SET_MGMT_EXTRA_IE, - .doit = nl80211_set_mgmt_extra_ie, - .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, - }, - { .cmd = NL80211_CMD_TRIGGER_SCAN, .doit = nl80211_trigger_scan, .policy = nl80211_policy, -- cgit v0.10.2 From 255e737eab645ec6037baeca04a5e0a7c3b1f459 Mon Sep 17 00:00:00 2001 From: Jouni Malinen Date: Fri, 20 Mar 2009 21:21:17 +0200 Subject: nl80211: Add more thorough validation of MLME command parameters Check that the used authentication type and reason code are valid here so that drivers/mac80211 do not need to care about this. In addition, remove the unnecessary validation of SSID attribute length which is taken care of by netlink policy. Signed-off-by: Jouni Malinen Signed-off-by: John W.
Linville diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 44c7997..6f38ee7 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -2614,6 +2614,14 @@ static int nl80211_dump_scan(struct sk_buff *skb, return err; } +static bool nl80211_valid_auth_type(enum nl80211_auth_type auth_type) +{ + return auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM || + auth_type == NL80211_AUTHTYPE_SHARED_KEY || + auth_type == NL80211_AUTHTYPE_FT || + auth_type == NL80211_AUTHTYPE_NETWORK_EAP; +} + static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *drv; @@ -2666,6 +2674,10 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info) if (info->attrs[NL80211_ATTR_AUTH_TYPE]) { req.auth_type = nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]); + if (!nl80211_valid_auth_type(req.auth_type)) { + err = -EINVAL; + goto out; + } } err = drv->ops->auth(&drv->wiphy, dev, &req); @@ -2718,10 +2730,6 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info) } } - if (nla_len(info->attrs[NL80211_ATTR_SSID]) > IEEE80211_MAX_SSID_LEN) { - err = -EINVAL; - goto out; - } req.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]); req.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); @@ -2769,9 +2777,15 @@ static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info) req.peer_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); - if (info->attrs[NL80211_ATTR_REASON_CODE]) + if (info->attrs[NL80211_ATTR_REASON_CODE]) { req.reason_code = nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]); + if (req.reason_code == 0) { + /* Reason Code 0 is reserved */ + err = -EINVAL; + goto out; + } + } if (info->attrs[NL80211_ATTR_IE]) { req.ie = nla_data(info->attrs[NL80211_ATTR_IE]); @@ -2817,9 +2831,15 @@ static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info) req.peer_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); - if (info->attrs[NL80211_ATTR_REASON_CODE]) + if (info->attrs[NL80211_ATTR_REASON_CODE]) { req.reason_code = nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]); + if (req.reason_code == 0) { + /* Reason Code 0 is reserved */ + err = -EINVAL; + goto out; + } + } if (info->attrs[NL80211_ATTR_IE]) { req.ie = nla_data(info->attrs[NL80211_ATTR_IE]); -- cgit v0.10.2 From 35a8efe1a67ba5d7bb7492f67f52ed2aa4925892 Mon Sep 17 00:00:00 2001 From: Jouni Malinen Date: Fri, 20 Mar 2009 21:21:18 +0200 Subject: nl80211: Check that netif_running is true in cfg80211 code We do not want to require all the drivers using cfg80211 to need to do this or to be prepared to handle these commands when the interface is down. Signed-off-by: Jouni Malinen Signed-off-by: John W.
Linville diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index f5c15c9..b5810b4 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -728,10 +728,6 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev, int err; int layer2_update; - /* Prevent a race with changing the rate control algorithm */ - if (!netif_running(dev)) - return -ENETDOWN; - if (params->vlan) { sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); @@ -860,9 +856,6 @@ static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev, struct sta_info *sta; int err; - if (!netif_running(dev)) - return -ENETDOWN; - sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT) @@ -913,9 +906,6 @@ static int ieee80211_change_mpath(struct wiphy *wiphy, struct mesh_path *mpath; struct sta_info *sta; - if (!netif_running(dev)) - return -ENETDOWN; - sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT) @@ -1202,9 +1192,6 @@ static int ieee80211_scan(struct wiphy *wiphy, { struct ieee80211_sub_if_data *sdata; - if (!netif_running(dev)) - return -ENETDOWN; - sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (sdata->vif.type != NL80211_IFTYPE_STATION && @@ -1220,9 +1207,6 @@ static int ieee80211_auth(struct wiphy *wiphy, struct net_device *dev, { struct ieee80211_sub_if_data *sdata; - if (!netif_running(dev)) - return -ENETDOWN; - sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (sdata->vif.type != NL80211_IFTYPE_STATION) @@ -1282,9 +1266,6 @@ static int ieee80211_assoc(struct wiphy *wiphy, struct net_device *dev, struct ieee80211_sub_if_data *sdata; int ret; - if (!netif_running(dev)) - return -ENETDOWN; - sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (sdata->vif.type != NL80211_IFTYPE_STATION) @@ -1323,9 +1304,6 @@ static int ieee80211_deauth(struct wiphy *wiphy, struct net_device *dev, { struct ieee80211_sub_if_data *sdata; - if (!netif_running(dev)) - return -ENETDOWN; - sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (sdata->vif.type != NL80211_IFTYPE_STATION) return -EOPNOTSUPP; @@ -1339,9 +1317,6 @@ static int ieee80211_disassoc(struct wiphy *wiphy, struct net_device *dev, { struct ieee80211_sub_if_data *sdata; - if (!netif_running(dev)) - return -ENETDOWN; - sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (sdata->vif.type != NL80211_IFTYPE_STATION) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 6f38ee7..6bb73a3 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -1556,6 +1556,11 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) goto out; } + if (!netif_running(dev)) { + err = -ENETDOWN; + goto out; + } + err = drv->ops->add_station(&drv->wiphy, dev, mac_addr, ¶ms); out: @@ -1808,6 +1813,11 @@ static int nl80211_set_mpath(struct sk_buff *skb, struct genl_info *info) goto out; } + if (!netif_running(dev)) { + err = -ENETDOWN; + goto out; + } + err = drv->ops->change_mpath(&drv->wiphy, dev, dst, next_hop); out: @@ -1846,6 +1856,11 @@ static int nl80211_new_mpath(struct sk_buff *skb, struct genl_info *info) goto out; } + if (!netif_running(dev)) { + err = -ENETDOWN; + goto out; + } + err = drv->ops->add_mpath(&drv->wiphy, dev, dst, next_hop); out: @@ -2380,6 +2395,11 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) goto out; } + if (!netif_running(dev)) { + err = -ENETDOWN; + goto out; + } + if (drv->scan_req) { err = -EBUSY; goto out; @@ -2641,6 +2661,11 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info) goto out; } + if (!netif_running(dev)) { 
+ err = -ENETDOWN; + goto out; + } + if (!info->attrs[NL80211_ATTR_MAC]) { err = -EINVAL; goto out; @@ -2709,6 +2734,11 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info) goto out; } + if (!netif_running(dev)) { + err = -ENETDOWN; + goto out; + } + if (!info->attrs[NL80211_ATTR_MAC] || !info->attrs[NL80211_ATTR_SSID]) { err = -EINVAL; @@ -2767,6 +2797,11 @@ static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info) goto out; } + if (!netif_running(dev)) { + err = -ENETDOWN; + goto out; + } + if (!info->attrs[NL80211_ATTR_MAC]) { err = -EINVAL; goto out; @@ -2821,6 +2856,11 @@ static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info) goto out; } + if (!netif_running(dev)) { + err = -ENETDOWN; + goto out; + } + if (!info->attrs[NL80211_ATTR_MAC]) { err = -EINVAL; goto out; -- cgit v0.10.2 From eec60b037a875513d9715dcdb90b13ed81fc5f26 Mon Sep 17 00:00:00 2001 From: Jouni Malinen Date: Fri, 20 Mar 2009 21:21:19 +0200 Subject: nl80211: Check iftype in cfg80211 code We do not want to require all the drivers using cfg80211 to need to do this. In addition, make the error values consistent by using EOPNOTSUPP instead of semi-random assortment of errno values. Signed-off-by: Jouni Malinen Signed-off-by: John W. Linville diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index b5810b4..e677b75 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -540,9 +540,6 @@ static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev, sdata = IEEE80211_DEV_TO_SUB_IF(dev); - if (sdata->vif.type != NL80211_IFTYPE_AP) - return -EINVAL; - old = sdata->u.ap.beacon; if (old) @@ -559,9 +556,6 @@ static int ieee80211_set_beacon(struct wiphy *wiphy, struct net_device *dev, sdata = IEEE80211_DEV_TO_SUB_IF(dev); - if (sdata->vif.type != NL80211_IFTYPE_AP) - return -EINVAL; - old = sdata->u.ap.beacon; if (!old) @@ -577,9 +571,6 @@ static int ieee80211_del_beacon(struct wiphy *wiphy, struct net_device *dev) sdata = IEEE80211_DEV_TO_SUB_IF(dev); - if (sdata->vif.type != NL80211_IFTYPE_AP) - return -EINVAL; - old = sdata->u.ap.beacon; if (!old) @@ -858,9 +849,6 @@ static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev, sdata = IEEE80211_DEV_TO_SUB_IF(dev); - if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT) - return -ENOTSUPP; - rcu_read_lock(); sta = sta_info_get(local, next_hop); if (!sta) { @@ -908,9 +896,6 @@ static int ieee80211_change_mpath(struct wiphy *wiphy, sdata = IEEE80211_DEV_TO_SUB_IF(dev); - if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT) - return -ENOTSUPP; - rcu_read_lock(); sta = sta_info_get(local, next_hop); @@ -979,9 +964,6 @@ static int ieee80211_get_mpath(struct wiphy *wiphy, struct net_device *dev, sdata = IEEE80211_DEV_TO_SUB_IF(dev); - if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT) - return -ENOTSUPP; - rcu_read_lock(); mpath = mesh_path_lookup(dst, sdata); if (!mpath) { @@ -1003,9 +985,6 @@ static int ieee80211_dump_mpath(struct wiphy *wiphy, struct net_device *dev, sdata = IEEE80211_DEV_TO_SUB_IF(dev); - if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT) - return -ENOTSUPP; - rcu_read_lock(); mpath = mesh_path_lookup_by_idx(idx, sdata); if (!mpath) { @@ -1025,8 +1004,6 @@ static int ieee80211_get_mesh_params(struct wiphy *wiphy, struct ieee80211_sub_if_data *sdata; sdata = IEEE80211_DEV_TO_SUB_IF(dev); - if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT) - return -ENOTSUPP; memcpy(conf, &(sdata->u.mesh.mshcfg), sizeof(struct mesh_config)); return 0; } @@ -1044,9 +1021,6 @@ static int 
ieee80211_set_mesh_params(struct wiphy *wiphy, struct ieee80211_sub_if_data *sdata; sdata = IEEE80211_DEV_TO_SUB_IF(dev); - if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT) - return -ENOTSUPP; - /* Set the config options which we are interested in setting */ conf = &(sdata->u.mesh.mshcfg); if (_chg_mesh_attr(NL80211_MESHCONF_RETRY_TIMEOUT, mask)) @@ -1094,9 +1068,6 @@ static int ieee80211_change_bss(struct wiphy *wiphy, sdata = IEEE80211_DEV_TO_SUB_IF(dev); - if (sdata->vif.type != NL80211_IFTYPE_AP) - return -EINVAL; - if (params->use_cts_prot >= 0) { sdata->vif.bss_conf.use_cts_prot = params->use_cts_prot; changed |= BSS_CHANGED_ERP_CTS_PROT; @@ -1209,9 +1180,6 @@ static int ieee80211_auth(struct wiphy *wiphy, struct net_device *dev, sdata = IEEE80211_DEV_TO_SUB_IF(dev); - if (sdata->vif.type != NL80211_IFTYPE_STATION) - return -EOPNOTSUPP; - switch (req->auth_type) { case NL80211_AUTHTYPE_OPEN_SYSTEM: sdata->u.mgd.auth_algs = IEEE80211_AUTH_ALG_OPEN; @@ -1268,9 +1236,6 @@ static int ieee80211_assoc(struct wiphy *wiphy, struct net_device *dev, sdata = IEEE80211_DEV_TO_SUB_IF(dev); - if (sdata->vif.type != NL80211_IFTYPE_STATION) - return -EOPNOTSUPP; - if (memcmp(sdata->u.mgd.bssid, req->peer_addr, ETH_ALEN) != 0 || !(sdata->u.mgd.flags & IEEE80211_STA_AUTHENTICATED)) return -ENOLINK; /* not authenticated */ @@ -1305,8 +1270,6 @@ static int ieee80211_deauth(struct wiphy *wiphy, struct net_device *dev, struct ieee80211_sub_if_data *sdata; sdata = IEEE80211_DEV_TO_SUB_IF(dev); - if (sdata->vif.type != NL80211_IFTYPE_STATION) - return -EOPNOTSUPP; /* TODO: req->ie */ return ieee80211_sta_deauthenticate(sdata, req->reason_code); @@ -1319,9 +1282,6 @@ static int ieee80211_disassoc(struct wiphy *wiphy, struct net_device *dev, sdata = IEEE80211_DEV_TO_SUB_IF(dev); - if (sdata->vif.type != NL80211_IFTYPE_STATION) - return -EOPNOTSUPP; - /* TODO: req->ie */ return ieee80211_sta_disassociate(sdata, req->reason_code); } diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 6bb73a3..a7d0b94 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -1049,6 +1049,11 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info) if (err) goto unlock_rtnl; + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP) { + err = -EOPNOTSUPP; + goto out; + } + switch (info->genlhdr->cmd) { case NL80211_CMD_NEW_BEACON: /* these are required for NEW_BEACON */ @@ -1136,6 +1141,10 @@ static int nl80211_del_beacon(struct sk_buff *skb, struct genl_info *info) goto out; } + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP) { + err = -EOPNOTSUPP; + goto out; + } err = drv->ops->del_beacon(&drv->wiphy, dev); out: @@ -1324,7 +1333,7 @@ static int nl80211_dump_station(struct sk_buff *skb, } if (!dev->ops->dump_station) { - err = -ENOSYS; + err = -EOPNOTSUPP; goto out_err; } @@ -1698,10 +1707,15 @@ static int nl80211_dump_mpath(struct sk_buff *skb, } if (!dev->ops->dump_mpath) { - err = -ENOSYS; + err = -EOPNOTSUPP; goto out_err; } + if (netdev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) { + err = -EOPNOTSUPP; + goto out; + } + while (1) { err = dev->ops->dump_mpath(&dev->wiphy, netdev, path_idx, dst, next_hop, &pinfo); @@ -1759,6 +1773,11 @@ static int nl80211_get_mpath(struct sk_buff *skb, struct genl_info *info) goto out; } + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) { + err = -EOPNOTSUPP; + goto out; + } + err = drv->ops->get_mpath(&drv->wiphy, dev, dst, next_hop, &pinfo); if (err) goto out; @@ -1813,6 +1832,11 @@ static int nl80211_set_mpath(struct 
sk_buff *skb, struct genl_info *info) goto out; } + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) { + err = -EOPNOTSUPP; + goto out; + } + if (!netif_running(dev)) { err = -ENETDOWN; goto out; @@ -1856,6 +1880,11 @@ static int nl80211_new_mpath(struct sk_buff *skb, struct genl_info *info) goto out; } + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) { + err = -EOPNOTSUPP; + goto out; + } + if (!netif_running(dev)) { err = -ENETDOWN; goto out; @@ -1944,6 +1973,11 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info) goto out; } + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP) { + err = -EOPNOTSUPP; + goto out; + } + err = drv->ops->change_bss(&drv->wiphy, dev, ¶ms); out: @@ -2661,6 +2695,11 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info) goto out; } + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) { + err = -EOPNOTSUPP; + goto out; + } + if (!netif_running(dev)) { err = -ENETDOWN; goto out; @@ -2734,6 +2773,11 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info) goto out; } + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) { + err = -EOPNOTSUPP; + goto out; + } + if (!netif_running(dev)) { err = -ENETDOWN; goto out; @@ -2797,6 +2841,11 @@ static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info) goto out; } + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) { + err = -EOPNOTSUPP; + goto out; + } + if (!netif_running(dev)) { err = -ENETDOWN; goto out; @@ -2856,6 +2905,11 @@ static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info) goto out; } + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) { + err = -EOPNOTSUPP; + goto out; + } + if (!netif_running(dev)) { err = -ENETDOWN; goto out; -- cgit v0.10.2 From d8cd7effc20027c313d4086b123046ff9f9a5814 Mon Sep 17 00:00:00 2001 From: Christian Lamparter Date: Mon, 23 Mar 2009 15:37:45 +0100 Subject: p54: fix SoftLED compile dependencies This patch fixes a compile problem when the MAC80211_LEDS triggers are enabled but not LED class itself. (which is sort of pointless, but anyway...) Signed-off-by: Christian Lamparter Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/p54/Kconfig b/drivers/net/wireless/p54/Kconfig index 0e344ac..7d6e9d1 100644 --- a/drivers/net/wireless/p54/Kconfig +++ b/drivers/net/wireless/p54/Kconfig @@ -71,3 +71,8 @@ config P54_SPI Nokia's N800/N810 Portable Internet Tablet. If you choose to build a module, it'll be called p54spi. 
+ +config P54_LEDS + bool + depends on P54_COMMON && MAC80211_LEDS && (LEDS_CLASS = y || LEDS_CLASS = P54_COMMON) + default y diff --git a/drivers/net/wireless/p54/p54common.c b/drivers/net/wireless/p54/p54common.c index 0a98983..0c1b057 100644 --- a/drivers/net/wireless/p54/p54common.c +++ b/drivers/net/wireless/p54/p54common.c @@ -21,9 +21,9 @@ #include #include -#ifdef CONFIG_MAC80211_LEDS +#ifdef CONFIG_P54_LEDS #include -#endif /* CONFIG_MAC80211_LEDS */ +#endif /* CONFIG_P54_LEDS */ #include "p54.h" #include "p54common.h" @@ -2420,7 +2420,7 @@ static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd, return 0; } -#ifdef CONFIG_MAC80211_LEDS +#ifdef CONFIG_P54_LEDS static void p54_led_brightness_set(struct led_classdev *led_dev, enum led_brightness brightness) { @@ -2508,7 +2508,7 @@ static void p54_unregister_leds(struct ieee80211_hw *dev) if (priv->assoc_led.registered) led_classdev_unregister(&priv->assoc_led.led_dev); } -#endif /* CONFIG_MAC80211_LEDS */ +#endif /* CONFIG_P54_LEDS */ static const struct ieee80211_ops p54_ops = { .tx = p54_tx, @@ -2592,11 +2592,11 @@ int p54_register_common(struct ieee80211_hw *dev, struct device *pdev) return err; } - #ifdef CONFIG_MAC80211_LEDS +#ifdef CONFIG_P54_LEDS err = p54_init_leds(dev); if (err) return err; - #endif /* CONFIG_MAC80211_LEDS */ +#endif /* CONFIG_P54_LEDS */ dev_info(pdev, "is registered as '%s'\n", wiphy_name(dev->wiphy)); return 0; @@ -2610,9 +2610,9 @@ void p54_free_common(struct ieee80211_hw *dev) kfree(priv->output_limit); kfree(priv->curve_data); - #ifdef CONFIG_MAC80211_LEDS +#ifdef CONFIG_P54_LEDS p54_unregister_leds(dev); - #endif /* CONFIG_MAC80211_LEDS */ +#endif /* CONFIG_P54_LEDS */ } EXPORT_SYMBOL_GPL(p54_free_common); -- cgit v0.10.2 From 9cb5412b0760981d43ac3e612992c90cea690e72 Mon Sep 17 00:00:00 2001 From: Pat Erley Date: Fri, 20 Mar 2009 22:59:59 -0400 Subject: Add mesh point functionality to ath9k This patch enables mesh point operation for ath9k. Tested with b43, ath9k, rt2500usb, and ath5k as peers. Signed-off-by: Pat Erley Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath9k/beacon.c b/drivers/net/wireless/ath9k/beacon.c index e5b0071..ec99573 100644 --- a/drivers/net/wireless/ath9k/beacon.c +++ b/drivers/net/wireless/ath9k/beacon.c @@ -70,7 +70,8 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp, ds = bf->bf_desc; flags = ATH9K_TXDESC_NOACK; - if (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC && + if (((sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) || + (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) && (ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)) { ds->ds_link = bf->bf_daddr; /* self-linked */ flags |= ATH9K_TXDESC_VEOL; @@ -728,6 +729,7 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif) ath_beacon_config_ap(sc, &conf, avp); break; case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_MESH_POINT: ath_beacon_config_adhoc(sc, &conf, avp, vif); break; case NL80211_IFTYPE_STATION: diff --git a/drivers/net/wireless/ath9k/hw.c b/drivers/net/wireless/ath9k/hw.c index 15e4d42..b15eaf8 100644 --- a/drivers/net/wireless/ath9k/hw.c +++ b/drivers/net/wireless/ath9k/hw.c @@ -1448,6 +1448,7 @@ static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode) REG_CLR_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION); break; case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_MESH_POINT: REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_ADHOC | AR_STA_ID1_KSRCH_MODE); REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION); @@ -3149,6 +3150,7 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period) flags |= AR_TBTT_TIMER_EN; break; case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_MESH_POINT: REG_SET_BIT(ah, AR_TXCFG, AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY); REG_WRITE(ah, AR_NEXT_NDP_TIMER, diff --git a/drivers/net/wireless/ath9k/main.c b/drivers/net/wireless/ath9k/main.c index 4c29cef..c13e4e5 100644 --- a/drivers/net/wireless/ath9k/main.c +++ b/drivers/net/wireless/ath9k/main.c @@ -1599,7 +1599,8 @@ void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_ADHOC); + BIT(NL80211_IFTYPE_ADHOC) | + BIT(NL80211_IFTYPE_MESH_POINT); hw->wiphy->reg_notifier = ath9k_reg_notifier; hw->wiphy->strict_regulatory = true; @@ -2207,18 +2208,13 @@ static int ath9k_add_interface(struct ieee80211_hw *hw, ic_opmode = NL80211_IFTYPE_STATION; break; case NL80211_IFTYPE_ADHOC: - if (sc->nbcnvifs >= ATH_BCBUF) { - ret = -ENOBUFS; - goto out; - } - ic_opmode = NL80211_IFTYPE_ADHOC; - break; case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_MESH_POINT: if (sc->nbcnvifs >= ATH_BCBUF) { ret = -ENOBUFS; goto out; } - ic_opmode = NL80211_IFTYPE_AP; + ic_opmode = conf->type; break; default: DPRINTF(sc, ATH_DBG_FATAL, @@ -2254,7 +2250,8 @@ static int ath9k_add_interface(struct ieee80211_hw *hw, * Note we only do this (at the moment) for station mode. 
*/ if ((conf->type == NL80211_IFTYPE_STATION) || - (conf->type == NL80211_IFTYPE_ADHOC)) { + (conf->type == NL80211_IFTYPE_ADHOC) || + (conf->type == NL80211_IFTYPE_MESH_POINT)) { if (ath9k_hw_phycounters(sc->sc_ah)) sc->imask |= ATH9K_INT_MIB; sc->imask |= ATH9K_INT_TSFOOR; @@ -2301,8 +2298,9 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw, del_timer_sync(&sc->ani.timer); /* Reclaim beacon resources */ - if (sc->sc_ah->opmode == NL80211_IFTYPE_AP || - sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) { + if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) || + (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) || + (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) { ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); ath_beacon_return(sc, avp); } @@ -2435,6 +2433,7 @@ static int ath9k_config_interface(struct ieee80211_hw *hw, switch (vif->type) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_MESH_POINT: /* Set BSSID */ memcpy(sc->curbssid, conf->bssid, ETH_ALEN); memcpy(avp->bssid, conf->bssid, ETH_ALEN); @@ -2458,7 +2457,8 @@ static int ath9k_config_interface(struct ieee80211_hw *hw, } if ((vif->type == NL80211_IFTYPE_ADHOC) || - (vif->type == NL80211_IFTYPE_AP)) { + (vif->type == NL80211_IFTYPE_AP) || + (vif->type == NL80211_IFTYPE_MESH_POINT)) { if ((conf->changed & IEEE80211_IFCC_BEACON) || (conf->changed & IEEE80211_IFCC_BEACON_ENABLED && conf->enable_beacon)) { diff --git a/drivers/net/wireless/ath9k/rc.c b/drivers/net/wireless/ath9k/rc.c index 6c2fd39..824ccbb 100644 --- a/drivers/net/wireless/ath9k/rc.c +++ b/drivers/net/wireless/ath9k/rc.c @@ -1619,6 +1619,7 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband, /* Choose rate table first */ if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) || + (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT) || (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC)) { rate_table = ath_choose_rate_table(sc, sband->band, sta->ht_cap.ht_supported, -- cgit v0.10.2 From 98dfaa577855a551e798e3a99b934386698d2026 Mon Sep 17 00:00:00 2001 From: "Luis R. Rodriguez" Date: Fri, 20 Mar 2009 23:46:11 -0400 Subject: mac80211_hwsim: let the reg workqueue breathe when regtest is set Without this the regulatory domain isn't seen and we end up intersecting for each request (each radio). Signed-off-by: Luis R. Rodriguez Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 2368b7f..5511610 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -1041,6 +1041,9 @@ static int __init init_mac80211_hwsim(void) break; } + /* give the regulatory workqueue a chance to run */ + if (regtest) + schedule_timeout_interruptible(1); err = ieee80211_register_hw(hw); if (err < 0) { printk(KERN_DEBUG "mac80211_hwsim: " -- cgit v0.10.2 From 2e097dc65673ed421bbc2e49f52c125aa43a8ee6 Mon Sep 17 00:00:00 2001 From: "Luis R. Rodriguez" Date: Fri, 20 Mar 2009 23:53:04 -0400 Subject: cfg80211: force last_request to be set for OLD_REG if regdom is EU Although EU is a bogus alpha2 we need to process the send request as our code depends on last_request being set. Cc: stable@kernel.org Reported-by: Quentin Armitage Signed-off-by: Luis R. Rodriguez Signed-off-by: John W. 
Linville diff --git a/net/wireless/reg.c b/net/wireless/reg.c index eb8b8ed..ead9dcc 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -2135,11 +2135,14 @@ int regulatory_init(void) /* * The old code still requests for a new regdomain and if * you have CRDA you get it updated, otherwise you get - * stuck with the static values. We ignore "EU" code as - * that is not a valid ISO / IEC 3166 alpha2 + * stuck with the static values. Since "EU" is not a valid + * ISO / IEC 3166 alpha2 code we can't expect userpace to + * give us a regulatory domain for it. We need last_request + * iniitalized though so lets just send a request which we + * know will be ignored... this crap will be removed once + * OLD_REG dies. */ - if (ieee80211_regdom[0] != 'E' || ieee80211_regdom[1] != 'U') - err = regulatory_hint_core(ieee80211_regdom); + err = regulatory_hint_core(ieee80211_regdom); #else cfg80211_regdomain = cfg80211_world_regdom; -- cgit v0.10.2 From cc0b6fe88e99096868bdbacbf486c97299533b5a Mon Sep 17 00:00:00 2001 From: "Luis R. Rodriguez" Date: Fri, 20 Mar 2009 23:53:05 -0400 Subject: cfg80211: fix incorrect assumption on last_request for 11d The incorrect assumption is the last regulatory request (last_request) is always a country IE when processing country IEs. Although this is true 99% of the time the first time this happens this could not be true. This fixes an oops in the branch check for the last_request when accessing drv_last_ie. The access was done under the assumption the struct won't be null. Note to stable: to port to 29 replace as follows, only 29 has country IE code: s|NL80211_REGDOM_SET_BY_COUNTRY_IE|REGDOM_SET_BY_COUNTRY_IE Cc: stable@kernel.org Reported-by: Quentin Armitage Signed-off-by: Luis R. Rodriguez Signed-off-by: John W. Linville diff --git a/net/wireless/reg.c b/net/wireless/reg.c index ead9dcc..9afc916 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -1601,6 +1601,10 @@ static bool reg_same_country_ie_hint(struct wiphy *wiphy, assert_cfg80211_lock(); + if (unlikely(last_request->initiator != + NL80211_REGDOM_SET_BY_COUNTRY_IE)) + return false; + request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx); if (!request_wiphy) @@ -1663,7 +1667,9 @@ void regulatory_hint_11d(struct wiphy *wiphy, * we optimize an early check to exit out early if we don't have to * do anything */ - if (likely(wiphy_idx_valid(last_request->wiphy_idx))) { + if (likely(last_request->initiator == + NL80211_REGDOM_SET_BY_COUNTRY_IE && + wiphy_idx_valid(last_request->wiphy_idx))) { struct cfg80211_registered_device *drv_last_ie; drv_last_ie = -- cgit v0.10.2 From 6ee7d33056f6e6fc7437d980dcc741816deedd0f Mon Sep 17 00:00:00 2001 From: "Luis R. Rodriguez" Date: Fri, 20 Mar 2009 23:53:06 -0400 Subject: cfg80211: make regdom module parameter available oustide of OLD_REG It seems a few users are using this module parameter although its not recommended. People are finding it useful despite there being utilities for setting this in userspace. I'm not aware of any distribution using this though. Until userspace and distributions catch up with a default userspace automatic replacement (GeoClue integration would be nirvana) we copy the ieee80211_regdom module parameter from OLD_REG to the new reg code to help these users migrate. Users who are using the non-valid ISO / IEC 3166 alpha "EU" in their ieee80211_regdom module parameter and migrate to non-OLD_REG enabled system will world roam. This also schedules removal of this same ieee80211_regdom module parameter circa March 2010. 
Hope is by then nirvana is reached and users will abandoned the module parameter completely. Signed-off-by: Luis R. Rodriguez Signed-off-by: John W. Linville diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index e47c0ff..8365f52 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt @@ -6,7 +6,31 @@ be removed from this file. --------------------------- -What: old static regulatory information and ieee80211_regdom module parameter +What: The ieee80211_regdom module parameter +When: March 2010 + +Why: This was inherited by the CONFIG_WIRELESS_OLD_REGULATORY code, + and currently serves as an option for users to define an + ISO / IEC 3166 alpha2 code for the country they are currently + present in. Although there are userspace API replacements for this + through nl80211 distributions haven't yet caught up with implementing + decent alternatives through standard GUIs. Although available as an + option through iw or wpa_supplicant its just a matter of time before + distributions pick up good GUI options for this. The ideal solution + would actually consist of intelligent designs which would do this for + the user automatically even when travelling through different countries. + Until then we leave this module parameter as a compromise. + + When userspace improves with reasonable widely-available alternatives for + this we will no longer need this module parameter. This entry hopes that + by the super-futuristically looking date of "March 2010" we will have + such replacements widely available. + +Who: Luis R. Rodriguez + +--------------------------- + +What: old static regulatory information When: 2.6.29 Why: The old regulatory infrastructure has been replaced with a new one which does not require statically defined regulatory domains. We do @@ -17,9 +41,7 @@ Why: The old regulatory infrastructure has been replaced with a new one * JP * EU and used by default the US when CONFIG_WIRELESS_OLD_REGULATORY was - set. We also kept around the ieee80211_regdom module parameter in case - some applications were relying on it. Changing regulatory domains - can now be done instead by using nl80211, as is done with iw. + set. Who: Luis R. Rodriguez --------------------------- diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 9afc916..ac048a1 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -122,9 +122,14 @@ static const struct ieee80211_regdomain *cfg80211_world_regdom = #ifdef CONFIG_WIRELESS_OLD_REGULATORY static char *ieee80211_regdom = "US"; +#else +static char *ieee80211_regdom = "00"; +#endif + module_param(ieee80211_regdom, charp, 0444); MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code"); +#ifdef CONFIG_WIRELESS_OLD_REGULATORY /* * We assume 40 MHz bandwidth for the old regulatory work. * We make emphasis we are using the exact same frequencies @@ -2152,7 +2157,7 @@ int regulatory_init(void) #else cfg80211_regdomain = cfg80211_world_regdom; - err = regulatory_hint_core("00"); + err = regulatory_hint_core(ieee80211_regdom); #endif if (err) { if (err == -ENOMEM) -- cgit v0.10.2 From 86f04680df4a136a4a90501572dc2f31f8426581 Mon Sep 17 00:00:00 2001 From: "Luis R. Rodriguez" Date: Fri, 20 Mar 2009 23:53:07 -0400 Subject: cfg80211: remove code about country IE support with OLD_REG We had left in code to allow interested developers to add support for parsing country IEs when OLD_REG was enabled. 
This never happened and since we're going to remove OLD_REG lets just remove these comments and code for it. This code path was never being entered so this has no functional change. Signed-off-by: Luis R. Rodriguez Signed-off-by: John W. Linville diff --git a/net/wireless/reg.c b/net/wireless/reg.c index ac048a1..6327e16 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -1420,16 +1420,6 @@ new_request: return r; } - /* - * Note: When CONFIG_WIRELESS_OLD_REGULATORY is enabled - * AND if CRDA is NOT present nothing will happen, if someone - * wants to bother with 11d with OLD_REG you can add a timer. - * If after x amount of time nothing happens you can call: - * - * return set_regdom(country_ie_regdomain); - * - * to intersect with the static rd - */ return call_crda(last_request->alpha2); } @@ -2033,28 +2023,21 @@ static int __set_regdom(const struct ieee80211_regdomain *rd) */ BUG_ON(!country_ie_regdomain); + BUG_ON(rd == country_ie_regdomain); - if (rd != country_ie_regdomain) { - /* - * Intersect what CRDA returned and our what we - * had built from the Country IE received - */ + /* + * Intersect what CRDA returned and our what we + * had built from the Country IE received + */ - intersected_rd = regdom_intersect(rd, country_ie_regdomain); + intersected_rd = regdom_intersect(rd, country_ie_regdomain); - reg_country_ie_process_debug(rd, country_ie_regdomain, - intersected_rd); + reg_country_ie_process_debug(rd, + country_ie_regdomain, + intersected_rd); - kfree(country_ie_regdomain); - country_ie_regdomain = NULL; - } else { - /* - * This would happen when CRDA was not present and - * OLD_REGULATORY was enabled. We intersect our Country - * IE rd and what was set on cfg80211 originally - */ - intersected_rd = regdom_intersect(rd, cfg80211_regdomain); - } + kfree(country_ie_regdomain); + country_ie_regdomain = NULL; if (!intersected_rd) return -EINVAL; -- cgit v0.10.2 From ac7f9cfa2c3b810e0adfb889ad407a8c79a84dbe Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Sat, 21 Mar 2009 17:07:59 +0100 Subject: cfg80211: accept no-op interface mode changes When somebody tries to set the interface mode to the existing mode, don't ask the driver but silently accept the setting. Signed-off-by: Johannes Berg Signed-off-by: John W. 
Linville diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index a7d0b94..8808431 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -607,6 +607,7 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info) enum nl80211_iftype type; struct net_device *dev; u32 _flags, *flags = NULL; + bool change = false; memset(¶ms, 0, sizeof(params)); @@ -620,11 +621,17 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info) type = dev->ieee80211_ptr->iftype; dev_put(dev); - err = -EINVAL; if (info->attrs[NL80211_ATTR_IFTYPE]) { - type = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]); - if (type > NL80211_IFTYPE_MAX) + enum nl80211_iftype ntype; + + ntype = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]); + if (type != ntype) + change = true; + type = ntype; + if (type > NL80211_IFTYPE_MAX) { + err = -EINVAL; goto unlock; + } } if (!drv->ops->change_virtual_intf || @@ -640,6 +647,7 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info) } params.mesh_id = nla_data(info->attrs[NL80211_ATTR_MESH_ID]); params.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]); + change = true; } if (info->attrs[NL80211_ATTR_MNTR_FLAGS]) { @@ -649,12 +657,18 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info) } err = parse_monitor_flags(info->attrs[NL80211_ATTR_MNTR_FLAGS], &_flags); - if (!err) - flags = &_flags; + if (err) + goto unlock; + + flags = &_flags; + change = true; } - err = drv->ops->change_virtual_intf(&drv->wiphy, ifindex, - type, flags, ¶ms); + if (change) + err = drv->ops->change_virtual_intf(&drv->wiphy, ifindex, + type, flags, ¶ms); + else + err = 0; dev = __dev_get_by_index(&init_net, ifindex); WARN_ON(!dev || (!err && dev->ieee80211_ptr->iftype != type)); diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index b84a9b4..0fd1db6 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c @@ -66,6 +66,7 @@ int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info, struct cfg80211_registered_device *rdev; struct vif_params vifparams; enum nl80211_iftype type; + int ret; if (!wdev) return -EOPNOTSUPP; @@ -96,10 +97,16 @@ int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info, return -EINVAL; } + if (type == wdev->iftype) + return 0; + memset(&vifparams, 0, sizeof(vifparams)); - return rdev->ops->change_virtual_intf(wdev->wiphy, dev->ifindex, type, - NULL, &vifparams); + ret = rdev->ops->change_virtual_intf(wdev->wiphy, dev->ifindex, type, + NULL, &vifparams); + WARN_ON(!ret && wdev->iftype != type); + + return ret; } EXPORT_SYMBOL(cfg80211_wext_siwmode); -- cgit v0.10.2 From 7986cf9581767d250ca0e5a554541bb276e08d21 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Sat, 21 Mar 2009 17:08:43 +0100 Subject: mac80211: remove mixed-cell and userspace MLME code Neither can currently be set from userspace, so there's no regression potential, and neither will be supported from userspace since the new userspace APIs allow the SME, which is in userspace, to control all we need. Signed-off-by: Johannes Berg Signed-off-by: John W. 
Linville diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index f69e84a..564167f 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -247,8 +247,9 @@ struct mesh_preq_queue { #define IEEE80211_STA_ASSOCIATED BIT(4) #define IEEE80211_STA_PROBEREQ_POLL BIT(5) #define IEEE80211_STA_CREATE_IBSS BIT(6) -#define IEEE80211_STA_MIXED_CELL BIT(7) +/* hole at 7, please re-use */ #define IEEE80211_STA_WMM_ENABLED BIT(8) +/* hole at 9, please re-use */ #define IEEE80211_STA_AUTO_SSID_SEL BIT(10) #define IEEE80211_STA_AUTO_BSSID_SEL BIT(11) #define IEEE80211_STA_AUTO_CHANNEL_SEL BIT(12) @@ -411,7 +412,6 @@ struct ieee80211_if_mesh { * * @IEEE80211_SDATA_ALLMULTI: interface wants all multicast packets * @IEEE80211_SDATA_PROMISC: interface is promisc - * @IEEE80211_SDATA_USERSPACE_MLME: userspace MLME is active * @IEEE80211_SDATA_OPERATING_GMODE: operating in G-only mode * @IEEE80211_SDATA_DONT_BRIDGE_PACKETS: bridge packets between * associated stations and deliver multicast frames both @@ -420,9 +420,8 @@ struct ieee80211_if_mesh { enum ieee80211_sub_if_data_flags { IEEE80211_SDATA_ALLMULTI = BIT(0), IEEE80211_SDATA_PROMISC = BIT(1), - IEEE80211_SDATA_USERSPACE_MLME = BIT(2), - IEEE80211_SDATA_OPERATING_GMODE = BIT(3), - IEEE80211_SDATA_DONT_BRIDGE_PACKETS = BIT(4), + IEEE80211_SDATA_OPERATING_GMODE = BIT(2), + IEEE80211_SDATA_DONT_BRIDGE_PACKETS = BIT(3), }; struct ieee80211_sub_if_data { diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 34f4798..dd2a276f 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -261,8 +261,7 @@ static int ieee80211_open(struct net_device *dev) ieee80211_bss_info_change_notify(sdata, changed); ieee80211_enable_keys(sdata); - if (sdata->vif.type == NL80211_IFTYPE_STATION && - !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME)) + if (sdata->vif.type == NL80211_IFTYPE_STATION) netif_carrier_off(dev); else netif_carrier_on(dev); diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index b0808ef..c05be09 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -849,8 +849,7 @@ static int ieee80211_privacy_mismatch(struct ieee80211_sub_if_data *sdata) int wep_privacy; int privacy_invoked; - if (!ifmgd || (ifmgd->flags & (IEEE80211_STA_MIXED_CELL | - IEEE80211_STA_EXT_SME))) + if (!ifmgd || (ifmgd->flags & IEEE80211_STA_EXT_SME)) return 0; bss = ieee80211_rx_bss_get(local, ifmgd->bssid, diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index fcc0a59..47d395a 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1877,18 +1877,13 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) if (ieee80211_vif_is_mesh(&sdata->vif)) return ieee80211_mesh_rx_mgmt(sdata, rx->skb, rx->status); - if (sdata->vif.type != NL80211_IFTYPE_STATION && - sdata->vif.type != NL80211_IFTYPE_ADHOC) - return RX_DROP_MONITOR; - + if (sdata->vif.type != NL80211_IFTYPE_ADHOC) + return ieee80211_ibss_rx_mgmt(sdata, rx->skb, rx->status); - if (sdata->vif.type == NL80211_IFTYPE_STATION) { - if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME) - return RX_DROP_MONITOR; + if (sdata->vif.type == NL80211_IFTYPE_STATION) return ieee80211_sta_rx_mgmt(sdata, rx->skb, rx->status); - } - return ieee80211_ibss_rx_mgmt(sdata, rx->skb, rx->status); + return RX_DROP_MONITOR; } static void ieee80211_rx_michael_mic_report(struct net_device *dev, diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c index ce21d66..deb4ece 100644 --- a/net/mac80211/wext.c +++ b/net/mac80211/wext.c @@ -129,9 +129,6 @@ static int ieee80211_ioctl_siwgenie(struct net_device 
*dev, sdata = IEEE80211_DEV_TO_SUB_IF(dev); - if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME) - return -EOPNOTSUPP; - if (sdata->vif.type == NL80211_IFTYPE_STATION) { int ret = ieee80211_sta_set_extra_ie(sdata, extra, data->length); if (ret) @@ -208,14 +205,6 @@ static int ieee80211_ioctl_siwessid(struct net_device *dev, sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (sdata->vif.type == NL80211_IFTYPE_STATION) { - if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME) { - if (len > IEEE80211_MAX_SSID_LEN) - return -EINVAL; - memcpy(sdata->u.mgd.ssid, ssid, len); - sdata->u.mgd.ssid_len = len; - return 0; - } - if (data->flags) sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_SSID_SEL; else @@ -274,11 +263,7 @@ static int ieee80211_ioctl_siwap(struct net_device *dev, sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (sdata->vif.type == NL80211_IFTYPE_STATION) { int ret; - if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME) { - memcpy(sdata->u.mgd.bssid, (u8 *) &ap_addr->sa_data, - ETH_ALEN); - return 0; - } + if (is_zero_ether_addr((u8 *) &ap_addr->sa_data)) sdata->u.mgd.flags |= IEEE80211_STA_AUTO_BSSID_SEL | IEEE80211_STA_AUTO_CHANNEL_SEL; -- cgit v0.10.2 From 23b53f4f55d833ecc5a11b5fba646c78d3876927 Mon Sep 17 00:00:00 2001 From: Christian Lamparter Date: Sat, 21 Mar 2009 23:04:48 +0100 Subject: ar9170: hardware and eeprom header files hardware / firmware interface definitions for Atheros' AR9170 based devices. Signed-off-by: Christian Lamparter Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ar9170/eeprom.h b/drivers/net/wireless/ar9170/eeprom.h new file mode 100644 index 0000000..d2c8cc8 --- /dev/null +++ b/drivers/net/wireless/ar9170/eeprom.h @@ -0,0 +1,179 @@ +/* + * Atheros AR9170 driver + * + * EEPROM layout + * + * Copyright 2008, Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, see + * http://www.gnu.org/licenses/. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * Copyright (c) 2007-2008 Atheros Communications, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ +#ifndef __AR9170_EEPROM_H +#define __AR9170_EEPROM_H + +#define AR5416_MAX_CHAINS 2 +#define AR5416_MODAL_SPURS 5 + +struct ar9170_eeprom_modal { + __le32 antCtrlChain[AR5416_MAX_CHAINS]; + __le32 antCtrlCommon; + s8 antennaGainCh[AR5416_MAX_CHAINS]; + u8 switchSettling; + u8 txRxAttenCh[AR5416_MAX_CHAINS]; + u8 rxTxMarginCh[AR5416_MAX_CHAINS]; + s8 adcDesiredSize; + s8 pgaDesiredSize; + u8 xlnaGainCh[AR5416_MAX_CHAINS]; + u8 txEndToXpaOff; + u8 txEndToRxOn; + u8 txFrameToXpaOn; + u8 thresh62; + s8 noiseFloorThreshCh[AR5416_MAX_CHAINS]; + u8 xpdGain; + u8 xpd; + s8 iqCalICh[AR5416_MAX_CHAINS]; + s8 iqCalQCh[AR5416_MAX_CHAINS]; + u8 pdGainOverlap; + u8 ob; + u8 db; + u8 xpaBiasLvl; + u8 pwrDecreaseFor2Chain; + u8 pwrDecreaseFor3Chain; + u8 txFrameToDataStart; + u8 txFrameToPaOn; + u8 ht40PowerIncForPdadc; + u8 bswAtten[AR5416_MAX_CHAINS]; + u8 bswMargin[AR5416_MAX_CHAINS]; + u8 swSettleHt40; + u8 reserved[22]; + struct spur_channel { + __le16 spurChan; + u8 spurRangeLow; + u8 spurRangeHigh; + } __packed spur_channels[AR5416_MODAL_SPURS]; +} __packed; + +#define AR5416_NUM_PD_GAINS 4 +#define AR5416_PD_GAIN_ICEPTS 5 + +struct ar9170_calibration_data_per_freq { + u8 pwr_pdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS]; + u8 vpd_pdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS]; +} __packed; + +#define AR5416_NUM_5G_CAL_PIERS 8 +#define AR5416_NUM_2G_CAL_PIERS 4 + +#define AR5416_NUM_5G_TARGET_PWRS 8 +#define AR5416_NUM_2G_CCK_TARGET_PWRS 3 +#define AR5416_NUM_2G_OFDM_TARGET_PWRS 4 +#define AR5416_MAX_NUM_TGT_PWRS 8 + +struct ar9170_calibration_target_power_legacy { + u8 freq; + u8 power[4]; +} __packed; + +struct ar9170_calibration_target_power_ht { + u8 freq; + u8 power[8]; +} __packed; + +#define AR5416_NUM_CTLS 24 + +struct ar9170_calctl_edges { + u8 channel; +#define AR9170_CALCTL_EDGE_FLAGS 0xC0 + u8 power_flags; +} __packed; + +#define AR5416_NUM_BAND_EDGES 8 + +struct ar9170_calctl_data { + struct ar9170_calctl_edges + control_edges[AR5416_MAX_CHAINS][AR5416_NUM_BAND_EDGES]; +} __packed; + + +struct ar9170_eeprom { + __le16 length; + __le16 checksum; + __le16 version; + u8 operating_flags; +#define AR9170_OPFLAG_5GHZ 1 +#define AR9170_OPFLAG_2GHZ 2 + u8 misc; + __le16 reg_domain[2]; + u8 mac_address[6]; + u8 rx_mask; + u8 tx_mask; + __le16 rf_silent; + __le16 bluetooth_options; + __le16 device_capabilities; + __le32 build_number; + u8 deviceType; + u8 reserved[33]; + + u8 customer_data[64]; + + struct ar9170_eeprom_modal + modal_header[2]; + + u8 cal_freq_pier_5G[AR5416_NUM_5G_CAL_PIERS]; + u8 cal_freq_pier_2G[AR5416_NUM_2G_CAL_PIERS]; + + struct ar9170_calibration_data_per_freq + cal_pier_data_5G[AR5416_MAX_CHAINS][AR5416_NUM_5G_CAL_PIERS], + cal_pier_data_2G[AR5416_MAX_CHAINS][AR5416_NUM_2G_CAL_PIERS]; + + /* power calibration data */ + struct ar9170_calibration_target_power_legacy + cal_tgt_pwr_5G[AR5416_NUM_5G_TARGET_PWRS]; + struct ar9170_calibration_target_power_ht + cal_tgt_pwr_5G_ht20[AR5416_NUM_5G_TARGET_PWRS], + cal_tgt_pwr_5G_ht40[AR5416_NUM_5G_TARGET_PWRS]; + + struct ar9170_calibration_target_power_legacy + cal_tgt_pwr_2G_cck[AR5416_NUM_2G_CCK_TARGET_PWRS], + cal_tgt_pwr_2G_ofdm[AR5416_NUM_2G_OFDM_TARGET_PWRS]; + struct ar9170_calibration_target_power_ht + cal_tgt_pwr_2G_ht20[AR5416_NUM_2G_OFDM_TARGET_PWRS], + cal_tgt_pwr_2G_ht40[AR5416_NUM_2G_OFDM_TARGET_PWRS]; + + /* conformance testing limits */ + u8 ctl_index[AR5416_NUM_CTLS]; + struct ar9170_calctl_data + ctl_data[AR5416_NUM_CTLS]; + + u8 pad; + __le16 subsystem_id; +} __packed; + +#endif /* __AR9170_EEPROM_H */ 
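
For readers following the EEPROM layout above, here is a small user-space C sketch (not part of the ar9170 patch itself) of how the fixed header fields could be pulled out of a raw EEPROM dump. The struct below is a hypothetical re-declaration that mirrors only the leading fields of struct ar9170_eeprom so that offsetof() lands on the offsets implied by that layout; the helper names and the zeroed demo buffer are illustrative assumptions, and the dump is assumed to begin at the start of the EEPROM image (the header's AR9170_EEPROM_START is presumably where the driver reads it from device memory).

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define AR9170_OPFLAG_5GHZ 1
#define AR9170_OPFLAG_2GHZ 2

/* Mirrors only the leading fields of struct ar9170_eeprom (assumed layout);
 * checksum, misc and reg_domain are kept solely so the offsets line up. */
struct ar9170_eeprom_head {
	uint16_t length;		/* __le16 in the kernel header */
	uint16_t checksum;		/* __le16 */
	uint16_t version;		/* __le16 */
	uint8_t  operating_flags;
	uint8_t  misc;
	uint16_t reg_domain[2];		/* __le16[2] */
	uint8_t  mac_address[6];
} __attribute__((packed));

static uint16_t get_le16(const uint8_t *p)
{
	/* EEPROM words are little-endian regardless of host byte order. */
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

static int ar9170_dump_eeprom_head(const uint8_t *buf, size_t len)
{
	const uint8_t *mac;
	uint8_t flags;

	if (len < sizeof(struct ar9170_eeprom_head))
		return -1;

	printf("length:  %u\n",
	       get_le16(buf + offsetof(struct ar9170_eeprom_head, length)));
	printf("version: %u\n",
	       get_le16(buf + offsetof(struct ar9170_eeprom_head, version)));

	mac = buf + offsetof(struct ar9170_eeprom_head, mac_address);
	printf("mac:     %02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);

	flags = buf[offsetof(struct ar9170_eeprom_head, operating_flags)];
	printf("bands:  %s%s\n",
	       (flags & AR9170_OPFLAG_2GHZ) ? " 2.4GHz" : "",
	       (flags & AR9170_OPFLAG_5GHZ) ? " 5GHz" : "");
	return 0;
}

int main(void)
{
	/* Stand-in for a real dump; an all-zero buffer just exercises the parser. */
	uint8_t dump[sizeof(struct ar9170_eeprom_head)];

	memset(dump, 0, sizeof(dump));
	return ar9170_dump_eeprom_head(dump, sizeof(dump));
}

When building against the driver tree, the offsets could of course be taken straight from eeprom.h; the standalone re-declaration above only keeps the example self-contained.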
diff --git a/drivers/net/wireless/ar9170/hw.h b/drivers/net/wireless/ar9170/hw.h new file mode 100644 index 0000000..ad205f8 --- /dev/null +++ b/drivers/net/wireless/ar9170/hw.h @@ -0,0 +1,406 @@ +/* + * Atheros AR9170 driver + * + * Hardware-specific definitions + * + * Copyright 2008, Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, see + * http://www.gnu.org/licenses/. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * Copyright (c) 2007-2008 Atheros Communications, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ +#ifndef __AR9170_HW_H +#define __AR9170_HW_H + +#define AR9170_MAX_CMD_LEN 64 + +enum ar9170_cmd { + AR9170_CMD_RREG = 0x00, + AR9170_CMD_WREG = 0x01, + AR9170_CMD_RMEM = 0x02, + AR9170_CMD_WMEM = 0x03, + AR9170_CMD_BITAND = 0x04, + AR9170_CMD_BITOR = 0x05, + AR9170_CMD_EKEY = 0x28, + AR9170_CMD_DKEY = 0x29, + AR9170_CMD_FREQUENCY = 0x30, + AR9170_CMD_RF_INIT = 0x31, + AR9170_CMD_SYNTH = 0x32, + AR9170_CMD_FREQ_START = 0x33, + AR9170_CMD_ECHO = 0x80, + AR9170_CMD_TALLY = 0x81, + AR9170_CMD_TALLY_APD = 0x82, + AR9170_CMD_CONFIG = 0x83, + AR9170_CMD_RESET = 0x90, + AR9170_CMD_DKRESET = 0x91, + AR9170_CMD_DKTX_STATUS = 0x92, + AR9170_CMD_FDC = 0xA0, + AR9170_CMD_WREEPROM = 0xB0, + AR9170_CMD_WFLASH = 0xB0, + AR9170_CMD_FLASH_ERASE = 0xB1, + AR9170_CMD_FLASH_PROG = 0xB2, + AR9170_CMD_FLASH_CHKSUM = 0xB3, + AR9170_CMD_FLASH_READ = 0xB4, + AR9170_CMD_FW_DL_INIT = 0xB5, + AR9170_CMD_MEM_WREEPROM = 0xBB, +}; + +/* endpoints */ +#define AR9170_EP_TX 1 +#define AR9170_EP_RX 2 +#define AR9170_EP_IRQ 3 +#define AR9170_EP_CMD 4 + +#define AR9170_EEPROM_START 0x1600 + +#define AR9170_GPIO_REG_BASE 0x1d0100 +#define AR9170_GPIO_REG_PORT_TYPE AR9170_GPIO_REG_BASE +#define AR9170_GPIO_REG_DATA (AR9170_GPIO_REG_BASE + 4) +#define AR9170_NUM_LEDS 2 + + +#define AR9170_USB_REG_BASE 0x1e1000 +#define AR9170_USB_REG_DMA_CTL (AR9170_USB_REG_BASE + 0x108) +#define AR9170_DMA_CTL_ENABLE_TO_DEVICE 0x1 +#define AR9170_DMA_CTL_ENABLE_FROM_DEVICE 0x2 +#define AR9170_DMA_CTL_HIGH_SPEED 0x4 +#define AR9170_DMA_CTL_PACKET_MODE 0x8 + +#define AR9170_USB_REG_MAX_AGG_UPLOAD (AR9170_USB_REG_BASE + 0x110) +#define AR9170_USB_REG_UPLOAD_TIME_CTL (AR9170_USB_REG_BASE + 0x114) + + + +#define AR9170_MAC_REG_BASE 0x1c3000 + +#define AR9170_MAC_REG_TSF_L (AR9170_MAC_REG_BASE + 0x514) +#define AR9170_MAC_REG_TSF_H (AR9170_MAC_REG_BASE + 0x518) + +#define AR9170_MAC_REG_ATIM_WINDOW (AR9170_MAC_REG_BASE + 0x51C) +#define AR9170_MAC_REG_BCN_PERIOD (AR9170_MAC_REG_BASE + 0x520) +#define AR9170_MAC_REG_PRETBTT (AR9170_MAC_REG_BASE + 0x524) + +#define AR9170_MAC_REG_MAC_ADDR_L (AR9170_MAC_REG_BASE + 0x610) +#define AR9170_MAC_REG_MAC_ADDR_H (AR9170_MAC_REG_BASE + 0x614) +#define AR9170_MAC_REG_BSSID_L (AR9170_MAC_REG_BASE + 0x618) +#define AR9170_MAC_REG_BSSID_H (AR9170_MAC_REG_BASE + 0x61c) + +#define AR9170_MAC_REG_GROUP_HASH_TBL_L (AR9170_MAC_REG_BASE + 0x624) +#define AR9170_MAC_REG_GROUP_HASH_TBL_H (AR9170_MAC_REG_BASE + 0x628) + +#define AR9170_MAC_REG_RX_TIMEOUT (AR9170_MAC_REG_BASE + 0x62C) + +#define AR9170_MAC_REG_BASIC_RATE (AR9170_MAC_REG_BASE + 0x630) +#define AR9170_MAC_REG_MANDATORY_RATE (AR9170_MAC_REG_BASE + 0x634) +#define AR9170_MAC_REG_RTS_CTS_RATE (AR9170_MAC_REG_BASE + 0x638) +#define AR9170_MAC_REG_BACKOFF_PROTECT (AR9170_MAC_REG_BASE + 0x63c) +#define AR9170_MAC_REG_RX_THRESHOLD (AR9170_MAC_REG_BASE + 0x640) +#define AR9170_MAC_REG_RX_PE_DELAY (AR9170_MAC_REG_BASE + 0x64C) + +#define AR9170_MAC_REG_DYNAMIC_SIFS_ACK (AR9170_MAC_REG_BASE + 0x658) +#define AR9170_MAC_REG_SNIFFER (AR9170_MAC_REG_BASE + 0x674) +#define AR9170_MAC_REG_SNIFFER_ENABLE_PROMISC BIT(0) +#define AR9170_MAC_REG_SNIFFER_DEFAULTS 0x02000000 +#define AR9170_MAC_REG_ENCRYPTION (AR9170_MAC_REG_BASE + 0x678) +#define AR9170_MAC_REG_ENCRYPTION_RX_SOFTWARE BIT(3) +#define AR9170_MAC_REG_ENCRYPTION_DEFAULTS 0x70 + +#define AR9170_MAC_REG_MISC_680 (AR9170_MAC_REG_BASE + 0x680) +#define AR9170_MAC_REG_TX_UNDERRUN (AR9170_MAC_REG_BASE + 0x688) + +#define AR9170_MAC_REG_FRAMETYPE_FILTER (AR9170_MAC_REG_BASE + 0x68c) +#define 
AR9170_MAC_REG_FTF_ASSOC_REQ BIT(0) +#define AR9170_MAC_REG_FTF_ASSOC_RESP BIT(1) +#define AR9170_MAC_REG_FTF_REASSOC_REQ BIT(2) +#define AR9170_MAC_REG_FTF_REASSOC_RESP BIT(3) +#define AR9170_MAC_REG_FTF_PRB_REQ BIT(4) +#define AR9170_MAC_REG_FTF_PRB_RESP BIT(5) +#define AR9170_MAC_REG_FTF_BIT6 BIT(6) +#define AR9170_MAC_REG_FTF_BIT7 BIT(7) +#define AR9170_MAC_REG_FTF_BEACON BIT(8) +#define AR9170_MAC_REG_FTF_ATIM BIT(9) +#define AR9170_MAC_REG_FTF_DEASSOC BIT(10) +#define AR9170_MAC_REG_FTF_AUTH BIT(11) +#define AR9170_MAC_REG_FTF_DEAUTH BIT(12) +#define AR9170_MAC_REG_FTF_BIT13 BIT(13) +#define AR9170_MAC_REG_FTF_BIT14 BIT(14) +#define AR9170_MAC_REG_FTF_BIT15 BIT(15) +#define AR9170_MAC_REG_FTF_BAR BIT(24) +#define AR9170_MAC_REG_FTF_BIT25 BIT(25) +#define AR9170_MAC_REG_FTF_PSPOLL BIT(26) +#define AR9170_MAC_REG_FTF_RTS BIT(27) +#define AR9170_MAC_REG_FTF_CTS BIT(28) +#define AR9170_MAC_REG_FTF_ACK BIT(29) +#define AR9170_MAC_REG_FTF_CFE BIT(30) +#define AR9170_MAC_REG_FTF_CFE_ACK BIT(31) +#define AR9170_MAC_REG_FTF_DEFAULTS 0x0500ffff +#define AR9170_MAC_REG_FTF_MONITOR 0xfd00ffff + +#define AR9170_MAC_REG_RX_TOTAL (AR9170_MAC_REG_BASE + 0x6A0) +#define AR9170_MAC_REG_RX_CRC32 (AR9170_MAC_REG_BASE + 0x6A4) +#define AR9170_MAC_REG_RX_CRC16 (AR9170_MAC_REG_BASE + 0x6A8) +#define AR9170_MAC_REG_RX_ERR_DECRYPTION_UNI (AR9170_MAC_REG_BASE + 0x6AC) +#define AR9170_MAC_REG_RX_OVERRUN (AR9170_MAC_REG_BASE + 0x6B0) +#define AR9170_MAC_REG_RX_ERR_DECRYPTION_MUL (AR9170_MAC_REG_BASE + 0x6BC) +#define AR9170_MAC_REG_TX_RETRY (AR9170_MAC_REG_BASE + 0x6CC) +#define AR9170_MAC_REG_TX_TOTAL (AR9170_MAC_REG_BASE + 0x6F4) + + +#define AR9170_MAC_REG_ACK_EXTENSION (AR9170_MAC_REG_BASE + 0x690) +#define AR9170_MAC_REG_EIFS_AND_SIFS (AR9170_MAC_REG_BASE + 0x698) + +#define AR9170_MAC_REG_SLOT_TIME (AR9170_MAC_REG_BASE + 0x6F0) + +#define AR9170_MAC_REG_POWERMANAGEMENT (AR9170_MAC_REG_BASE + 0x700) +#define AR9170_MAC_REG_POWERMGT_IBSS 0xe0 +#define AR9170_MAC_REG_POWERMGT_AP 0xa1 +#define AR9170_MAC_REG_POWERMGT_STA 0x2 +#define AR9170_MAC_REG_POWERMGT_AP_WDS 0x3 +#define AR9170_MAC_REG_POWERMGT_DEFAULTS (0xf << 24) + +#define AR9170_MAC_REG_ROLL_CALL_TBL_L (AR9170_MAC_REG_BASE + 0x704) +#define AR9170_MAC_REG_ROLL_CALL_TBL_H (AR9170_MAC_REG_BASE + 0x708) + +#define AR9170_MAC_REG_AC0_CW (AR9170_MAC_REG_BASE + 0xB00) +#define AR9170_MAC_REG_AC1_CW (AR9170_MAC_REG_BASE + 0xB04) +#define AR9170_MAC_REG_AC2_CW (AR9170_MAC_REG_BASE + 0xB08) +#define AR9170_MAC_REG_AC3_CW (AR9170_MAC_REG_BASE + 0xB0C) +#define AR9170_MAC_REG_AC4_CW (AR9170_MAC_REG_BASE + 0xB10) +#define AR9170_MAC_REG_AC1_AC0_AIFS (AR9170_MAC_REG_BASE + 0xB14) +#define AR9170_MAC_REG_AC3_AC2_AIFS (AR9170_MAC_REG_BASE + 0xB18) + +#define AR9170_MAC_REG_RETRY_MAX (AR9170_MAC_REG_BASE + 0xB28) + +#define AR9170_MAC_REG_FCS_SELECT (AR9170_MAC_REG_BASE + 0xBB0) +#define AR9170_MAC_FCS_SWFCS 0x1 +#define AR9170_MAC_FCS_FIFO_PROT 0x4 + + +#define AR9170_MAC_REG_TXOP_NOT_ENOUGH_IND (AR9170_MAC_REG_BASE + 0xB30) + +#define AR9170_MAC_REG_AC1_AC0_TXOP (AR9170_MAC_REG_BASE + 0xB44) +#define AR9170_MAC_REG_AC3_AC2_TXOP (AR9170_MAC_REG_BASE + 0xB48) + +#define AR9170_MAC_REG_ACK_TABLE (AR9170_MAC_REG_BASE + 0xC00) +#define AR9170_MAC_REG_AMPDU_RX_THRESH (AR9170_MAC_REG_BASE + 0xC50) + +#define AR9170_MAC_REG_TXRX_MPI (AR9170_MAC_REG_BASE + 0xD7C) +#define AR9170_MAC_TXRX_MPI_TX_MPI_MASK 0x0000000f +#define AR9170_MAC_TXRX_MPI_TX_TO_MASK 0x0000fff0 +#define AR9170_MAC_TXRX_MPI_RX_MPI_MASK 0x000f0000 +#define AR9170_MAC_TXRX_MPI_RX_TO_MASK 0xfff00000 + 
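
The MAC register block above is addressed as offsets from AR9170_MAC_REG_BASE, and several registers pack more than one field behind the *_MASK constants (AR9170_MAC_REG_TXRX_MPI, for instance). As a reading aid, here is a minimal sketch of decoding that register; it is not part of the patch, the shift amounts are derived from the mask values, and it assumes the ar9170_read_reg() helper that a later patch in this series adds.

/*
 * Illustrative sketch only: decode the packed TX/RX MPI counters from
 * AR9170_MAC_REG_TXRX_MPI with the masks defined above.  The shift
 * amounts follow directly from the mask values; ar9170_read_reg() is
 * the register-read helper introduced later in this series.
 */
static int ar9170_dump_txrx_mpi(struct ar9170 *ar)
{
	u32 val;
	int err;

	err = ar9170_read_reg(ar, AR9170_MAC_REG_TXRX_MPI, &val);
	if (err)
		return err;

	printk(KERN_DEBUG "%s: tx_mpi=%u tx_to=%u rx_mpi=%u rx_to=%u\n",
	       wiphy_name(ar->hw->wiphy),
	       val & AR9170_MAC_TXRX_MPI_TX_MPI_MASK,
	       (val & AR9170_MAC_TXRX_MPI_TX_TO_MASK) >> 4,
	       (val & AR9170_MAC_TXRX_MPI_RX_MPI_MASK) >> 16,
	       (val & AR9170_MAC_TXRX_MPI_RX_TO_MASK) >> 20);

	return 0;
}
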
+#define AR9170_MAC_REG_BCN_ADDR (AR9170_MAC_REG_BASE + 0xD84) +#define AR9170_MAC_REG_BCN_LENGTH (AR9170_MAC_REG_BASE + 0xD88) +#define AR9170_MAC_REG_BCN_PLCP (AR9170_MAC_REG_BASE + 0xD90) +#define AR9170_MAC_REG_BCN_CTRL (AR9170_MAC_REG_BASE + 0xD94) +#define AR9170_MAC_REG_BCN_HT1 (AR9170_MAC_REG_BASE + 0xDA0) +#define AR9170_MAC_REG_BCN_HT2 (AR9170_MAC_REG_BASE + 0xDA4) + + +#define AR9170_PWR_REG_BASE 0x1D4000 + +#define AR9170_PWR_REG_CLOCK_SEL (AR9170_PWR_REG_BASE + 0x008) +#define AR9170_PWR_CLK_AHB_40MHZ 0 +#define AR9170_PWR_CLK_AHB_20_22MHZ 1 +#define AR9170_PWR_CLK_AHB_40_44MHZ 2 +#define AR9170_PWR_CLK_AHB_80_88MHZ 3 +#define AR9170_PWR_CLK_DAC_160_INV_DLY 0x70 + + +/* put beacon here in memory */ +#define AR9170_BEACON_BUFFER_ADDRESS 0x117900 + + +struct ar9170_tx_control { + __le16 length; + __le16 mac_control; + __le32 phy_control; + u8 frame_data[0]; +} __packed; + +/* these are either-or */ +#define AR9170_TX_MAC_PROT_RTS 0x0001 +#define AR9170_TX_MAC_PROT_CTS 0x0002 + +#define AR9170_TX_MAC_NO_ACK 0x0004 +/* if unset, MAC will only do SIFS space before frame */ +#define AR9170_TX_MAC_BACKOFF 0x0008 +#define AR9170_TX_MAC_BURST 0x0010 +#define AR9170_TX_MAC_AGGR 0x0020 + +/* encryption is a two-bit field */ +#define AR9170_TX_MAC_ENCR_NONE 0x0000 +#define AR9170_TX_MAC_ENCR_RC4 0x0040 +#define AR9170_TX_MAC_ENCR_CENC 0x0080 +#define AR9170_TX_MAC_ENCR_AES 0x00c0 + +#define AR9170_TX_MAC_MMIC 0x0100 +#define AR9170_TX_MAC_HW_DURATION 0x0200 +#define AR9170_TX_MAC_QOS_SHIFT 10 +#define AR9170_TX_MAC_QOS_MASK (3 << AR9170_TX_MAC_QOS_SHIFT) +#define AR9170_TX_MAC_AGGR_QOS_BIT1 0x0400 +#define AR9170_TX_MAC_AGGR_QOS_BIT2 0x0800 +#define AR9170_TX_MAC_DISABLE_TXOP 0x1000 +#define AR9170_TX_MAC_TXOP_RIFS 0x2000 +#define AR9170_TX_MAC_IMM_AMPDU 0x4000 +#define AR9170_TX_MAC_RATE_PROBE 0x8000 + +/* either-or */ +#define AR9170_TX_PHY_MOD_CCK 0x00000000 +#define AR9170_TX_PHY_MOD_OFDM 0x00000001 +#define AR9170_TX_PHY_MOD_HT 0x00000002 + +/* depends on modulation */ +#define AR9170_TX_PHY_SHORT_PREAMBLE 0x00000004 +#define AR9170_TX_PHY_GREENFIELD 0x00000004 + +#define AR9170_TX_PHY_BW_SHIFT 3 +#define AR9170_TX_PHY_BW_MASK (3 << AR9170_TX_PHY_BW_SHIFT) +#define AR9170_TX_PHY_BW_20MHZ 0 +#define AR9170_TX_PHY_BW_40MHZ 2 +#define AR9170_TX_PHY_BW_40MHZ_DUP 3 + +#define AR9170_TX_PHY_TX_HEAVY_CLIP_SHIFT 6 +#define AR9170_TX_PHY_TX_HEAVY_CLIP_MASK (7 << AR9170_TX_PHY_TX_HEAVY_CLIP_SHIFT) + +#define AR9170_TX_PHY_TX_PWR_SHIFT 9 +#define AR9170_TX_PHY_TX_PWR_MASK (0x3f << AR9170_TX_PHY_TX_PWR_SHIFT) + +/* not part of the hw-spec */ +#define AR9170_TX_PHY_QOS_SHIFT 25 +#define AR9170_TX_PHY_QOS_MASK (3 << AR9170_TX_PHY_QOS_SHIFT) + +#define AR9170_TX_PHY_TXCHAIN_SHIFT 15 +#define AR9170_TX_PHY_TXCHAIN_MASK (7 << AR9170_TX_PHY_TXCHAIN_SHIFT) +#define AR9170_TX_PHY_TXCHAIN_1 1 +/* use for cck, ofdm 6/9/12/18/24 and HT if capable */ +#define AR9170_TX_PHY_TXCHAIN_2 5 + +#define AR9170_TX_PHY_MCS_SHIFT 18 +#define AR9170_TX_PHY_MCS_MASK (0x7f << AR9170_TX_PHY_MCS_SHIFT) + +#define AR9170_TX_PHY_SHORT_GI 0x80000000 + +struct ar9170_rx_head { + u8 plcp[12]; +}; + +struct ar9170_rx_tail { + union { + struct { + u8 rssi_ant0, rssi_ant1, rssi_ant2, + rssi_ant0x, rssi_ant1x, rssi_ant2x, + rssi_combined; + }; + u8 rssi[7]; + }; + + u8 evm_stream0[6], evm_stream1[6]; + u8 phy_err; + u8 SAidx, DAidx; + u8 error; + u8 status; +}; + +#define AR9170_ENC_ALG_NONE 0x0 +#define AR9170_ENC_ALG_WEP64 0x1 +#define AR9170_ENC_ALG_TKIP 0x2 +#define AR9170_ENC_ALG_AESCCMP 0x4 +#define AR9170_ENC_ALG_WEP128 0x5 
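
The ar9170_tx_control descriptor above carries its rate, bandwidth, power and chain selection in the AR9170_TX_PHY_* bit-fields. Below is a minimal sketch of how they combine into one phy_control word; it is not part of the patch, the power level and chain mask are placeholders, and the real packing logic lives in ar9170_op_tx() later in this series.

/*
 * Illustrative sketch only: pack a legacy-OFDM phy_control word for
 * struct ar9170_tx_control from the AR9170_TX_PHY_* fields above.
 * 0xb is the 6 Mbit/s PLCP rate nibble from the driver's rate table;
 * the power level and chain mask are placeholder values.
 */
static __le32 ar9170_example_phy_control(void)
{
	u32 phy = AR9170_TX_PHY_MOD_OFDM;

	phy |= AR9170_TX_PHY_BW_20MHZ << AR9170_TX_PHY_BW_SHIFT;
	phy |= (0xb << AR9170_TX_PHY_MCS_SHIFT) & AR9170_TX_PHY_MCS_MASK;
	phy |= (20 << AR9170_TX_PHY_TX_PWR_SHIFT) & AR9170_TX_PHY_TX_PWR_MASK;
	phy |= AR9170_TX_PHY_TXCHAIN_2 << AR9170_TX_PHY_TXCHAIN_SHIFT;

	return cpu_to_le32(phy);
}
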
+#define AR9170_ENC_ALG_WEP256 0x6 +#define AR9170_ENC_ALG_CENC 0x7 + +#define AR9170_RX_ENC_SOFTWARE 0x8 + +static inline u8 ar9170_get_decrypt_type(struct ar9170_rx_tail *t) +{ + return (t->SAidx & 0xc0) >> 4 | + (t->DAidx & 0xc0) >> 6; +} + +#define AR9170_RX_STATUS_MODULATION_MASK 0x03 +#define AR9170_RX_STATUS_MODULATION_CCK 0x00 +#define AR9170_RX_STATUS_MODULATION_OFDM 0x01 +#define AR9170_RX_STATUS_MODULATION_HT 0x02 +#define AR9170_RX_STATUS_MODULATION_DUPOFDM 0x03 + +/* depends on modulation */ +#define AR9170_RX_STATUS_SHORT_PREAMBLE 0x08 +#define AR9170_RX_STATUS_GREENFIELD 0x08 + +#define AR9170_RX_STATUS_MPDU_MASK 0x30 +#define AR9170_RX_STATUS_MPDU_SINGLE 0x00 +#define AR9170_RX_STATUS_MPDU_FIRST 0x10 +#define AR9170_RX_STATUS_MPDU_MIDDLE 0x20 +#define AR9170_RX_STATUS_MPDU_LAST 0x30 + + +#define AR9170_RX_ERROR_RXTO 0x01 +#define AR9170_RX_ERROR_OVERRUN 0x02 +#define AR9170_RX_ERROR_DECRYPT 0x04 +#define AR9170_RX_ERROR_FCS 0x08 +#define AR9170_RX_ERROR_WRONG_RA 0x10 +#define AR9170_RX_ERROR_PLCP 0x20 +#define AR9170_RX_ERROR_MMIC 0x40 + +struct ar9170_cmd_tx_status { + __le16 unkn; + u8 dst[ETH_ALEN]; + __le32 rate; + __le16 status; +} __packed; + +#define AR9170_TX_STATUS_COMPLETE 0x00 +#define AR9170_TX_STATUS_RETRY 0x01 +#define AR9170_TX_STATUS_FAILED 0x02 + +struct ar9170_cmd_ba_failed_count { + __le16 failed; + __le16 rate; +} __packed; + +struct ar9170_cmd_response { + u8 flag; + u8 type; + + union { + struct ar9170_cmd_tx_status tx_status; + struct ar9170_cmd_ba_failed_count ba_fail_cnt; + u8 data[0]; + }; +} __packed; + +/* mac80211 queue to HW/FW map */ +static const u8 ar9170_qos_hwmap[4] = { 3, 2, 0, 1 }; + +/* HW/FW queue to mac80211 map */ +static const u8 ar9170_qos_mac80211map[4] = { 2, 3, 1, 0 }; + +#endif /* __AR9170_HW_H */ -- cgit v0.10.2 From e9348cdd280eb6a1d6d38fef513b578dc9ead363 Mon Sep 17 00:00:00 2001 From: Christian Lamparter Date: Sat, 21 Mar 2009 23:05:13 +0100 Subject: ar9170: ar9170: mac80211 interaction code This patch contains almost all mac80211 interaction code of AR9170. Signed-off-by: Christian Lamparter Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ar9170/ar9170.h b/drivers/net/wireless/ar9170/ar9170.h new file mode 100644 index 0000000..c49d713 --- /dev/null +++ b/drivers/net/wireless/ar9170/ar9170.h @@ -0,0 +1,209 @@ +/* + * Atheros AR9170 driver + * + * Driver specific definitions + * + * Copyright 2008, Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, see + * http://www.gnu.org/licenses/. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * Copyright (c) 2007-2008 Atheros Communications, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +#ifndef __AR9170_H +#define __AR9170_H + +#include +#include +#include +#include +#ifdef CONFIG_AR9170_LEDS +#include +#endif /* CONFIG_AR9170_LEDS */ +#include "eeprom.h" +#include "hw.h" + +#define PAYLOAD_MAX (AR9170_MAX_CMD_LEN/4 - 1) + +enum ar9170_bw { + AR9170_BW_20, + AR9170_BW_40_BELOW, + AR9170_BW_40_ABOVE, + + __AR9170_NUM_BW, +}; + +enum ar9170_rf_init_mode { + AR9170_RFI_NONE, + AR9170_RFI_WARM, + AR9170_RFI_COLD, +}; + +#define AR9170_MAX_RX_BUFFER_SIZE 8192 + +#ifdef CONFIG_AR9170_LEDS +struct ar9170; + +struct ar9170_led { + struct ar9170 *ar; + struct led_classdev l; + char name[32]; + unsigned int toggled; + bool registered; +}; + +#endif /* CONFIG_AR9170_LEDS */ + +enum ar9170_device_state { + AR9170_UNKNOWN_STATE, + AR9170_STOPPED, + AR9170_IDLE, + AR9170_STARTED, + AR9170_ASSOCIATED, +}; + +struct ar9170 { + struct ieee80211_hw *hw; + struct mutex mutex; + enum ar9170_device_state state; + + int (*open)(struct ar9170 *); + void (*stop)(struct ar9170 *); + int (*tx)(struct ar9170 *, struct sk_buff *, bool, unsigned int); + int (*exec_cmd)(struct ar9170 *, enum ar9170_cmd, u32 , + void *, u32 , void *); + void (*callback_cmd)(struct ar9170 *, u32 , void *); + + /* interface mode settings */ + struct ieee80211_vif *vif; + u8 mac_addr[ETH_ALEN]; + u8 bssid[ETH_ALEN]; + + /* beaconing */ + struct sk_buff *beacon; + struct work_struct beacon_work; + + /* cryptographic engine */ + u64 usedkeys; + bool rx_software_decryption; + bool disable_offload; + + /* filter settings */ + struct work_struct filter_config_work; + u64 cur_mc_hash, want_mc_hash; + u32 cur_filter, want_filter; + unsigned int filter_changed; + bool sniffer_enabled; + + /* PHY */ + struct ieee80211_channel *channel; + int noise[4]; + + /* power calibration data */ + u8 power_5G_leg[4]; + u8 power_2G_cck[4]; + u8 power_2G_ofdm[4]; + u8 power_5G_ht20[8]; + u8 power_5G_ht40[8]; + u8 power_2G_ht20[8]; + u8 power_2G_ht40[8]; + +#ifdef CONFIG_AR9170_LEDS + struct delayed_work led_work; + struct ar9170_led leds[AR9170_NUM_LEDS]; +#endif /* CONFIG_AR9170_LEDS */ + + /* qos queue settings */ + spinlock_t tx_stats_lock; + struct ieee80211_tx_queue_stats tx_stats[5]; + struct ieee80211_tx_queue_params edcf[5]; + + spinlock_t cmdlock; + __le32 cmdbuf[PAYLOAD_MAX + 1]; + + /* MAC statistics */ + struct ieee80211_low_level_stats stats; + + /* EEPROM */ + struct ar9170_eeprom eeprom; + + /* global tx status for unregistered Stations. 
*/ + struct sk_buff_head global_tx_status; + struct sk_buff_head global_tx_status_waste; + struct delayed_work tx_status_janitor; +}; + +struct ar9170_sta_info { + struct sk_buff_head tx_status; +}; + +#define IS_STARTED(a) (a->state >= AR9170_STARTED) +#define IS_ACCEPTING_CMD(a) (a->state >= AR9170_IDLE) + +#define AR9170_FILTER_CHANGED_PROMISC BIT(0) +#define AR9170_FILTER_CHANGED_MULTICAST BIT(1) +#define AR9170_FILTER_CHANGED_FRAMEFILTER BIT(2) + +/* exported interface */ +void *ar9170_alloc(size_t priv_size); +int ar9170_register(struct ar9170 *ar, struct device *pdev); +void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb); +void ar9170_unregister(struct ar9170 *ar); +void ar9170_handle_tx_status(struct ar9170 *ar, struct sk_buff *skb, + bool update_statistics, u16 tx_status); + +/* MAC */ +int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb); +int ar9170_init_mac(struct ar9170 *ar); +int ar9170_set_qos(struct ar9170 *ar); +int ar9170_update_multicast(struct ar9170 *ar); +int ar9170_update_frame_filter(struct ar9170 *ar); +int ar9170_set_operating_mode(struct ar9170 *ar); +int ar9170_set_beacon_timers(struct ar9170 *ar); +int ar9170_set_hwretry_limit(struct ar9170 *ar, u32 max_retry); +int ar9170_update_beacon(struct ar9170 *ar); +void ar9170_new_beacon(struct work_struct *work); +int ar9170_upload_key(struct ar9170 *ar, u8 id, const u8 *mac, u8 ktype, + u8 keyidx, u8 *keydata, int keylen); +int ar9170_disable_key(struct ar9170 *ar, u8 id); + +/* LEDs */ +#ifdef CONFIG_AR9170_LEDS +int ar9170_register_leds(struct ar9170 *ar); +void ar9170_unregister_leds(struct ar9170 *ar); +#endif /* CONFIG_AR9170_LEDS */ +int ar9170_init_leds(struct ar9170 *ar); +int ar9170_set_leds_state(struct ar9170 *ar, u32 led_state); + +/* PHY / RF */ +int ar9170_init_phy(struct ar9170 *ar, enum ieee80211_band band); +int ar9170_init_rf(struct ar9170 *ar); +int ar9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel, + enum ar9170_rf_init_mode rfi, enum ar9170_bw bw); + +#endif /* __AR9170_H */ diff --git a/drivers/net/wireless/ar9170/cmd.c b/drivers/net/wireless/ar9170/cmd.c new file mode 100644 index 0000000..fd5625c --- /dev/null +++ b/drivers/net/wireless/ar9170/cmd.c @@ -0,0 +1,130 @@ +/* + * Atheros AR9170 driver + * + * Basic HW register/memory/command access functions + * + * Copyright 2008, Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, see + * http://www.gnu.org/licenses/. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * Copyright (c) 2007-2008 Atheros Communications, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "ar9170.h" +#include "cmd.h" + +int ar9170_write_mem(struct ar9170 *ar, const __le32 *data, size_t len) +{ + int err; + + if (unlikely(!IS_ACCEPTING_CMD(ar))) + return 0; + + err = ar->exec_cmd(ar, AR9170_CMD_WMEM, len, (u8 *) data, 0, NULL); + if (err) + printk(KERN_DEBUG "%s: writing memory failed\n", + wiphy_name(ar->hw->wiphy)); + return err; +} + +int ar9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val) +{ + __le32 buf[2] = { + cpu_to_le32(reg), + cpu_to_le32(val), + }; + int err; + + if (unlikely(!IS_ACCEPTING_CMD(ar))) + return 0; + + err = ar->exec_cmd(ar, AR9170_CMD_WREG, sizeof(buf), + (u8 *) buf, 0, NULL); + if (err) + printk(KERN_DEBUG "%s: writing reg %#x (val %#x) failed\n", + wiphy_name(ar->hw->wiphy), reg, val); + return err; +} + +static int ar9170_read_mreg(struct ar9170 *ar, int nregs, + const u32 *regs, u32 *out) +{ + int i, err; + __le32 *offs, *res; + + if (unlikely(!IS_ACCEPTING_CMD(ar))) + return 0; + + /* abuse "out" for the register offsets, must be same length */ + offs = (__le32 *)out; + for (i = 0; i < nregs; i++) + offs[i] = cpu_to_le32(regs[i]); + + /* also use the same buffer for the input */ + res = (__le32 *)out; + + err = ar->exec_cmd(ar, AR9170_CMD_RREG, + 4 * nregs, (u8 *)offs, + 4 * nregs, (u8 *)res); + if (err) + return err; + + /* convert result to cpu endian */ + for (i = 0; i < nregs; i++) + out[i] = le32_to_cpu(res[i]); + + return 0; +} + +int ar9170_read_reg(struct ar9170 *ar, u32 reg, u32 *val) +{ + return ar9170_read_mreg(ar, 1, ®, val); +} + +int ar9170_echo_test(struct ar9170 *ar, u32 v) +{ + __le32 echobuf = cpu_to_le32(v); + __le32 echores; + int err; + + if (unlikely(!IS_ACCEPTING_CMD(ar))) + return -ENODEV; + + err = ar->exec_cmd(ar, AR9170_CMD_ECHO, + 4, (u8 *)&echobuf, + 4, (u8 *)&echores); + if (err) + return err; + + if (echobuf != echores) + return -EINVAL; + + return 0; +} +EXPORT_SYMBOL_GPL(ar9170_echo_test); diff --git a/drivers/net/wireless/ar9170/cmd.h b/drivers/net/wireless/ar9170/cmd.h new file mode 100644 index 0000000..a4f0e50 --- /dev/null +++ b/drivers/net/wireless/ar9170/cmd.h @@ -0,0 +1,91 @@ +/* + * Atheros AR9170 driver + * + * Basic HW register/memory/command access functions + * + * Copyright 2008, Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, see + * http://www.gnu.org/licenses/. 
+ * + * This file incorporates work covered by the following copyright and + * permission notice: + * Copyright (c) 2007-2008 Atheros Communications, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +#ifndef __CMD_H +#define __CMD_H + +#include "ar9170.h" + +/* basic HW access */ +int ar9170_write_mem(struct ar9170 *ar, const __le32 *data, size_t len); +int ar9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val); +int ar9170_read_reg(struct ar9170 *ar, u32 reg, u32 *val); +int ar9170_echo_test(struct ar9170 *ar, u32 v); + +/* + * Macros to facilitate writing multiple registers in a single + * write-combining USB command. Note that when the first group + * fails the whole thing will fail without any others attempted, + * but you won't know which write in the group failed. + */ +#define ar9170_regwrite_begin(ar) \ +do { \ + int __nreg = 0, __err = 0; \ + struct ar9170 *__ar = ar; + +#define ar9170_regwrite(r, v) do { \ + __ar->cmdbuf[2 * __nreg + 1] = cpu_to_le32(r); \ + __ar->cmdbuf[2 * __nreg + 2] = cpu_to_le32(v); \ + __nreg++; \ + if ((__nreg >= PAYLOAD_MAX/2)) { \ + if (IS_ACCEPTING_CMD(__ar)) \ + __err = ar->exec_cmd(__ar, AR9170_CMD_WREG, \ + 8 * __nreg, \ + (u8 *) &__ar->cmdbuf[1], \ + 0, NULL); \ + __nreg = 0; \ + if (__err) \ + goto __regwrite_out; \ + } \ +} while (0) + +#define ar9170_regwrite_finish() \ +__regwrite_out : \ + if (__nreg) { \ + if (IS_ACCEPTING_CMD(__ar)) \ + __err = ar->exec_cmd(__ar, AR9170_CMD_WREG, \ + 8 * __nreg, \ + (u8 *) &__ar->cmdbuf[1], \ + 0, NULL); \ + __nreg = 0; \ + } + +#define ar9170_regwrite_result() \ + __err; \ +} while (0); + +#endif /* __CMD_H */ diff --git a/drivers/net/wireless/ar9170/led.c b/drivers/net/wireless/ar9170/led.c new file mode 100644 index 0000000..341cead --- /dev/null +++ b/drivers/net/wireless/ar9170/led.c @@ -0,0 +1,171 @@ +/* + * Atheros AR9170 driver + * + * LED handling + * + * Copyright 2008, Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, see + * http://www.gnu.org/licenses/. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * Copyright (c) 2007-2008 Atheros Communications, Inc. 
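
The ar9170_regwrite_begin()/ar9170_regwrite()/ar9170_regwrite_finish() macros above are meant to be used as one bracketed sequence per group of register writes. A minimal usage sketch follows; it is not part of the patch, it mirrors how the mac80211 callbacks later in this series drive the macros (the slot-time register and the <<10 encoding are taken from that code), and the slot-time value itself is a placeholder.

/*
 * Illustrative sketch only: the intended call pattern for the
 * write-combining macros above.  Callers hold ar->mutex; the slot-time
 * write (value shifted by 10 bits) mirrors ar9170_op_bss_info_changed()
 * later in this series.
 */
static int ar9170_example_set_slottime(struct ar9170 *ar, u32 slottime)
{
	int err;

	ar9170_regwrite_begin(ar);

	ar9170_regwrite(AR9170_MAC_REG_SLOT_TIME, slottime << 10);

	ar9170_regwrite_finish();
	err = ar9170_regwrite_result();

	return err;
}
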
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "ar9170.h" +#include "cmd.h" + +int ar9170_set_leds_state(struct ar9170 *ar, u32 led_state) +{ + return ar9170_write_reg(ar, AR9170_GPIO_REG_DATA, led_state); +} + +int ar9170_init_leds(struct ar9170 *ar) +{ + int err; + + /* disable LEDs */ + /* GPIO [0/1 mode: output, 2/3: input] */ + err = ar9170_write_reg(ar, AR9170_GPIO_REG_PORT_TYPE, 3); + if (err) + goto out; + + /* GPIO 0/1 value: off */ + err = ar9170_set_leds_state(ar, 0); + +out: + return err; +} + +#ifdef CONFIG_AR9170_LEDS +static void ar9170_update_leds(struct work_struct *work) +{ + struct ar9170 *ar = container_of(work, struct ar9170, led_work.work); + int i, tmp, blink_delay = 1000; + u32 led_val = 0; + bool rerun = false; + + if (unlikely(!IS_ACCEPTING_CMD(ar))) + return ; + + mutex_lock(&ar->mutex); + for (i = 0; i < AR9170_NUM_LEDS; i++) + if (ar->leds[i].toggled) { + led_val |= 1 << i; + + tmp = 70 + 200 / (ar->leds[i].toggled); + if (tmp < blink_delay) + blink_delay = tmp; + + if (ar->leds[i].toggled > 1) + ar->leds[i].toggled = 0; + + rerun = true; + } + + ar9170_set_leds_state(ar, led_val); + mutex_unlock(&ar->mutex); + + if (rerun) + queue_delayed_work(ar->hw->workqueue, &ar->led_work, + msecs_to_jiffies(blink_delay)); +} + +static void ar9170_led_brightness_set(struct led_classdev *led, + enum led_brightness brightness) +{ + struct ar9170_led *arl = container_of(led, struct ar9170_led, l); + struct ar9170 *ar = arl->ar; + + arl->toggled++; + + if (likely(IS_ACCEPTING_CMD(ar) && brightness)) + queue_delayed_work(ar->hw->workqueue, &ar->led_work, HZ/10); +} + +static int ar9170_register_led(struct ar9170 *ar, int i, char *name, + char *trigger) +{ + int err; + + snprintf(ar->leds[i].name, sizeof(ar->leds[i].name), + "ar9170-%s::%s", wiphy_name(ar->hw->wiphy), name); + + ar->leds[i].ar = ar; + ar->leds[i].l.name = ar->leds[i].name; + ar->leds[i].l.brightness_set = ar9170_led_brightness_set; + ar->leds[i].l.brightness = 0; + ar->leds[i].l.default_trigger = trigger; + + err = led_classdev_register(wiphy_dev(ar->hw->wiphy), + &ar->leds[i].l); + if (err) + printk(KERN_ERR "%s: failed to register %s LED (%d).\n", + wiphy_name(ar->hw->wiphy), ar->leds[i].name, err); + else + ar->leds[i].registered = true; + + return err; +} + +void ar9170_unregister_leds(struct ar9170 *ar) +{ + int i; + + cancel_delayed_work_sync(&ar->led_work); + + for (i = 0; i < AR9170_NUM_LEDS; i++) + if (ar->leds[i].registered) { + led_classdev_unregister(&ar->leds[i].l); + ar->leds[i].registered = false; + } +} + +int ar9170_register_leds(struct ar9170 *ar) +{ + int err; + + INIT_DELAYED_WORK(&ar->led_work, ar9170_update_leds); + + err = ar9170_register_led(ar, 0, "tx", + ieee80211_get_tx_led_name(ar->hw)); + if (err) + goto fail; + + err = ar9170_register_led(ar, 1, "assoc", + 
ieee80211_get_assoc_led_name(ar->hw)); + if (err) + goto fail; + + return 0; + +fail: + ar9170_unregister_leds(ar); + return err; +} + +#endif /* CONFIG_AR9170_LEDS */ diff --git a/drivers/net/wireless/ar9170/main.c b/drivers/net/wireless/ar9170/main.c new file mode 100644 index 0000000..a3b5323 --- /dev/null +++ b/drivers/net/wireless/ar9170/main.c @@ -0,0 +1,1776 @@ +/* + * Atheros AR9170 driver + * + * mac80211 interaction code + * + * Copyright 2008, Johannes Berg + * Copyright 2009, Christian Lamparter + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, see + * http://www.gnu.org/licenses/. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * Copyright (c) 2007-2008 Atheros Communications, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * BIG FAT TODO: + * + * By the looks of things: these devices share a lot of things like + * EEPROM layout/design and PHY code with other Atheros WIFI products. + * So this driver/library will eventually become ath9k code... 
or vice versa ;-) + */ + +#include +#include +#include +#include +#include "ar9170.h" +#include "hw.h" +#include "cmd.h" + +static int modparam_nohwcrypt; +module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); +MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); +MODULE_AUTHOR("Johannes Berg "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Atheros shared code for AR9170 wireless devices"); + +#define RATE(_bitrate, _hw_rate, _txpidx, _flags) { \ + .bitrate = (_bitrate), \ + .flags = (_flags), \ + .hw_value = (_hw_rate) | (_txpidx) << 4, \ +} + +static struct ieee80211_rate __ar9170_ratetable[] = { + RATE(10, 0, 0, 0), + RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE), + RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE), + RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE), + RATE(60, 0xb, 0, 0), + RATE(90, 0xf, 0, 0), + RATE(120, 0xa, 0, 0), + RATE(180, 0xe, 0, 0), + RATE(240, 0x9, 0, 0), + RATE(360, 0xd, 1, 0), + RATE(480, 0x8, 2, 0), + RATE(540, 0xc, 3, 0), +}; +#undef RATE + +#define ar9170_g_ratetable (__ar9170_ratetable + 0) +#define ar9170_g_ratetable_size 12 +#define ar9170_a_ratetable (__ar9170_ratetable + 4) +#define ar9170_a_ratetable_size 8 + +/* + * NB: The hw_value is used as an index into the ar9170_phy_freq_params + * array in phy.c so that we don't have to do frequency lookups! + */ +#define CHAN(_freq, _idx) { \ + .center_freq = (_freq), \ + .hw_value = (_idx), \ + .max_power = 18, /* XXX */ \ +} + +static struct ieee80211_channel ar9170_2ghz_chantable[] = { + CHAN(2412, 0), + CHAN(2417, 1), + CHAN(2422, 2), + CHAN(2427, 3), + CHAN(2432, 4), + CHAN(2437, 5), + CHAN(2442, 6), + CHAN(2447, 7), + CHAN(2452, 8), + CHAN(2457, 9), + CHAN(2462, 10), + CHAN(2467, 11), + CHAN(2472, 12), + CHAN(2484, 13), +}; + +static struct ieee80211_channel ar9170_5ghz_chantable[] = { + CHAN(4920, 14), + CHAN(4940, 15), + CHAN(4960, 16), + CHAN(4980, 17), + CHAN(5040, 18), + CHAN(5060, 19), + CHAN(5080, 20), + CHAN(5180, 21), + CHAN(5200, 22), + CHAN(5220, 23), + CHAN(5240, 24), + CHAN(5260, 25), + CHAN(5280, 26), + CHAN(5300, 27), + CHAN(5320, 28), + CHAN(5500, 29), + CHAN(5520, 30), + CHAN(5540, 31), + CHAN(5560, 32), + CHAN(5580, 33), + CHAN(5600, 34), + CHAN(5620, 35), + CHAN(5640, 36), + CHAN(5660, 37), + CHAN(5680, 38), + CHAN(5700, 39), + CHAN(5745, 40), + CHAN(5765, 41), + CHAN(5785, 42), + CHAN(5805, 43), + CHAN(5825, 44), + CHAN(5170, 45), + CHAN(5190, 46), + CHAN(5210, 47), + CHAN(5230, 48), +}; +#undef CHAN + +static struct ieee80211_supported_band ar9170_band_2GHz = { + .channels = ar9170_2ghz_chantable, + .n_channels = ARRAY_SIZE(ar9170_2ghz_chantable), + .bitrates = ar9170_g_ratetable, + .n_bitrates = ar9170_g_ratetable_size, +}; + +#ifdef AR9170_QUEUE_DEBUG +/* + * In case some wants works with AR9170's crazy tx_status queueing techniques. + * He might need this rather useful probing function. + * + * NOTE: caller must hold the queue's spinlock! 
+ */ + +static void ar9170_print_txheader(struct ar9170 *ar, struct sk_buff *skb) +{ + struct ar9170_tx_control *txc = (void *) skb->data; + struct ieee80211_hdr *hdr = (void *)txc->frame_data; + + printk(KERN_DEBUG "%s: => FRAME [skb:%p, queue:%d, DA:[%pM] " + "mac_control:%04x, phy_control:%08x]\n", + wiphy_name(ar->hw->wiphy), skb, skb_get_queue_mapping(skb), + ieee80211_get_DA(hdr), le16_to_cpu(txc->mac_control), + le32_to_cpu(txc->phy_control)); +} + +static void ar9170_dump_station_tx_status_queue(struct ar9170 *ar, + struct sk_buff_head *queue) +{ + struct sk_buff *skb; + int i = 0; + + printk(KERN_DEBUG "---[ cut here ]---\n"); + printk(KERN_DEBUG "%s: %d entries in tx_status queue.\n", + wiphy_name(ar->hw->wiphy), skb_queue_len(queue)); + + skb_queue_walk(queue, skb) { + struct ar9170_tx_control *txc = (void *) skb->data; + struct ieee80211_hdr *hdr = (void *)txc->frame_data; + + printk(KERN_DEBUG "index:%d => \n", i); + ar9170_print_txheader(ar, skb); + } + printk(KERN_DEBUG "---[ end ]---\n"); +} +#endif /* AR9170_QUEUE_DEBUG */ + +static struct ieee80211_supported_band ar9170_band_5GHz = { + .channels = ar9170_5ghz_chantable, + .n_channels = ARRAY_SIZE(ar9170_5ghz_chantable), + .bitrates = ar9170_a_ratetable, + .n_bitrates = ar9170_a_ratetable_size, +}; + +void ar9170_handle_tx_status(struct ar9170 *ar, struct sk_buff *skb, + bool valid_status, u16 tx_status) +{ + struct ieee80211_tx_info *txinfo; + unsigned int retries = 0, queue = skb_get_queue_mapping(skb); + unsigned long flags; + + spin_lock_irqsave(&ar->tx_stats_lock, flags); + ar->tx_stats[queue].len--; + if (ieee80211_queue_stopped(ar->hw, queue)) + ieee80211_wake_queue(ar->hw, queue); + spin_unlock_irqrestore(&ar->tx_stats_lock, flags); + + txinfo = IEEE80211_SKB_CB(skb); + ieee80211_tx_info_clear_status(txinfo); + + switch (tx_status) { + case AR9170_TX_STATUS_RETRY: + retries = 2; + case AR9170_TX_STATUS_COMPLETE: + txinfo->flags |= IEEE80211_TX_STAT_ACK; + break; + + case AR9170_TX_STATUS_FAILED: + retries = ar->hw->conf.long_frame_max_tx_count; + break; + + default: + printk(KERN_ERR "%s: invalid tx_status response (%x).\n", + wiphy_name(ar->hw->wiphy), tx_status); + break; + } + + if (valid_status) + txinfo->status.rates[0].count = retries + 1; + + skb_pull(skb, sizeof(struct ar9170_tx_control)); + ieee80211_tx_status_irqsafe(ar->hw, skb); +} +EXPORT_SYMBOL_GPL(ar9170_handle_tx_status); + +static struct sk_buff *ar9170_find_skb_in_queue(struct ar9170 *ar, + const u8 *mac, + const u32 queue, + struct sk_buff_head *q) +{ + unsigned long flags; + struct sk_buff *skb; + + spin_lock_irqsave(&q->lock, flags); + skb_queue_walk(q, skb) { + struct ar9170_tx_control *txc = (void *) skb->data; + struct ieee80211_hdr *hdr = (void *) txc->frame_data; + u32 txc_queue = (le32_to_cpu(txc->phy_control) & + AR9170_TX_PHY_QOS_MASK) >> + AR9170_TX_PHY_QOS_SHIFT; + + if ((queue != txc_queue) || + (compare_ether_addr(ieee80211_get_DA(hdr), mac))) + continue; + + __skb_unlink(skb, q); + spin_unlock_irqrestore(&q->lock, flags); + return skb; + } + spin_unlock_irqrestore(&q->lock, flags); + return NULL; +} + +static struct sk_buff *ar9170_find_skb_in_queue_by_mac(struct ar9170 *ar, + const u8 *mac, + struct sk_buff_head *q) +{ + unsigned long flags; + struct sk_buff *skb; + + spin_lock_irqsave(&q->lock, flags); + skb_queue_walk(q, skb) { + struct ar9170_tx_control *txc = (void *) skb->data; + struct ieee80211_hdr *hdr = (void *) txc->frame_data; + + if (compare_ether_addr(ieee80211_get_DA(hdr), mac)) + continue; + + __skb_unlink(skb, q); 
+ spin_unlock_irqrestore(&q->lock, flags); + return skb; + } + spin_unlock_irqrestore(&q->lock, flags); + return NULL; +} + +static struct sk_buff *ar9170_find_skb_in_queue_by_txcq(struct ar9170 *ar, + const u32 queue, + struct sk_buff_head *q) +{ + unsigned long flags; + struct sk_buff *skb; + + spin_lock_irqsave(&q->lock, flags); + skb_queue_walk(q, skb) { + struct ar9170_tx_control *txc = (void *) skb->data; + u32 txc_queue = (le32_to_cpu(txc->phy_control) & + AR9170_TX_PHY_QOS_MASK) >> + AR9170_TX_PHY_QOS_SHIFT; + + if (queue != txc_queue) + continue; + + __skb_unlink(skb, q); + spin_unlock_irqrestore(&q->lock, flags); + return skb; + } + spin_unlock_irqrestore(&q->lock, flags); + return NULL; +} + +static struct sk_buff *ar9170_find_queued_skb(struct ar9170 *ar, const u8 *mac, + const u32 queue) +{ + struct ieee80211_sta *sta; + struct sk_buff *skb; + + /* + * Unfortunately, the firmware does not tell to which (queued) frame + * this transmission status report belongs to. + * + * So we have to make risky guesses - with the scarce information + * the firmware provided (-> destination MAC, and phy_control) - + * and hope that we picked the right one... + */ + rcu_read_lock(); + sta = ieee80211_find_sta(ar->hw, mac); + + if (likely(sta)) { + struct ar9170_sta_info *sta_priv = (void *) sta->drv_priv; + skb = ar9170_find_skb_in_queue_by_txcq(ar, queue, + &sta_priv->tx_status); + } else { + /* STA is not in database (yet/anymore). */ + + /* scan the waste bin for likely candidates */ + skb = ar9170_find_skb_in_queue(ar, mac, queue, + &ar->global_tx_status_waste); + if (!skb) { + /* so it still _must_ be in the global list. */ + skb = ar9170_find_skb_in_queue(ar, mac, queue, + &ar->global_tx_status); + } + } + rcu_read_unlock(); + +#ifdef AR9170_QUEUE_DEBUG + if (unlikely((!skb) && net_ratelimit())) { + printk(KERN_ERR "%s: ESS:[%pM] does not have any " + "outstanding frames in this queue (%d).\n", + wiphy_name(ar->hw->wiphy), mac, queue); + } +#endif /* AR9170_QUEUE_DEBUG */ + + return skb; +} + +/* + * This worker tries to keep the global tx_status queue empty. + * So we can guarantee that incoming tx_status reports for + * unregistered stations are always synced with the actual + * frame - which we think - belongs to. + */ + +static void ar9170_tx_status_janitor(struct work_struct *work) +{ + struct ar9170 *ar = container_of(work, struct ar9170, + filter_config_work); + struct sk_buff *skb; + + /* recycle the garbage back to mac80211... one by one. */ + while ((skb = skb_dequeue(&ar->global_tx_status_waste))) { +#ifdef AR9170_QUEUE_DEBUG + printk(KERN_DEBUG "%s: dispose queued frame =>\n", + wiphy_name(ar->hw->wiphy)); + ar9170_print_txheader(ar, skb); +#endif /* AR9170_QUEUE_DEBUG */ + ar9170_handle_tx_status(ar, skb, false, + AR9170_TX_STATUS_FAILED); + } + + + + while ((skb = skb_dequeue(&ar->global_tx_status))) { +#ifdef AR9170_QUEUE_DEBUG + printk(KERN_DEBUG "%s: moving frame into waste queue =>\n", + wiphy_name(ar->hw->wiphy)); + + ar9170_print_txheader(ar, skb); +#endif /* AR9170_QUEUE_DEBUG */ + skb_queue_tail(&ar->global_tx_status_waste, skb); + } + + /* recall the janitor in 100ms - if there's garbage in the can. 
*/ + if (skb_queue_len(&ar->global_tx_status_waste) > 0) + queue_delayed_work(ar->hw->workqueue, &ar->tx_status_janitor, + msecs_to_jiffies(100)); +} + +static void ar9170_handle_command_response(struct ar9170 *ar, + void *buf, u32 len) +{ + struct ar9170_cmd_response *cmd = (void *) buf; + + if ((cmd->type & 0xc0) != 0xc0) { + ar->callback_cmd(ar, len, buf); + return; + } + + /* hardware event handlers */ + switch (cmd->type) { + case 0xc1: { + /* + * TX status notification: + * bytes: 0c c1 XX YY M1 M2 M3 M4 M5 M6 R4 R3 R2 R1 S2 S1 + * + * XX always 81 + * YY always 00 + * M1-M6 is the MAC address + * R1-R4 is the transmit rate + * S1-S2 is the transmit status + */ + + struct sk_buff *skb; + u32 queue = (le32_to_cpu(cmd->tx_status.rate) & + AR9170_TX_PHY_QOS_MASK) >> AR9170_TX_PHY_QOS_SHIFT; + + skb = ar9170_find_queued_skb(ar, cmd->tx_status.dst, queue); + if (unlikely(!skb)) + return ; + + ar9170_handle_tx_status(ar, skb, true, + le16_to_cpu(cmd->tx_status.status)); + break; + } + + case 0xc0: + /* + * pre-TBTT event + */ + if (ar->vif && ar->vif->type == NL80211_IFTYPE_AP) + queue_work(ar->hw->workqueue, &ar->beacon_work); + break; + + case 0xc2: + /* + * (IBSS) beacon send notification + * bytes: 04 c2 XX YY B4 B3 B2 B1 + * + * XX always 80 + * YY always 00 + * B1-B4 "should" be the number of send out beacons. + */ + break; + + case 0xc3: + /* End of Atim Window */ + break; + + case 0xc4: + case 0xc5: + /* BlockACK events */ + break; + + case 0xc6: + /* Watchdog Interrupt */ + break; + + case 0xc9: + /* retransmission issue / SIFS/EIFS collision ?! */ + break; + + default: + printk(KERN_INFO "received unhandled event %x\n", cmd->type); + print_hex_dump_bytes("dump:", DUMP_PREFIX_NONE, buf, len); + break; + } +} + +/* + * If the frame alignment is right (or the kernel has + * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS), and there + * is only a single MPDU in the USB frame, then we can + * submit to mac80211 the SKB directly. However, since + * there may be multiple packets in one SKB in stream + * mode, and we need to observe the proper ordering, + * this is non-trivial. 
+ */ +static void ar9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len) +{ + struct sk_buff *skb; + struct ar9170_rx_head *head = (void *)buf; + struct ar9170_rx_tail *tail; + struct ieee80211_rx_status status; + int mpdu_len, i; + u8 error, antennas = 0, decrypt; + __le16 fc; + int reserved; + + if (unlikely(!IS_STARTED(ar))) + return ; + + /* Received MPDU */ + mpdu_len = len; + mpdu_len -= sizeof(struct ar9170_rx_head); + mpdu_len -= sizeof(struct ar9170_rx_tail); + BUILD_BUG_ON(sizeof(struct ar9170_rx_head) != 12); + BUILD_BUG_ON(sizeof(struct ar9170_rx_tail) != 24); + + if (mpdu_len <= FCS_LEN) + return; + + tail = (void *)(buf + sizeof(struct ar9170_rx_head) + mpdu_len); + + for (i = 0; i < 3; i++) + if (tail->rssi[i] != 0x80) + antennas |= BIT(i); + + /* post-process RSSI */ + for (i = 0; i < 7; i++) + if (tail->rssi[i] & 0x80) + tail->rssi[i] = ((tail->rssi[i] & 0x7f) + 1) & 0x7f; + + memset(&status, 0, sizeof(status)); + + status.band = ar->channel->band; + status.freq = ar->channel->center_freq; + status.signal = ar->noise[0] + tail->rssi_combined; + status.noise = ar->noise[0]; + status.antenna = antennas; + + switch (tail->status & AR9170_RX_STATUS_MODULATION_MASK) { + case AR9170_RX_STATUS_MODULATION_CCK: + if (tail->status & AR9170_RX_STATUS_SHORT_PREAMBLE) + status.flag |= RX_FLAG_SHORTPRE; + switch (head->plcp[0]) { + case 0x0a: + status.rate_idx = 0; + break; + case 0x14: + status.rate_idx = 1; + break; + case 0x37: + status.rate_idx = 2; + break; + case 0x6e: + status.rate_idx = 3; + break; + default: + if ((!ar->sniffer_enabled) && (net_ratelimit())) + printk(KERN_ERR "%s: invalid plcp cck rate " + "(%x).\n", wiphy_name(ar->hw->wiphy), + head->plcp[0]); + return; + } + break; + case AR9170_RX_STATUS_MODULATION_OFDM: + switch (head->plcp[0] & 0xF) { + case 0xB: + status.rate_idx = 0; + break; + case 0xF: + status.rate_idx = 1; + break; + case 0xA: + status.rate_idx = 2; + break; + case 0xE: + status.rate_idx = 3; + break; + case 0x9: + status.rate_idx = 4; + break; + case 0xD: + status.rate_idx = 5; + break; + case 0x8: + status.rate_idx = 6; + break; + case 0xC: + status.rate_idx = 7; + break; + default: + if ((!ar->sniffer_enabled) && (net_ratelimit())) + printk(KERN_ERR "%s: invalid plcp ofdm rate " + "(%x).\n", wiphy_name(ar->hw->wiphy), + head->plcp[0]); + return; + } + if (status.band == IEEE80211_BAND_2GHZ) + status.rate_idx += 4; + break; + case AR9170_RX_STATUS_MODULATION_HT: + case AR9170_RX_STATUS_MODULATION_DUPOFDM: + /* XXX */ + + if (net_ratelimit()) + printk(KERN_ERR "%s: invalid modulation\n", + wiphy_name(ar->hw->wiphy)); + return; + } + + error = tail->error; + + if (error & AR9170_RX_ERROR_MMIC) { + status.flag |= RX_FLAG_MMIC_ERROR; + error &= ~AR9170_RX_ERROR_MMIC; + } + + if (error & AR9170_RX_ERROR_PLCP) { + status.flag |= RX_FLAG_FAILED_PLCP_CRC; + error &= ~AR9170_RX_ERROR_PLCP; + } + + if (error & AR9170_RX_ERROR_FCS) { + status.flag |= RX_FLAG_FAILED_FCS_CRC; + error &= ~AR9170_RX_ERROR_FCS; + } + + decrypt = ar9170_get_decrypt_type(tail); + if (!(decrypt & AR9170_RX_ENC_SOFTWARE) && + decrypt != AR9170_ENC_ALG_NONE) + status.flag |= RX_FLAG_DECRYPTED; + + /* ignore wrong RA errors */ + error &= ~AR9170_RX_ERROR_WRONG_RA; + + if (error & AR9170_RX_ERROR_DECRYPT) { + error &= ~AR9170_RX_ERROR_DECRYPT; + + /* + * Rx decryption is done in place, + * the original data is lost anyway. 
+ */ + return ; + } + + /* drop any other error frames */ + if ((error) && (net_ratelimit())) { + printk(KERN_DEBUG "%s: errors: %#x\n", + wiphy_name(ar->hw->wiphy), error); + return; + } + + buf += sizeof(struct ar9170_rx_head); + fc = *(__le16 *)buf; + + if (ieee80211_is_data_qos(fc) ^ ieee80211_has_a4(fc)) + reserved = 32 + 2; + else + reserved = 32; + + skb = dev_alloc_skb(mpdu_len + reserved); + if (!skb) + return; + + skb_reserve(skb, reserved); + memcpy(skb_put(skb, mpdu_len), buf, mpdu_len); + ieee80211_rx_irqsafe(ar->hw, skb, &status); +} + +/* + * TODO: + * It looks like AR9170 supports more than just the USB transport interface. + * Unfortunately, there is no available information what parts of the + * precendent and following code fragments is device specific and what not. + * For now, everything stays here, until some SPI chips pop up. + */ +void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb) +{ + unsigned int i, tlen, resplen; + u8 *tbuf, *respbuf; + + tbuf = skb->data; + tlen = skb->len; + + while (tlen >= 4) { + int clen = tbuf[1] << 8 | tbuf[0]; + int wlen = (clen + 3) & ~3; + + /* + * parse stream (if any) + */ + if (tbuf[2] != 0 || tbuf[3] != 0x4e) { + printk(KERN_ERR "%s: missing tag!\n", + wiphy_name(ar->hw->wiphy)); + return ; + } + if (wlen > tlen - 4) { + printk(KERN_ERR "%s: invalid RX (%d, %d, %d)\n", + wiphy_name(ar->hw->wiphy), clen, wlen, tlen); + print_hex_dump(KERN_DEBUG, "data: ", + DUMP_PREFIX_OFFSET, + 16, 1, tbuf, tlen, true); + return ; + } + resplen = clen; + respbuf = tbuf + 4; + tbuf += wlen + 4; + tlen -= wlen + 4; + + i = 0; + + /* weird thing, but this is the same in the original driver */ + while (resplen > 2 && i < 12 && + respbuf[0] == 0xff && respbuf[1] == 0xff) { + i += 2; + resplen -= 2; + respbuf += 2; + } + + if (resplen < 4) + continue; + + /* found the 6 * 0xffff marker? 
*/ + if (i == 12) + ar9170_handle_command_response(ar, respbuf, resplen); + else + ar9170_handle_mpdu(ar, respbuf, resplen); + } + + if (tlen) + printk(KERN_ERR "%s: buffer remains!\n", + wiphy_name(ar->hw->wiphy)); +} +EXPORT_SYMBOL_GPL(ar9170_rx); + +#define AR9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop) \ +do { \ + queue.aifs = ai_fs; \ + queue.cw_min = cwmin; \ + queue.cw_max = cwmax; \ + queue.txop = _txop; \ +} while (0) + +static int ar9170_op_start(struct ieee80211_hw *hw) +{ + struct ar9170 *ar = hw->priv; + int err, i; + + mutex_lock(&ar->mutex); + + /* reinitialize queues statistics */ + memset(&ar->tx_stats, 0, sizeof(ar->tx_stats)); + for (i = 0; i < ARRAY_SIZE(ar->tx_stats); i++) + ar->tx_stats[i].limit = 8; + + /* reset QoS defaults */ + AR9170_FILL_QUEUE(ar->edcf[0], 3, 15, 1023, 0); /* BEST EFFORT*/ + AR9170_FILL_QUEUE(ar->edcf[1], 7, 15, 1023, 0); /* BACKGROUND */ + AR9170_FILL_QUEUE(ar->edcf[2], 2, 7, 15, 94); /* VIDEO */ + AR9170_FILL_QUEUE(ar->edcf[3], 2, 3, 7, 47); /* VOICE */ + AR9170_FILL_QUEUE(ar->edcf[4], 2, 3, 7, 0); /* SPECIAL */ + + err = ar->open(ar); + if (err) + goto out; + + err = ar9170_init_mac(ar); + if (err) + goto out; + + err = ar9170_set_qos(ar); + if (err) + goto out; + + err = ar9170_init_phy(ar, IEEE80211_BAND_2GHZ); + if (err) + goto out; + + err = ar9170_init_rf(ar); + if (err) + goto out; + + /* start DMA */ + err = ar9170_write_reg(ar, 0x1c3d30, 0x100); + if (err) + goto out; + + ar->state = AR9170_STARTED; + +out: + mutex_unlock(&ar->mutex); + return err; +} + +static void ar9170_op_stop(struct ieee80211_hw *hw) +{ + struct ar9170 *ar = hw->priv; + + if (IS_STARTED(ar)) + ar->state = AR9170_IDLE; + + mutex_lock(&ar->mutex); + + cancel_delayed_work_sync(&ar->tx_status_janitor); + cancel_work_sync(&ar->filter_config_work); + cancel_work_sync(&ar->beacon_work); + skb_queue_purge(&ar->global_tx_status_waste); + skb_queue_purge(&ar->global_tx_status); + + if (IS_ACCEPTING_CMD(ar)) { + ar9170_set_leds_state(ar, 0); + + /* stop DMA */ + ar9170_write_reg(ar, 0x1c3d30, 0); + ar->stop(ar); + } + + mutex_unlock(&ar->mutex); +} + +int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct ar9170 *ar = hw->priv; + struct ieee80211_hdr *hdr; + struct ar9170_tx_control *txc; + struct ieee80211_tx_info *info; + struct ieee80211_rate *rate = NULL; + struct ieee80211_tx_rate *txrate; + unsigned int queue = skb_get_queue_mapping(skb); + unsigned long flags = 0; + struct ar9170_sta_info *sta_info = NULL; + u32 power, chains; + u16 keytype = 0; + u16 len, icv = 0; + int err; + bool tx_status; + + if (unlikely(!IS_STARTED(ar))) + goto err_free; + + hdr = (void *)skb->data; + info = IEEE80211_SKB_CB(skb); + len = skb->len; + + spin_lock_irqsave(&ar->tx_stats_lock, flags); + if (ar->tx_stats[queue].limit < ar->tx_stats[queue].len) { + spin_unlock_irqrestore(&ar->tx_stats_lock, flags); + return NETDEV_TX_OK; + } + + ar->tx_stats[queue].len++; + ar->tx_stats[queue].count++; + if (ar->tx_stats[queue].limit == ar->tx_stats[queue].len) + ieee80211_stop_queue(hw, queue); + + spin_unlock_irqrestore(&ar->tx_stats_lock, flags); + + txc = (void *)skb_push(skb, sizeof(*txc)); + + tx_status = (((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) != 0) || + ((info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) != 0)); + + if (info->control.hw_key) { + icv = info->control.hw_key->icv_len; + + switch (info->control.hw_key->alg) { + case ALG_WEP: + keytype = AR9170_TX_MAC_ENCR_RC4; + break; + case ALG_TKIP: + keytype = AR9170_TX_MAC_ENCR_RC4; + break; + case ALG_CCMP: 
+ keytype = AR9170_TX_MAC_ENCR_AES; + break; + default: + WARN_ON(1); + goto err_dequeue; + } + } + + /* Length */ + txc->length = cpu_to_le16(len + icv + 4); + + txc->mac_control = cpu_to_le16(AR9170_TX_MAC_HW_DURATION | + AR9170_TX_MAC_BACKOFF); + txc->mac_control |= cpu_to_le16(ar9170_qos_hwmap[queue] << + AR9170_TX_MAC_QOS_SHIFT); + txc->mac_control |= cpu_to_le16(keytype); + txc->phy_control = cpu_to_le32(0); + + if (info->flags & IEEE80211_TX_CTL_NO_ACK) + txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_NO_ACK); + + if (info->flags & IEEE80211_TX_CTL_AMPDU) + txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR); + + txrate = &info->control.rates[0]; + + if (txrate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT) + txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS); + else if (txrate->flags & IEEE80211_TX_RC_USE_RTS_CTS) + txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS); + + if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD) + txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD); + + if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) + txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_SHORT_PREAMBLE); + + if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ); + /* this works because 40 MHz is 2 and dup is 3 */ + if (txrate->flags & IEEE80211_TX_RC_DUP_DATA) + txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ_DUP); + + if (txrate->flags & IEEE80211_TX_RC_SHORT_GI) + txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_SHORT_GI); + + if (txrate->flags & IEEE80211_TX_RC_MCS) { + u32 r = txrate->idx; + u8 *txpower; + + r <<= AR9170_TX_PHY_MCS_SHIFT; + if (WARN_ON(r & ~AR9170_TX_PHY_MCS_MASK)) + goto err_dequeue; + txc->phy_control |= cpu_to_le32(r & AR9170_TX_PHY_MCS_MASK); + txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_MOD_HT); + + if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) { + if (info->band == IEEE80211_BAND_5GHZ) + txpower = ar->power_5G_ht40; + else + txpower = ar->power_2G_ht40; + } else { + if (info->band == IEEE80211_BAND_5GHZ) + txpower = ar->power_5G_ht20; + else + txpower = ar->power_2G_ht20; + } + + power = txpower[(txrate->idx) & 7]; + } else { + u8 *txpower; + u32 mod; + u32 phyrate; + u8 idx = txrate->idx; + + if (info->band != IEEE80211_BAND_2GHZ) { + idx += 4; + txpower = ar->power_5G_leg; + mod = AR9170_TX_PHY_MOD_OFDM; + } else { + if (idx < 4) { + txpower = ar->power_2G_cck; + mod = AR9170_TX_PHY_MOD_CCK; + } else { + mod = AR9170_TX_PHY_MOD_OFDM; + txpower = ar->power_2G_ofdm; + } + } + + rate = &__ar9170_ratetable[idx]; + + phyrate = rate->hw_value & 0xF; + power = txpower[(rate->hw_value & 0x30) >> 4]; + phyrate <<= AR9170_TX_PHY_MCS_SHIFT; + + txc->phy_control |= cpu_to_le32(mod); + txc->phy_control |= cpu_to_le32(phyrate); + } + + power <<= AR9170_TX_PHY_TX_PWR_SHIFT; + power &= AR9170_TX_PHY_TX_PWR_MASK; + txc->phy_control |= cpu_to_le32(power); + + /* set TX chains */ + if (ar->eeprom.tx_mask == 1) { + chains = AR9170_TX_PHY_TXCHAIN_1; + } else { + chains = AR9170_TX_PHY_TXCHAIN_2; + + /* >= 36M legacy OFDM - use only one chain */ + if (rate && rate->bitrate >= 360) + chains = AR9170_TX_PHY_TXCHAIN_1; + } + txc->phy_control |= cpu_to_le32(chains << AR9170_TX_PHY_TXCHAIN_SHIFT); + + if (tx_status) { + txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE); + /* + * WARNING: + * Putting the QoS queue bits into an unexplored territory is + * certainly not elegant. + * + * In my defense: This idea provides a reasonable way to + * smuggle valuable information to the tx_status callback. 
+ * Also, the idea behind this bit-abuse came straight from + * the original driver code. + */ + + txc->phy_control |= + cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT); + + if (info->control.sta) { + sta_info = (void *) info->control.sta->drv_priv; + skb_queue_tail(&sta_info->tx_status, skb); + } else { + skb_queue_tail(&ar->global_tx_status, skb); + + queue_delayed_work(ar->hw->workqueue, + &ar->tx_status_janitor, + msecs_to_jiffies(100)); + } + } + + err = ar->tx(ar, skb, tx_status, 0); + if (unlikely(tx_status && err)) { + if (info->control.sta) + skb_unlink(skb, &sta_info->tx_status); + else + skb_unlink(skb, &ar->global_tx_status); + } + + return NETDEV_TX_OK; + +err_dequeue: + spin_lock_irqsave(&ar->tx_stats_lock, flags); + ar->tx_stats[queue].len--; + ar->tx_stats[queue].count--; + spin_unlock_irqrestore(&ar->tx_stats_lock, flags); + +err_free: + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + +static int ar9170_op_add_interface(struct ieee80211_hw *hw, + struct ieee80211_if_init_conf *conf) +{ + struct ar9170 *ar = hw->priv; + int err = 0; + + mutex_lock(&ar->mutex); + + if (ar->vif) { + err = -EBUSY; + goto unlock; + } + + ar->vif = conf->vif; + memcpy(ar->mac_addr, conf->mac_addr, ETH_ALEN); + + if (modparam_nohwcrypt || (ar->vif->type != NL80211_IFTYPE_STATION)) { + ar->rx_software_decryption = true; + ar->disable_offload = true; + } + + ar->cur_filter = 0; + ar->want_filter = AR9170_MAC_REG_FTF_DEFAULTS; + err = ar9170_update_frame_filter(ar); + if (err) + goto unlock; + + err = ar9170_set_operating_mode(ar); + +unlock: + mutex_unlock(&ar->mutex); + return err; +} + +static void ar9170_op_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_if_init_conf *conf) +{ + struct ar9170 *ar = hw->priv; + + mutex_lock(&ar->mutex); + ar->vif = NULL; + ar->want_filter = 0; + ar9170_update_frame_filter(ar); + ar9170_set_beacon_timers(ar); + dev_kfree_skb(ar->beacon); + ar->beacon = NULL; + ar->sniffer_enabled = false; + ar->rx_software_decryption = false; + ar9170_set_operating_mode(ar); + mutex_unlock(&ar->mutex); +} + +static int ar9170_op_config(struct ieee80211_hw *hw, u32 changed) +{ + struct ar9170 *ar = hw->priv; + int err = 0; + + mutex_lock(&ar->mutex); + + if (changed & IEEE80211_CONF_CHANGE_RADIO_ENABLED) { + /* TODO */ + err = 0; + } + + if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) { + /* TODO */ + err = 0; + } + + if (changed & IEEE80211_CONF_CHANGE_PS) { + /* TODO */ + err = 0; + } + + if (changed & IEEE80211_CONF_CHANGE_POWER) { + /* TODO */ + err = 0; + } + + if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) { + /* + * is it long_frame_max_tx_count or short_frame_max_tx_count? 
+ */ + + err = ar9170_set_hwretry_limit(ar, + ar->hw->conf.long_frame_max_tx_count); + if (err) + goto out; + } + + if (changed & IEEE80211_CONF_CHANGE_BEACON_INTERVAL) { + err = ar9170_set_beacon_timers(ar); + if (err) + goto out; + } + + if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { + err = ar9170_set_channel(ar, hw->conf.channel, + AR9170_RFI_NONE, AR9170_BW_20); + if (err) + goto out; + /* adjust slot time for 5 GHz */ + if (hw->conf.channel->band == IEEE80211_BAND_5GHZ) + err = ar9170_write_reg(ar, AR9170_MAC_REG_SLOT_TIME, + 9 << 10); + } + +out: + mutex_unlock(&ar->mutex); + return err; +} + +static int ar9170_op_config_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_if_conf *conf) +{ + struct ar9170 *ar = hw->priv; + int err = 0; + + mutex_lock(&ar->mutex); + + if (conf->changed & IEEE80211_IFCC_BSSID) { + memcpy(ar->bssid, conf->bssid, ETH_ALEN); + err = ar9170_set_operating_mode(ar); + } + + if (conf->changed & IEEE80211_IFCC_BEACON) { + err = ar9170_update_beacon(ar); + + if (err) + goto out; + err = ar9170_set_beacon_timers(ar); + } + +out: + mutex_unlock(&ar->mutex); + return err; +} + +static void ar9170_set_filters(struct work_struct *work) +{ + struct ar9170 *ar = container_of(work, struct ar9170, + filter_config_work); + int err; + + mutex_lock(&ar->mutex); + if (unlikely(!IS_STARTED(ar))) + goto unlock; + + if (ar->filter_changed & AR9170_FILTER_CHANGED_PROMISC) { + err = ar9170_set_operating_mode(ar); + if (err) + goto unlock; + } + + if (ar->filter_changed & AR9170_FILTER_CHANGED_MULTICAST) { + err = ar9170_update_multicast(ar); + if (err) + goto unlock; + } + + if (ar->filter_changed & AR9170_FILTER_CHANGED_FRAMEFILTER) + err = ar9170_update_frame_filter(ar); + +unlock: + mutex_unlock(&ar->mutex); +} + +static void ar9170_op_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *new_flags, + int mc_count, struct dev_mc_list *mclist) +{ + struct ar9170 *ar = hw->priv; + + /* mask supported flags */ + *new_flags &= FIF_ALLMULTI | FIF_CONTROL | FIF_BCN_PRBRESP_PROMISC | + FIF_PROMISC_IN_BSS; + + /* + * We can support more by setting the sniffer bit and + * then checking the error flags, later. 
+ */ + + if (changed_flags & FIF_ALLMULTI) { + if (*new_flags & FIF_ALLMULTI) { + ar->want_mc_hash = ~0ULL; + } else { + u64 mchash; + int i; + + /* always get broadcast frames */ + mchash = 1ULL << (0xff>>2); + + for (i = 0; i < mc_count; i++) { + if (WARN_ON(!mclist)) + break; + mchash |= 1ULL << (mclist->dmi_addr[5] >> 2); + mclist = mclist->next; + } + ar->want_mc_hash = mchash; + } + ar->filter_changed |= AR9170_FILTER_CHANGED_MULTICAST; + } + + if (changed_flags & FIF_CONTROL) { + u32 filter = AR9170_MAC_REG_FTF_PSPOLL | + AR9170_MAC_REG_FTF_RTS | + AR9170_MAC_REG_FTF_CTS | + AR9170_MAC_REG_FTF_ACK | + AR9170_MAC_REG_FTF_CFE | + AR9170_MAC_REG_FTF_CFE_ACK; + + if (*new_flags & FIF_CONTROL) + ar->want_filter = ar->cur_filter | filter; + else + ar->want_filter = ar->cur_filter & ~filter; + + ar->filter_changed |= AR9170_FILTER_CHANGED_FRAMEFILTER; + } + + if (changed_flags & FIF_PROMISC_IN_BSS) { + ar->sniffer_enabled = ((*new_flags) & FIF_PROMISC_IN_BSS) != 0; + ar->filter_changed |= AR9170_FILTER_CHANGED_PROMISC; + } + + if (likely(IS_STARTED(ar))) + queue_work(ar->hw->workqueue, &ar->filter_config_work); +} + +static void ar9170_op_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changed) +{ + struct ar9170 *ar = hw->priv; + int err = 0; + + mutex_lock(&ar->mutex); + + ar9170_regwrite_begin(ar); + + if (changed & BSS_CHANGED_ASSOC) { + ar->state = bss_conf->assoc ? AR9170_ASSOCIATED : ar->state; + +#ifndef CONFIG_AR9170_LEDS + /* enable assoc LED. */ + err = ar9170_set_leds_state(ar, bss_conf->assoc ? 2 : 0); +#endif /* CONFIG_AR9170_LEDS */ + } + + if (changed & BSS_CHANGED_HT) { + /* TODO */ + err = 0; + } + + if (changed & BSS_CHANGED_ERP_SLOT) { + u32 slottime = 20; + + if (bss_conf->use_short_slot) + slottime = 9; + + ar9170_regwrite(AR9170_MAC_REG_SLOT_TIME, slottime << 10); + } + + if (changed & BSS_CHANGED_BASIC_RATES) { + u32 cck, ofdm; + + if (hw->conf.channel->band == IEEE80211_BAND_5GHZ) { + ofdm = bss_conf->basic_rates; + cck = 0; + } else { + /* four cck rates */ + cck = bss_conf->basic_rates & 0xf; + ofdm = bss_conf->basic_rates >> 4; + } + ar9170_regwrite(AR9170_MAC_REG_BASIC_RATE, + ofdm << 8 | cck); + } + + ar9170_regwrite_finish(); + err = ar9170_regwrite_result(); + mutex_unlock(&ar->mutex); +} + +static u64 ar9170_op_get_tsf(struct ieee80211_hw *hw) +{ + struct ar9170 *ar = hw->priv; + int err; + u32 tsf_low; + u32 tsf_high; + u64 tsf; + + mutex_lock(&ar->mutex); + err = ar9170_read_reg(ar, AR9170_MAC_REG_TSF_L, &tsf_low); + if (!err) + err = ar9170_read_reg(ar, AR9170_MAC_REG_TSF_H, &tsf_high); + mutex_unlock(&ar->mutex); + + if (WARN_ON(err)) + return 0; + + tsf = tsf_high; + tsf = (tsf << 32) | tsf_low; + return tsf; +} + +static int ar9170_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct ar9170 *ar = hw->priv; + int err = 0, i; + u8 ktype; + + if ((!ar->vif) || (ar->disable_offload)) + return -EOPNOTSUPP; + + switch (key->alg) { + case ALG_WEP: + if (key->keylen == LEN_WEP40) + ktype = AR9170_ENC_ALG_WEP64; + else + ktype = AR9170_ENC_ALG_WEP128; + break; + case ALG_TKIP: + ktype = AR9170_ENC_ALG_TKIP; + break; + case ALG_CCMP: + ktype = AR9170_ENC_ALG_AESCCMP; + break; + default: + return -EOPNOTSUPP; + } + + mutex_lock(&ar->mutex); + if (cmd == SET_KEY) { + if (unlikely(!IS_STARTED(ar))) { + err = -EOPNOTSUPP; + goto out; + } + + /* group keys need all-zeroes address */ + if 
(!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + sta = NULL; + + if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { + for (i = 0; i < 64; i++) + if (!(ar->usedkeys & BIT(i))) + break; + if (i == 64) { + ar->rx_software_decryption = true; + ar9170_set_operating_mode(ar); + err = -ENOSPC; + goto out; + } + } else { + i = 64 + key->keyidx; + } + + key->hw_key_idx = i; + + err = ar9170_upload_key(ar, i, sta ? sta->addr : NULL, ktype, 0, + key->key, min_t(u8, 16, key->keylen)); + if (err) + goto out; + + if (key->alg == ALG_TKIP) { + err = ar9170_upload_key(ar, i, sta ? sta->addr : NULL, + ktype, 1, key->key + 16, 16); + if (err) + goto out; + + /* + * hardware is not capable generating the MMIC + * for fragmented frames! + */ + key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; + } + + if (i < 64) + ar->usedkeys |= BIT(i); + + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + } else { + if (unlikely(!IS_STARTED(ar))) { + /* The device is gone... together with the key ;-) */ + err = 0; + goto out; + } + + err = ar9170_disable_key(ar, key->hw_key_idx); + if (err) + goto out; + + if (key->hw_key_idx < 64) { + ar->usedkeys &= ~BIT(key->hw_key_idx); + } else { + err = ar9170_upload_key(ar, key->hw_key_idx, NULL, + AR9170_ENC_ALG_NONE, 0, + NULL, 0); + if (err) + goto out; + + if (key->alg == ALG_TKIP) { + err = ar9170_upload_key(ar, key->hw_key_idx, + NULL, + AR9170_ENC_ALG_NONE, 1, + NULL, 0); + if (err) + goto out; + } + + } + } + + ar9170_regwrite_begin(ar); + ar9170_regwrite(AR9170_MAC_REG_ROLL_CALL_TBL_L, ar->usedkeys); + ar9170_regwrite(AR9170_MAC_REG_ROLL_CALL_TBL_H, ar->usedkeys >> 32); + ar9170_regwrite_finish(); + err = ar9170_regwrite_result(); + +out: + mutex_unlock(&ar->mutex); + + return err; +} + +static void ar9170_sta_notify(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum sta_notify_cmd cmd, + struct ieee80211_sta *sta) +{ + struct ar9170 *ar = hw->priv; + struct ar9170_sta_info *info = (void *) sta->drv_priv; + struct sk_buff *skb; + + switch (cmd) { + case STA_NOTIFY_ADD: + skb_queue_head_init(&info->tx_status); + + /* + * do we already have a frame that needs tx_status + * from this station in our queue? + * If so then transfer them to the new station queue. 
+ */ + + /* preserve the correct order - check the waste bin first */ + while ((skb = ar9170_find_skb_in_queue_by_mac(ar, sta->addr, + &ar->global_tx_status_waste))) + skb_queue_tail(&info->tx_status, skb); + + /* now the still pending frames */ + while ((skb = ar9170_find_skb_in_queue_by_mac(ar, sta->addr, + &ar->global_tx_status))) + skb_queue_tail(&info->tx_status, skb); + +#ifdef AR9170_QUEUE_DEBUG + printk(KERN_DEBUG "%s: STA[%pM] has %d queued frames =>\n", + wiphy_name(ar->hw->wiphy), sta->addr, + skb_queue_len(&info->tx_status)); + + ar9170_dump_station_tx_status_queue(ar, &info->tx_status); +#endif /* AR9170_QUEUE_DEBUG */ + + + break; + + case STA_NOTIFY_REMOVE: + + /* + * transfer all outstanding frames that need a tx_status + * reports to the fallback queue + */ + + while ((skb = skb_dequeue(&ar->global_tx_status))) { +#ifdef AR9170_QUEUE_DEBUG + printk(KERN_DEBUG "%s: queueing frame in global " + "tx_status queue =>\n", + wiphy_name(ar->hw->wiphy)); + + ar9170_print_txheader(ar, skb); +#endif /* AR9170_QUEUE_DEBUG */ + skb_queue_tail(&ar->global_tx_status_waste, skb); + } + break; + + default: + break; + } +} + +static int ar9170_get_stats(struct ieee80211_hw *hw, + struct ieee80211_low_level_stats *stats) +{ + struct ar9170 *ar = hw->priv; + u32 val; + int err; + + mutex_lock(&ar->mutex); + err = ar9170_read_reg(ar, AR9170_MAC_REG_TX_RETRY, &val); + ar->stats.dot11ACKFailureCount += val; + + memcpy(stats, &ar->stats, sizeof(*stats)); + mutex_unlock(&ar->mutex); + + return 0; +} + +static int ar9170_get_tx_stats(struct ieee80211_hw *hw, + struct ieee80211_tx_queue_stats *tx_stats) +{ + struct ar9170 *ar = hw->priv; + + spin_lock_bh(&ar->tx_stats_lock); + memcpy(tx_stats, ar->tx_stats, sizeof(tx_stats[0]) * hw->queues); + spin_unlock_bh(&ar->tx_stats_lock); + + return 0; +} + +static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue, + const struct ieee80211_tx_queue_params *param) +{ + struct ar9170 *ar = hw->priv; + int ret; + + mutex_lock(&ar->mutex); + if ((param) && !(queue > 4)) { + memcpy(&ar->edcf[ar9170_qos_hwmap[queue]], + param, sizeof(*param)); + + ret = ar9170_set_qos(ar); + } else + ret = -EINVAL; + + mutex_unlock(&ar->mutex); + return ret; +} + +static const struct ieee80211_ops ar9170_ops = { + .start = ar9170_op_start, + .stop = ar9170_op_stop, + .tx = ar9170_op_tx, + .add_interface = ar9170_op_add_interface, + .remove_interface = ar9170_op_remove_interface, + .config = ar9170_op_config, + .config_interface = ar9170_op_config_interface, + .configure_filter = ar9170_op_configure_filter, + .conf_tx = ar9170_conf_tx, + .bss_info_changed = ar9170_op_bss_info_changed, + .get_tsf = ar9170_op_get_tsf, + .set_key = ar9170_set_key, + .sta_notify = ar9170_sta_notify, + .get_stats = ar9170_get_stats, + .get_tx_stats = ar9170_get_tx_stats, +}; + +void *ar9170_alloc(size_t priv_size) +{ + struct ieee80211_hw *hw; + struct ar9170 *ar; + int i; + + hw = ieee80211_alloc_hw(priv_size, &ar9170_ops); + if (!hw) + return ERR_PTR(-ENOMEM); + + ar = hw->priv; + ar->hw = hw; + + mutex_init(&ar->mutex); + spin_lock_init(&ar->cmdlock); + spin_lock_init(&ar->tx_stats_lock); + skb_queue_head_init(&ar->global_tx_status); + skb_queue_head_init(&ar->global_tx_status_waste); + INIT_WORK(&ar->filter_config_work, ar9170_set_filters); + INIT_WORK(&ar->beacon_work, ar9170_new_beacon); + INIT_DELAYED_WORK(&ar->tx_status_janitor, ar9170_tx_status_janitor); + + /* all hw supports 2.4 GHz, so set channel to 1 by default */ + ar->channel = &ar9170_2ghz_chantable[0]; + + /* first part of wiphy 
init */ + ar->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_WDS) | + BIT(NL80211_IFTYPE_ADHOC); + ar->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS | + IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | + IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_NOISE_DBM; + + ar->hw->queues = 4; + ar->hw->extra_tx_headroom = 8; + ar->hw->sta_data_size = sizeof(struct ar9170_sta_info); + + ar->hw->max_rates = 1; + ar->hw->max_rate_tries = 3; + + for (i = 0; i < ARRAY_SIZE(ar->noise); i++) + ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */ + + return ar; +} +EXPORT_SYMBOL_GPL(ar9170_alloc); + +static int ar9170_read_eeprom(struct ar9170 *ar) +{ +#define RW 8 /* number of words to read at once */ +#define RB (sizeof(u32) * RW) + DECLARE_MAC_BUF(mbuf); + u8 *eeprom = (void *)&ar->eeprom; + u8 *addr = ar->eeprom.mac_address; + __le32 offsets[RW]; + int i, j, err, bands = 0; + + BUILD_BUG_ON(sizeof(ar->eeprom) & 3); + + BUILD_BUG_ON(RB > AR9170_MAX_CMD_LEN - 4); +#ifndef __CHECKER__ + /* don't want to handle trailing remains */ + BUILD_BUG_ON(sizeof(ar->eeprom) % RB); +#endif + + for (i = 0; i < sizeof(ar->eeprom)/RB; i++) { + for (j = 0; j < RW; j++) + offsets[j] = cpu_to_le32(AR9170_EEPROM_START + + RB * i + 4 * j); + + err = ar->exec_cmd(ar, AR9170_CMD_RREG, + RB, (u8 *) &offsets, + RB, eeprom + RB * i); + if (err) + return err; + } + +#undef RW +#undef RB + + if (ar->eeprom.length == cpu_to_le16(0xFFFF)) + return -ENODATA; + + if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) { + ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &ar9170_band_2GHz; + bands++; + } + if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) { + ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &ar9170_band_5GHz; + bands++; + } + /* + * I measured this, a bandswitch takes roughly + * 135 ms and a frequency switch about 80. + * + * FIXME: measure these values again once EEPROM settings + * are used, that will influence them! + */ + if (bands == 2) + ar->hw->channel_change_time = 135 * 1000; + else + ar->hw->channel_change_time = 80 * 1000; + + /* second part of wiphy init */ + SET_IEEE80211_PERM_ADDR(ar->hw, addr); + + return bands ? 
0 : -EINVAL; +} + +int ar9170_register(struct ar9170 *ar, struct device *pdev) +{ + int err; + + /* try to read EEPROM, init MAC addr */ + err = ar9170_read_eeprom(ar); + if (err) + goto err_out; + + err = ieee80211_register_hw(ar->hw); + if (err) + goto err_out; + + err = ar9170_init_leds(ar); + if (err) + goto err_unreg; + +#ifdef CONFIG_AR9170_LEDS + err = ar9170_register_leds(ar); + if (err) + goto err_unreg; +#endif /* CONFIG_AR9170_LEDS */ + + dev_info(pdev, "Atheros AR9170 is registered as '%s'\n", + wiphy_name(ar->hw->wiphy)); + + return err; + +err_unreg: + ieee80211_unregister_hw(ar->hw); + +err_out: + return err; +} +EXPORT_SYMBOL_GPL(ar9170_register); + +void ar9170_unregister(struct ar9170 *ar) +{ +#ifdef CONFIG_AR9170_LEDS + ar9170_unregister_leds(ar); +#endif /* CONFIG_AR9170_LEDS */ + + ieee80211_unregister_hw(ar->hw); + mutex_destroy(&ar->mutex); +} +EXPORT_SYMBOL_GPL(ar9170_unregister); + +static int __init ar9170_init(void) +{ + if (modparam_nohwcrypt) + printk(KERN_INFO "ar9170: cryptographic acceleration " + "disabled.\n"); + + return 0; +} + +static void __exit ar9170_exit(void) +{ + +} + +module_init(ar9170_init); +module_exit(ar9170_exit); -- cgit v0.10.2 From 6cb19353535f9f02fc2a753e3261a255406ba8fa Mon Sep 17 00:00:00 2001 From: Christian Lamparter Date: Sun, 22 Mar 2009 02:36:06 +0100 Subject: ar9170: mac/bbp and phy code MAC/BBP and PHY programming code Signed-off-by: Christian Lamparter Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ar9170/mac.c b/drivers/net/wireless/ar9170/mac.c new file mode 100644 index 0000000..c8fa307 --- /dev/null +++ b/drivers/net/wireless/ar9170/mac.c @@ -0,0 +1,452 @@ +/* + * Atheros AR9170 driver + * + * MAC programming + * + * Copyright 2008, Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, see + * http://www.gnu.org/licenses/. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * Copyright (c) 2007-2008 Atheros Communications, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ +#include "ar9170.h" +#include "cmd.h" + +int ar9170_set_qos(struct ar9170 *ar) +{ + ar9170_regwrite_begin(ar); + + ar9170_regwrite(AR9170_MAC_REG_AC0_CW, ar->edcf[0].cw_min | + (ar->edcf[0].cw_max << 16)); + ar9170_regwrite(AR9170_MAC_REG_AC1_CW, ar->edcf[1].cw_min | + (ar->edcf[1].cw_max << 16)); + ar9170_regwrite(AR9170_MAC_REG_AC2_CW, ar->edcf[2].cw_min | + (ar->edcf[2].cw_max << 16)); + ar9170_regwrite(AR9170_MAC_REG_AC3_CW, ar->edcf[3].cw_min | + (ar->edcf[3].cw_max << 16)); + ar9170_regwrite(AR9170_MAC_REG_AC4_CW, ar->edcf[4].cw_min | + (ar->edcf[4].cw_max << 16)); + + ar9170_regwrite(AR9170_MAC_REG_AC1_AC0_AIFS, + ((ar->edcf[0].aifs * 9 + 10)) | + ((ar->edcf[1].aifs * 9 + 10) << 12) | + ((ar->edcf[2].aifs * 9 + 10) << 24)); + ar9170_regwrite(AR9170_MAC_REG_AC3_AC2_AIFS, + ((ar->edcf[2].aifs * 9 + 10) >> 8) | + ((ar->edcf[3].aifs * 9 + 10) << 4) | + ((ar->edcf[4].aifs * 9 + 10) << 16)); + + ar9170_regwrite(AR9170_MAC_REG_AC1_AC0_TXOP, + ar->edcf[0].txop | ar->edcf[1].txop << 16); + ar9170_regwrite(AR9170_MAC_REG_AC3_AC2_TXOP, + ar->edcf[1].txop | ar->edcf[3].txop << 16); + + ar9170_regwrite_finish(); + + return ar9170_regwrite_result(); +} + +int ar9170_init_mac(struct ar9170 *ar) +{ + ar9170_regwrite_begin(ar); + + ar9170_regwrite(AR9170_MAC_REG_ACK_EXTENSION, 0x40); + + ar9170_regwrite(AR9170_MAC_REG_RETRY_MAX, 0); + + /* enable MMIC */ + ar9170_regwrite(AR9170_MAC_REG_SNIFFER, + AR9170_MAC_REG_SNIFFER_DEFAULTS); + + ar9170_regwrite(AR9170_MAC_REG_RX_THRESHOLD, 0xc1f80); + + ar9170_regwrite(AR9170_MAC_REG_RX_PE_DELAY, 0x70); + ar9170_regwrite(AR9170_MAC_REG_EIFS_AND_SIFS, 0xa144000); + ar9170_regwrite(AR9170_MAC_REG_SLOT_TIME, 9 << 10); + + /* CF-END mode */ + ar9170_regwrite(0x1c3b2c, 0x19000000); + + /* NAV protects ACK only (in TXOP) */ + ar9170_regwrite(0x1c3b38, 0x201); + + /* Set Beacon PHY CTRL's TPC to 0x7, TA1=1 */ + /* OTUS set AM to 0x1 */ + ar9170_regwrite(AR9170_MAC_REG_BCN_HT1, 0x8000170); + + ar9170_regwrite(AR9170_MAC_REG_BACKOFF_PROTECT, 0x105); + + /* AGG test code*/ + /* Aggregation MAX number and timeout */ + ar9170_regwrite(0x1c3b9c, 0x10000a); + + ar9170_regwrite(AR9170_MAC_REG_FRAMETYPE_FILTER, + AR9170_MAC_REG_FTF_DEFAULTS); + + /* Enable deaggregator, response in sniffer mode */ + ar9170_regwrite(0x1c3c40, 0x1 | 1<<30); + + /* rate sets */ + ar9170_regwrite(AR9170_MAC_REG_BASIC_RATE, 0x150f); + ar9170_regwrite(AR9170_MAC_REG_MANDATORY_RATE, 0x150f); + ar9170_regwrite(AR9170_MAC_REG_RTS_CTS_RATE, 0x10b01bb); + + /* MIMO response control */ + ar9170_regwrite(0x1c3694, 0x4003C1E);/* bit 26~28 otus-AM */ + + /* switch MAC to OTUS interface */ + ar9170_regwrite(0x1c3600, 0x3); + + ar9170_regwrite(AR9170_MAC_REG_AMPDU_RX_THRESH, 0xffff); + + /* set PHY register read timeout (??) */ + ar9170_regwrite(AR9170_MAC_REG_MISC_680, 0xf00008); + + /* Disable Rx TimeOut, workaround for BB. 
*/ + ar9170_regwrite(AR9170_MAC_REG_RX_TIMEOUT, 0x0); + + /* Set CPU clock frequency to 88/80MHz */ + ar9170_regwrite(AR9170_PWR_REG_CLOCK_SEL, + AR9170_PWR_CLK_AHB_80_88MHZ | + AR9170_PWR_CLK_DAC_160_INV_DLY); + + /* Set WLAN DMA interrupt mode: generate int per packet */ + ar9170_regwrite(AR9170_MAC_REG_TXRX_MPI, 0x110011); + + ar9170_regwrite(AR9170_MAC_REG_FCS_SELECT, + AR9170_MAC_FCS_FIFO_PROT); + + /* Disables the CF_END frame, undocumented register */ + ar9170_regwrite(AR9170_MAC_REG_TXOP_NOT_ENOUGH_IND, + 0x141E0F48); + + ar9170_regwrite_finish(); + + return ar9170_regwrite_result(); +} + +static int ar9170_set_mac_reg(struct ar9170 *ar, const u32 reg, const u8 *mac) +{ + static const u8 zero[ETH_ALEN] = { 0 }; + + if (!mac) + mac = zero; + + ar9170_regwrite_begin(ar); + + ar9170_regwrite(reg, + (mac[3] << 24) | (mac[2] << 16) | + (mac[1] << 8) | mac[0]); + + ar9170_regwrite(reg + 4, (mac[5] << 8) | mac[4]); + + ar9170_regwrite_finish(); + + return ar9170_regwrite_result(); +} + +int ar9170_update_multicast(struct ar9170 *ar) +{ + int err; + + ar9170_regwrite_begin(ar); + ar9170_regwrite(AR9170_MAC_REG_GROUP_HASH_TBL_H, + ar->want_mc_hash >> 32); + ar9170_regwrite(AR9170_MAC_REG_GROUP_HASH_TBL_L, + ar->want_mc_hash); + + ar9170_regwrite_finish(); + err = ar9170_regwrite_result(); + + if (err) + return err; + + ar->cur_mc_hash = ar->want_mc_hash; + + return 0; +} + +int ar9170_update_frame_filter(struct ar9170 *ar) +{ + int err; + + err = ar9170_write_reg(ar, AR9170_MAC_REG_FRAMETYPE_FILTER, + ar->want_filter); + + if (err) + return err; + + ar->cur_filter = ar->want_filter; + + return 0; +} + +static int ar9170_set_promiscouous(struct ar9170 *ar) +{ + u32 encr_mode, sniffer; + int err; + + err = ar9170_read_reg(ar, AR9170_MAC_REG_SNIFFER, &sniffer); + if (err) + return err; + + err = ar9170_read_reg(ar, AR9170_MAC_REG_ENCRYPTION, &encr_mode); + if (err) + return err; + + if (ar->sniffer_enabled) { + sniffer |= AR9170_MAC_REG_SNIFFER_ENABLE_PROMISC; + + /* + * Rx decryption works in place. + * + * If we don't disable it, the hardware will render all + * encrypted frames which are encrypted with an unknown + * key useless. 
+ */ + + encr_mode |= AR9170_MAC_REG_ENCRYPTION_RX_SOFTWARE; + ar->sniffer_enabled = true; + } else { + sniffer &= ~AR9170_MAC_REG_SNIFFER_ENABLE_PROMISC; + + if (ar->rx_software_decryption) + encr_mode |= AR9170_MAC_REG_ENCRYPTION_RX_SOFTWARE; + else + encr_mode &= ~AR9170_MAC_REG_ENCRYPTION_RX_SOFTWARE; + } + + ar9170_regwrite_begin(ar); + ar9170_regwrite(AR9170_MAC_REG_ENCRYPTION, encr_mode); + ar9170_regwrite(AR9170_MAC_REG_SNIFFER, sniffer); + ar9170_regwrite_finish(); + + return ar9170_regwrite_result(); +} + +int ar9170_set_operating_mode(struct ar9170 *ar) +{ + u32 pm_mode = AR9170_MAC_REG_POWERMGT_DEFAULTS; + u8 *mac_addr, *bssid; + int err; + + if (ar->vif) { + mac_addr = ar->mac_addr; + bssid = ar->bssid; + + switch (ar->vif->type) { + case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_ADHOC: + pm_mode |= AR9170_MAC_REG_POWERMGT_IBSS; + break; +/* case NL80211_IFTYPE_AP: + pm_mode |= AR9170_MAC_REG_POWERMGT_AP; + break;*/ + case NL80211_IFTYPE_WDS: + pm_mode |= AR9170_MAC_REG_POWERMGT_AP_WDS; + break; + case NL80211_IFTYPE_MONITOR: + ar->sniffer_enabled = true; + ar->rx_software_decryption = true; + break; + default: + pm_mode |= AR9170_MAC_REG_POWERMGT_STA; + break; + } + } else { + mac_addr = NULL; + bssid = NULL; + } + + err = ar9170_set_mac_reg(ar, AR9170_MAC_REG_MAC_ADDR_L, mac_addr); + if (err) + return err; + + err = ar9170_set_mac_reg(ar, AR9170_MAC_REG_BSSID_L, bssid); + if (err) + return err; + + err = ar9170_set_promiscouous(ar); + if (err) + return err; + + ar9170_regwrite_begin(ar); + + ar9170_regwrite(AR9170_MAC_REG_POWERMANAGEMENT, pm_mode); + ar9170_regwrite_finish(); + + return ar9170_regwrite_result(); +} + +int ar9170_set_hwretry_limit(struct ar9170 *ar, unsigned int max_retry) +{ + u32 tmp = min_t(u32, 0x33333, max_retry * 0x11111); + + return ar9170_write_reg(ar, AR9170_MAC_REG_RETRY_MAX, tmp); +} + +int ar9170_set_beacon_timers(struct ar9170 *ar) +{ + u32 v = 0; + u32 pretbtt = 0; + + v |= ar->hw->conf.beacon_int; + + if (ar->vif) { + switch (ar->vif->type) { + case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_ADHOC: + v |= BIT(25); + break; + case NL80211_IFTYPE_AP: + v |= BIT(24); + pretbtt = (ar->hw->conf.beacon_int - 6) << 16; + break; + default: + break; + } + + v |= ar->vif->bss_conf.dtim_period << 16; + } + + ar9170_regwrite_begin(ar); + + ar9170_regwrite(AR9170_MAC_REG_PRETBTT, pretbtt); + ar9170_regwrite(AR9170_MAC_REG_BCN_PERIOD, v); + ar9170_regwrite_finish(); + return ar9170_regwrite_result(); +} + +int ar9170_update_beacon(struct ar9170 *ar) +{ + struct sk_buff *skb; + __le32 *data, *old = NULL; + u32 word; + int i; + + skb = ieee80211_beacon_get(ar->hw, ar->vif); + if (!skb) + return -ENOMEM; + + data = (__le32 *)skb->data; + if (ar->beacon) + old = (__le32 *)ar->beacon->data; + + ar9170_regwrite_begin(ar); + for (i = 0; i < DIV_ROUND_UP(skb->len, 4); i++) { + /* + * XXX: This accesses beyond skb data for up + * to the last 3 bytes!! 
+ */ + + if (old && (data[i] == old[i])) + continue; + + word = le32_to_cpu(data[i]); + ar9170_regwrite(AR9170_BEACON_BUFFER_ADDRESS + 4 * i, word); + } + + /* XXX: use skb->cb info */ + if (ar->hw->conf.channel->band == IEEE80211_BAND_2GHZ) + ar9170_regwrite(AR9170_MAC_REG_BCN_PLCP, + ((skb->len + 4) << (3+16)) + 0x0400); + else + ar9170_regwrite(AR9170_MAC_REG_BCN_PLCP, + ((skb->len + 4) << (3+16)) + 0x0400); + + ar9170_regwrite(AR9170_MAC_REG_BCN_LENGTH, skb->len + 4); + ar9170_regwrite(AR9170_MAC_REG_BCN_ADDR, AR9170_BEACON_BUFFER_ADDRESS); + ar9170_regwrite(AR9170_MAC_REG_BCN_CTRL, 1); + + ar9170_regwrite_finish(); + + dev_kfree_skb(ar->beacon); + ar->beacon = skb; + + return ar9170_regwrite_result(); +} + +void ar9170_new_beacon(struct work_struct *work) +{ + struct ar9170 *ar = container_of(work, struct ar9170, + beacon_work); + struct sk_buff *skb; + + if (unlikely(!IS_STARTED(ar))) + return ; + + mutex_lock(&ar->mutex); + + if (!ar->vif) + goto out; + + ar9170_update_beacon(ar); + + rcu_read_lock(); + while ((skb = ieee80211_get_buffered_bc(ar->hw, ar->vif))) + ar9170_op_tx(ar->hw, skb); + + rcu_read_unlock(); + + out: + mutex_unlock(&ar->mutex); +} + +int ar9170_upload_key(struct ar9170 *ar, u8 id, const u8 *mac, u8 ktype, + u8 keyidx, u8 *keydata, int keylen) +{ + __le32 vals[7]; + static const u8 bcast[ETH_ALEN] = + { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + u8 dummy; + + mac = mac ? : bcast; + + vals[0] = cpu_to_le32((keyidx << 16) + id); + vals[1] = cpu_to_le32(mac[1] << 24 | mac[0] << 16 | ktype); + vals[2] = cpu_to_le32(mac[5] << 24 | mac[4] << 16 | + mac[3] << 8 | mac[2]); + memset(&vals[3], 0, 16); + if (keydata) + memcpy(&vals[3], keydata, keylen); + + return ar->exec_cmd(ar, AR9170_CMD_EKEY, + sizeof(vals), (u8 *)vals, + 1, &dummy); +} + +int ar9170_disable_key(struct ar9170 *ar, u8 id) +{ + __le32 val = cpu_to_le32(id); + u8 dummy; + + return ar->exec_cmd(ar, AR9170_CMD_EKEY, + sizeof(val), (u8 *)&val, + 1, &dummy); +} diff --git a/drivers/net/wireless/ar9170/phy.c b/drivers/net/wireless/ar9170/phy.c new file mode 100644 index 0000000..6ce2075 --- /dev/null +++ b/drivers/net/wireless/ar9170/phy.c @@ -0,0 +1,1240 @@ +/* + * Atheros AR9170 driver + * + * PHY and RF code + * + * Copyright 2008, Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, see + * http://www.gnu.org/licenses/. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * Copyright (c) 2007-2008 Atheros Communications, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include "ar9170.h" +#include "cmd.h" + +static int ar9170_init_power_cal(struct ar9170 *ar) +{ + ar9170_regwrite_begin(ar); + + ar9170_regwrite(0x1bc000 + 0x993c, 0x7f); + ar9170_regwrite(0x1bc000 + 0x9934, 0x3f3f3f3f); + ar9170_regwrite(0x1bc000 + 0x9938, 0x3f3f3f3f); + ar9170_regwrite(0x1bc000 + 0xa234, 0x3f3f3f3f); + ar9170_regwrite(0x1bc000 + 0xa238, 0x3f3f3f3f); + ar9170_regwrite(0x1bc000 + 0xa38c, 0x3f3f3f3f); + ar9170_regwrite(0x1bc000 + 0xa390, 0x3f3f3f3f); + ar9170_regwrite(0x1bc000 + 0xa3cc, 0x3f3f3f3f); + ar9170_regwrite(0x1bc000 + 0xa3d0, 0x3f3f3f3f); + ar9170_regwrite(0x1bc000 + 0xa3d4, 0x3f3f3f3f); + + ar9170_regwrite_finish(); + return ar9170_regwrite_result(); +} + +struct ar9170_phy_init { + u32 reg, _5ghz_20, _5ghz_40, _2ghz_40, _2ghz_20; +}; + +static struct ar9170_phy_init ar5416_phy_init[] = { + { 0x1c5800, 0x00000007, 0x00000007, 0x00000007, 0x00000007, }, + { 0x1c5804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, }, + { 0x1c5808, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c580c, 0xad848e19, 0xad848e19, 0xad848e19, 0xad848e19, }, + { 0x1c5810, 0x7d14e000, 0x7d14e000, 0x7d14e000, 0x7d14e000, }, + { 0x1c5814, 0x9c0a9f6b, 0x9c0a9f6b, 0x9c0a9f6b, 0x9c0a9f6b, }, + { 0x1c5818, 0x00000090, 0x00000090, 0x00000090, 0x00000090, }, + { 0x1c581c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, }, + { 0x1c5824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, }, + { 0x1c5828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, }, + { 0x1c582c, 0x0000a000, 0x0000a000, 0x0000a000, 0x0000a000, }, + { 0x1c5830, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, }, + { 0x1c5838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, }, + { 0x1c583c, 0x00200400, 0x00200400, 0x00200400, 0x00200400, }, + { 0x1c5840, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e, }, + { 0x1c5844, 0x1372161e, 0x13721c1e, 0x13721c24, 0x137216a4, }, + { 0x1c5848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, }, + { 0x1c584c, 0x1284233c, 0x1284233c, 0x1284233c, 0x1284233c, }, + { 0x1c5850, 0x6c48b4e4, 0x6c48b4e4, 0x6c48b0e4, 0x6c48b0e4, }, + { 0x1c5854, 0x00000859, 0x00000859, 0x00000859, 0x00000859, }, + { 0x1c5858, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, }, + { 0x1c585c, 0x31395c5e, 0x31395c5e, 0x31395c5e, 0x31395c5e, }, + { 0x1c5860, 0x0004dd10, 0x0004dd10, 0x0004dd20, 0x0004dd20, }, + { 0x1c5868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, }, + { 0x1c586c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, }, + { 0x1c5900, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5904, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5908, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c590c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, }, + { 0x1c5918, 0x00000118, 0x00000230, 0x00000268, 0x00000134, }, + { 0x1c591c, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff, }, + { 0x1c5920, 0x0510081c, 0x0510081c, 0x0510001c, 0x0510001c, }, + { 0x1c5924, 0xd0058a15, 0xd0058a15, 0xd0058a15, 0xd0058a15, }, + { 0x1c5928, 0x00000001, 
0x00000001, 0x00000001, 0x00000001, }, + { 0x1c592c, 0x00000004, 0x00000004, 0x00000004, 0x00000004, }, + { 0x1c5934, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, }, + { 0x1c5938, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, }, + { 0x1c593c, 0x0000007f, 0x0000007f, 0x0000007f, 0x0000007f, }, + { 0x1c5944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, }, + { 0x1c5948, 0x9280b212, 0x9280b212, 0x9280b212, 0x9280b212, }, + { 0x1c594c, 0x00020028, 0x00020028, 0x00020028, 0x00020028, }, + { 0x1c5954, 0x5d50e188, 0x5d50e188, 0x5d50e188, 0x5d50e188, }, + { 0x1c5958, 0x00081fff, 0x00081fff, 0x00081fff, 0x00081fff, }, + { 0x1c5960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, }, + { 0x1c5964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, }, + { 0x1c5970, 0x190fb515, 0x190fb515, 0x190fb515, 0x190fb515, }, + { 0x1c5974, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5978, 0x00000001, 0x00000001, 0x00000001, 0x00000001, }, + { 0x1c597c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5980, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5984, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5988, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c598c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5990, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5994, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5998, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c599c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c59a0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c59a4, 0x00000007, 0x00000007, 0x00000007, 0x00000007, }, + { 0x1c59a8, 0x001fff00, 0x001fff00, 0x001fff00, 0x001fff00, }, + { 0x1c59ac, 0x006f00c4, 0x006f00c4, 0x006f00c4, 0x006f00c4, }, + { 0x1c59b0, 0x03051000, 0x03051000, 0x03051000, 0x03051000, }, + { 0x1c59b4, 0x00000820, 0x00000820, 0x00000820, 0x00000820, }, + { 0x1c59c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, }, + { 0x1c59c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, }, + { 0x1c59c8, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c, }, + { 0x1c59cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, }, + { 0x1c59d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, }, + { 0x1c59d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c59d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c59dc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c59e0, 0x00000200, 0x00000200, 0x00000200, 0x00000200, }, + { 0x1c59e4, 0x64646464, 0x64646464, 0x64646464, 0x64646464, }, + { 0x1c59e8, 0x3c787878, 0x3c787878, 0x3c787878, 0x3c787878, }, + { 0x1c59ec, 0x000000aa, 0x000000aa, 0x000000aa, 0x000000aa, }, + { 0x1c59f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c59fc, 0x00001042, 0x00001042, 0x00001042, 0x00001042, }, + { 0x1c5a00, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5a04, 0x00000040, 0x00000040, 0x00000040, 0x00000040, }, + { 0x1c5a08, 0x00000080, 0x00000080, 0x00000080, 0x00000080, }, + { 0x1c5a0c, 0x000001a1, 0x000001a1, 0x00000141, 0x00000141, }, + { 0x1c5a10, 0x000001e1, 0x000001e1, 0x00000181, 0x00000181, }, + { 0x1c5a14, 0x00000021, 0x00000021, 0x000001c1, 0x000001c1, }, + { 0x1c5a18, 0x00000061, 0x00000061, 0x00000001, 0x00000001, }, + { 0x1c5a1c, 0x00000168, 0x00000168, 0x00000041, 0x00000041, }, + { 0x1c5a20, 0x000001a8, 0x000001a8, 0x000001a8, 0x000001a8, }, + { 0x1c5a24, 0x000001e8, 0x000001e8, 0x000001e8, 0x000001e8, }, + { 0x1c5a28, 0x00000028, 0x00000028, 0x00000028, 0x00000028, }, + { 
0x1c5a2c, 0x00000068, 0x00000068, 0x00000068, 0x00000068, }, + { 0x1c5a30, 0x00000189, 0x00000189, 0x000000a8, 0x000000a8, }, + { 0x1c5a34, 0x000001c9, 0x000001c9, 0x00000169, 0x00000169, }, + { 0x1c5a38, 0x00000009, 0x00000009, 0x000001a9, 0x000001a9, }, + { 0x1c5a3c, 0x00000049, 0x00000049, 0x000001e9, 0x000001e9, }, + { 0x1c5a40, 0x00000089, 0x00000089, 0x00000029, 0x00000029, }, + { 0x1c5a44, 0x00000170, 0x00000170, 0x00000069, 0x00000069, }, + { 0x1c5a48, 0x000001b0, 0x000001b0, 0x00000190, 0x00000190, }, + { 0x1c5a4c, 0x000001f0, 0x000001f0, 0x000001d0, 0x000001d0, }, + { 0x1c5a50, 0x00000030, 0x00000030, 0x00000010, 0x00000010, }, + { 0x1c5a54, 0x00000070, 0x00000070, 0x00000050, 0x00000050, }, + { 0x1c5a58, 0x00000191, 0x00000191, 0x00000090, 0x00000090, }, + { 0x1c5a5c, 0x000001d1, 0x000001d1, 0x00000151, 0x00000151, }, + { 0x1c5a60, 0x00000011, 0x00000011, 0x00000191, 0x00000191, }, + { 0x1c5a64, 0x00000051, 0x00000051, 0x000001d1, 0x000001d1, }, + { 0x1c5a68, 0x00000091, 0x00000091, 0x00000011, 0x00000011, }, + { 0x1c5a6c, 0x000001b8, 0x000001b8, 0x00000051, 0x00000051, }, + { 0x1c5a70, 0x000001f8, 0x000001f8, 0x00000198, 0x00000198, }, + { 0x1c5a74, 0x00000038, 0x00000038, 0x000001d8, 0x000001d8, }, + { 0x1c5a78, 0x00000078, 0x00000078, 0x00000018, 0x00000018, }, + { 0x1c5a7c, 0x00000199, 0x00000199, 0x00000058, 0x00000058, }, + { 0x1c5a80, 0x000001d9, 0x000001d9, 0x00000098, 0x00000098, }, + { 0x1c5a84, 0x00000019, 0x00000019, 0x00000159, 0x00000159, }, + { 0x1c5a88, 0x00000059, 0x00000059, 0x00000199, 0x00000199, }, + { 0x1c5a8c, 0x00000099, 0x00000099, 0x000001d9, 0x000001d9, }, + { 0x1c5a90, 0x000000d9, 0x000000d9, 0x00000019, 0x00000019, }, + { 0x1c5a94, 0x000000f9, 0x000000f9, 0x00000059, 0x00000059, }, + { 0x1c5a98, 0x000000f9, 0x000000f9, 0x00000099, 0x00000099, }, + { 0x1c5a9c, 0x000000f9, 0x000000f9, 0x000000d9, 0x000000d9, }, + { 0x1c5aa0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5aa4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5aa8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5aac, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5ab0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5ab4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5ab8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5abc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5ac0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5ac4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5ac8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5acc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5ad0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5ad4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5ad8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5adc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5ae0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5ae4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5ae8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5aec, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5af0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5af4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5af8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5afc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5b00, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5b04, 0x00000001, 0x00000001, 
0x00000001, 0x00000001, }, + { 0x1c5b08, 0x00000002, 0x00000002, 0x00000002, 0x00000002, }, + { 0x1c5b0c, 0x00000003, 0x00000003, 0x00000003, 0x00000003, }, + { 0x1c5b10, 0x00000004, 0x00000004, 0x00000004, 0x00000004, }, + { 0x1c5b14, 0x00000005, 0x00000005, 0x00000005, 0x00000005, }, + { 0x1c5b18, 0x00000008, 0x00000008, 0x00000008, 0x00000008, }, + { 0x1c5b1c, 0x00000009, 0x00000009, 0x00000009, 0x00000009, }, + { 0x1c5b20, 0x0000000a, 0x0000000a, 0x0000000a, 0x0000000a, }, + { 0x1c5b24, 0x0000000b, 0x0000000b, 0x0000000b, 0x0000000b, }, + { 0x1c5b28, 0x0000000c, 0x0000000c, 0x0000000c, 0x0000000c, }, + { 0x1c5b2c, 0x0000000d, 0x0000000d, 0x0000000d, 0x0000000d, }, + { 0x1c5b30, 0x00000010, 0x00000010, 0x00000010, 0x00000010, }, + { 0x1c5b34, 0x00000011, 0x00000011, 0x00000011, 0x00000011, }, + { 0x1c5b38, 0x00000012, 0x00000012, 0x00000012, 0x00000012, }, + { 0x1c5b3c, 0x00000013, 0x00000013, 0x00000013, 0x00000013, }, + { 0x1c5b40, 0x00000014, 0x00000014, 0x00000014, 0x00000014, }, + { 0x1c5b44, 0x00000015, 0x00000015, 0x00000015, 0x00000015, }, + { 0x1c5b48, 0x00000018, 0x00000018, 0x00000018, 0x00000018, }, + { 0x1c5b4c, 0x00000019, 0x00000019, 0x00000019, 0x00000019, }, + { 0x1c5b50, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a, }, + { 0x1c5b54, 0x0000001b, 0x0000001b, 0x0000001b, 0x0000001b, }, + { 0x1c5b58, 0x0000001c, 0x0000001c, 0x0000001c, 0x0000001c, }, + { 0x1c5b5c, 0x0000001d, 0x0000001d, 0x0000001d, 0x0000001d, }, + { 0x1c5b60, 0x00000020, 0x00000020, 0x00000020, 0x00000020, }, + { 0x1c5b64, 0x00000021, 0x00000021, 0x00000021, 0x00000021, }, + { 0x1c5b68, 0x00000022, 0x00000022, 0x00000022, 0x00000022, }, + { 0x1c5b6c, 0x00000023, 0x00000023, 0x00000023, 0x00000023, }, + { 0x1c5b70, 0x00000024, 0x00000024, 0x00000024, 0x00000024, }, + { 0x1c5b74, 0x00000025, 0x00000025, 0x00000025, 0x00000025, }, + { 0x1c5b78, 0x00000028, 0x00000028, 0x00000028, 0x00000028, }, + { 0x1c5b7c, 0x00000029, 0x00000029, 0x00000029, 0x00000029, }, + { 0x1c5b80, 0x0000002a, 0x0000002a, 0x0000002a, 0x0000002a, }, + { 0x1c5b84, 0x0000002b, 0x0000002b, 0x0000002b, 0x0000002b, }, + { 0x1c5b88, 0x0000002c, 0x0000002c, 0x0000002c, 0x0000002c, }, + { 0x1c5b8c, 0x0000002d, 0x0000002d, 0x0000002d, 0x0000002d, }, + { 0x1c5b90, 0x00000030, 0x00000030, 0x00000030, 0x00000030, }, + { 0x1c5b94, 0x00000031, 0x00000031, 0x00000031, 0x00000031, }, + { 0x1c5b98, 0x00000032, 0x00000032, 0x00000032, 0x00000032, }, + { 0x1c5b9c, 0x00000033, 0x00000033, 0x00000033, 0x00000033, }, + { 0x1c5ba0, 0x00000034, 0x00000034, 0x00000034, 0x00000034, }, + { 0x1c5ba4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5ba8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5bac, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5bb0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5bb4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5bb8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5bbc, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5bc0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5bc4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5bc8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5bcc, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5bd0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5bd4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5bd8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5bdc, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5be0, 
0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5be4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5be8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5bec, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5bf0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5bf4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5bf8, 0x00000010, 0x00000010, 0x00000010, 0x00000010, }, + { 0x1c5bfc, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a, }, + { 0x1c5c00, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5c0c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5c10, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5c14, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5c18, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5c1c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5c20, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5c24, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5c28, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5c2c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5c30, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5c34, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5c38, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5c3c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5cf0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5cf4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5cf8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5cfc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c6200, 0x00000008, 0x00000008, 0x0000000e, 0x0000000e, }, + { 0x1c6204, 0x00000440, 0x00000440, 0x00000440, 0x00000440, }, + { 0x1c6208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, }, + { 0x1c620c, 0x012e8160, 0x012e8160, 0x012a8160, 0x012a8160, }, + { 0x1c6210, 0x40806333, 0x40806333, 0x40806333, 0x40806333, }, + { 0x1c6214, 0x00106c10, 0x00106c10, 0x00106c10, 0x00106c10, }, + { 0x1c6218, 0x009c4060, 0x009c4060, 0x009c4060, 0x009c4060, }, + { 0x1c621c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, }, + { 0x1c6220, 0x018830c6, 0x018830c6, 0x018830c6, 0x018830c6, }, + { 0x1c6224, 0x00000400, 0x00000400, 0x00000400, 0x00000400, }, + { 0x1c6228, 0x000009b5, 0x000009b5, 0x000009b5, 0x000009b5, }, + { 0x1c622c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c6230, 0x00000108, 0x00000210, 0x00000210, 0x00000108, }, + { 0x1c6234, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, }, + { 0x1c6238, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, }, + { 0x1c623c, 0x13c889af, 0x13c889af, 0x13c889af, 0x13c889af, }, + { 0x1c6240, 0x38490a20, 0x38490a20, 0x38490a20, 0x38490a20, }, + { 0x1c6244, 0x00007bb6, 0x00007bb6, 0x00007bb6, 0x00007bb6, }, + { 0x1c6248, 0x0fff3ffc, 0x0fff3ffc, 0x0fff3ffc, 0x0fff3ffc, }, + { 0x1c624c, 0x00000001, 0x00000001, 0x00000001, 0x00000001, }, + { 0x1c6250, 0x0000a000, 0x0000a000, 0x0000a000, 0x0000a000, }, + { 0x1c6254, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c6258, 0x0cc75380, 0x0cc75380, 0x0cc75380, 0x0cc75380, }, + { 0x1c625c, 0x0f0f0f01, 0x0f0f0f01, 0x0f0f0f01, 0x0f0f0f01, }, + { 0x1c6260, 0xdfa91f01, 0xdfa91f01, 0xdfa91f01, 0xdfa91f01, }, + { 0x1c6264, 0x00418a11, 0x00418a11, 0x00418a11, 0x00418a11, }, + { 0x1c6268, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c626c, 0x09249126, 0x09249126, 0x09249126, 0x09249126, }, + { 0x1c6274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 
0x0a1a7caa, }, + { 0x1c6278, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, }, + { 0x1c627c, 0x051701ce, 0x051701ce, 0x051701ce, 0x051701ce, }, + { 0x1c6300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, }, + { 0x1c6304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, }, + { 0x1c6308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, }, + { 0x1c630c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, }, + { 0x1c6310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, }, + { 0x1c6314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, }, + { 0x1c6318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, }, + { 0x1c631c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, }, + { 0x1c6320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, }, + { 0x1c6324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, }, + { 0x1c6328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, }, + { 0x1c632c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c6330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c6334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c6338, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c633c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c6340, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c6344, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c6348, 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff, }, + { 0x1c634c, 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff, }, + { 0x1c6350, 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff, }, + { 0x1c6354, 0x0003ffff, 0x0003ffff, 0x0003ffff, 0x0003ffff, }, + { 0x1c6358, 0x79a8aa1f, 0x79a8aa1f, 0x79a8aa1f, 0x79a8aa1f, }, + { 0x1c6388, 0x08000000, 0x08000000, 0x08000000, 0x08000000, }, + { 0x1c638c, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, }, + { 0x1c6390, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, }, + { 0x1c6394, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, }, + { 0x1c6398, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce, }, + { 0x1c639c, 0x00000007, 0x00000007, 0x00000007, 0x00000007, }, + { 0x1c63a0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c63a4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c63a8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c63ac, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c63b0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c63b4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c63b8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c63bc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c63c0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c63c4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c63c8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c63cc, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, }, + { 0x1c63d0, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, }, + { 0x1c63d4, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, }, + { 0x1c63d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c63dc, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, }, + { 0x1c63e0, 0x000000c0, 0x000000c0, 0x000000c0, 0x000000c0, }, + { 0x1c6848, 0x00180a65, 0x00180a65, 0x00180a68, 0x00180a68, }, + { 0x1c6920, 0x0510001c, 0x0510001c, 0x0510001c, 0x0510001c, }, + { 0x1c6960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, }, + { 0x1c720c, 0x012e8160, 0x012e8160, 0x012a8160, 0x012a8160, }, + { 0x1c726c, 0x09249126, 0x09249126, 0x09249126, 0x09249126, }, + { 0x1c7848, 0x00180a65, 0x00180a65, 0x00180a68, 0x00180a68, }, + { 0x1c7920, 0x0510001c, 
0x0510001c, 0x0510001c, 0x0510001c, }, + { 0x1c7960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, }, + { 0x1c820c, 0x012e8160, 0x012e8160, 0x012a8160, 0x012a8160, }, + { 0x1c826c, 0x09249126, 0x09249126, 0x09249126, 0x09249126, }, +/* { 0x1c8864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, }, */ + { 0x1c8864, 0x0001c600, 0x0001c600, 0x0001c600, 0x0001c600, }, + { 0x1c895c, 0x004b6a8e, 0x004b6a8e, 0x004b6a8e, 0x004b6a8e, }, + { 0x1c8968, 0x000003ce, 0x000003ce, 0x000003ce, 0x000003ce, }, + { 0x1c89bc, 0x00181400, 0x00181400, 0x00181400, 0x00181400, }, + { 0x1c9270, 0x00820820, 0x00820820, 0x00820820, 0x00820820, }, + { 0x1c935c, 0x066c420f, 0x066c420f, 0x066c420f, 0x066c420f, }, + { 0x1c9360, 0x0f282207, 0x0f282207, 0x0f282207, 0x0f282207, }, + { 0x1c9364, 0x17601685, 0x17601685, 0x17601685, 0x17601685, }, + { 0x1c9368, 0x1f801104, 0x1f801104, 0x1f801104, 0x1f801104, }, + { 0x1c936c, 0x37a00c03, 0x37a00c03, 0x37a00c03, 0x37a00c03, }, + { 0x1c9370, 0x3fc40883, 0x3fc40883, 0x3fc40883, 0x3fc40883, }, + { 0x1c9374, 0x57c00803, 0x57c00803, 0x57c00803, 0x57c00803, }, + { 0x1c9378, 0x5fd80682, 0x5fd80682, 0x5fd80682, 0x5fd80682, }, + { 0x1c937c, 0x7fe00482, 0x7fe00482, 0x7fe00482, 0x7fe00482, }, + { 0x1c9380, 0x7f3c7bba, 0x7f3c7bba, 0x7f3c7bba, 0x7f3c7bba, }, + { 0x1c9384, 0xf3307ff0, 0xf3307ff0, 0xf3307ff0, 0xf3307ff0, } +}; + +int ar9170_init_phy(struct ar9170 *ar, enum ieee80211_band band) +{ + int i, err; + u32 val; + bool is_2ghz = band == IEEE80211_BAND_2GHZ; + bool is_40mhz = false; /* XXX: for now */ + + ar9170_regwrite_begin(ar); + + for (i = 0; i < ARRAY_SIZE(ar5416_phy_init); i++) { + if (is_40mhz) { + if (is_2ghz) + val = ar5416_phy_init[i]._2ghz_40; + else + val = ar5416_phy_init[i]._5ghz_40; + } else { + if (is_2ghz) + val = ar5416_phy_init[i]._2ghz_20; + else + val = ar5416_phy_init[i]._5ghz_20; + } + + ar9170_regwrite(ar5416_phy_init[i].reg, val); + } + + ar9170_regwrite_finish(); + err = ar9170_regwrite_result(); + if (err) + return err; + + /* XXX: use EEPROM data here! */ + + err = ar9170_init_power_cal(ar); + if (err) + return err; + + /* XXX: remove magic! 
*/ + if (is_2ghz) + err = ar9170_write_reg(ar, 0x1d4014, 0x5163); + else + err = ar9170_write_reg(ar, 0x1d4014, 0x5143); + + return err; +} + +struct ar9170_rf_init { + u32 reg, _5ghz, _2ghz; +}; + +static struct ar9170_rf_init ar9170_rf_init[] = { + /* bank 0 */ + { 0x1c58b0, 0x1e5795e5, 0x1e5795e5}, + { 0x1c58e0, 0x02008020, 0x02008020}, + /* bank 1 */ + { 0x1c58b0, 0x02108421, 0x02108421}, + { 0x1c58ec, 0x00000008, 0x00000008}, + /* bank 2 */ + { 0x1c58b0, 0x0e73ff17, 0x0e73ff17}, + { 0x1c58e0, 0x00000420, 0x00000420}, + /* bank 3 */ + { 0x1c58f0, 0x01400018, 0x01c00018}, + /* bank 4 */ + { 0x1c58b0, 0x000001a1, 0x000001a1}, + { 0x1c58e8, 0x00000001, 0x00000001}, + /* bank 5 */ + { 0x1c58b0, 0x00000013, 0x00000013}, + { 0x1c58e4, 0x00000002, 0x00000002}, + /* bank 6 */ + { 0x1c58b0, 0x00000000, 0x00000000}, + { 0x1c58b0, 0x00000000, 0x00000000}, + { 0x1c58b0, 0x00000000, 0x00000000}, + { 0x1c58b0, 0x00000000, 0x00000000}, + { 0x1c58b0, 0x00000000, 0x00000000}, + { 0x1c58b0, 0x00004000, 0x00004000}, + { 0x1c58b0, 0x00006c00, 0x00006c00}, + { 0x1c58b0, 0x00002c00, 0x00002c00}, + { 0x1c58b0, 0x00004800, 0x00004800}, + { 0x1c58b0, 0x00004000, 0x00004000}, + { 0x1c58b0, 0x00006000, 0x00006000}, + { 0x1c58b0, 0x00001000, 0x00001000}, + { 0x1c58b0, 0x00004000, 0x00004000}, + { 0x1c58b0, 0x00007c00, 0x00007c00}, + { 0x1c58b0, 0x00007c00, 0x00007c00}, + { 0x1c58b0, 0x00007c00, 0x00007c00}, + { 0x1c58b0, 0x00007c00, 0x00007c00}, + { 0x1c58b0, 0x00007c00, 0x00007c00}, + { 0x1c58b0, 0x00087c00, 0x00087c00}, + { 0x1c58b0, 0x00007c00, 0x00007c00}, + { 0x1c58b0, 0x00005400, 0x00005400}, + { 0x1c58b0, 0x00000c00, 0x00000c00}, + { 0x1c58b0, 0x00001800, 0x00001800}, + { 0x1c58b0, 0x00007c00, 0x00007c00}, + { 0x1c58b0, 0x00006c00, 0x00006c00}, + { 0x1c58b0, 0x00006c00, 0x00006c00}, + { 0x1c58b0, 0x00007c00, 0x00007c00}, + { 0x1c58b0, 0x00002c00, 0x00002c00}, + { 0x1c58b0, 0x00003c00, 0x00003c00}, + { 0x1c58b0, 0x00003800, 0x00003800}, + { 0x1c58b0, 0x00001c00, 0x00001c00}, + { 0x1c58b0, 0x00000800, 0x00000800}, + { 0x1c58b0, 0x00000408, 0x00000408}, + { 0x1c58b0, 0x00004c15, 0x00004c15}, + { 0x1c58b0, 0x00004188, 0x00004188}, + { 0x1c58b0, 0x0000201e, 0x0000201e}, + { 0x1c58b0, 0x00010408, 0x00010408}, + { 0x1c58b0, 0x00000801, 0x00000801}, + { 0x1c58b0, 0x00000c08, 0x00000c08}, + { 0x1c58b0, 0x0000181e, 0x0000181e}, + { 0x1c58b0, 0x00001016, 0x00001016}, + { 0x1c58b0, 0x00002800, 0x00002800}, + { 0x1c58b0, 0x00004010, 0x00004010}, + { 0x1c58b0, 0x0000081c, 0x0000081c}, + { 0x1c58b0, 0x00000115, 0x00000115}, + { 0x1c58b0, 0x00000015, 0x00000015}, + { 0x1c58b0, 0x00000066, 0x00000066}, + { 0x1c58b0, 0x0000001c, 0x0000001c}, + { 0x1c58b0, 0x00000000, 0x00000000}, + { 0x1c58b0, 0x00000004, 0x00000004}, + { 0x1c58b0, 0x00000015, 0x00000015}, + { 0x1c58b0, 0x0000001f, 0x0000001f}, + { 0x1c58e0, 0x00000000, 0x00000400}, + /* bank 7 */ + { 0x1c58b0, 0x000000a0, 0x000000a0}, + { 0x1c58b0, 0x00000000, 0x00000000}, + { 0x1c58b0, 0x00000040, 0x00000040}, + { 0x1c58f0, 0x0000001c, 0x0000001c}, +}; + +static int ar9170_init_rf_banks_0_7(struct ar9170 *ar, bool band5ghz) +{ + int err, i; + + ar9170_regwrite_begin(ar); + + for (i = 0; i < ARRAY_SIZE(ar9170_rf_init); i++) + ar9170_regwrite(ar9170_rf_init[i].reg, + band5ghz ? 
ar9170_rf_init[i]._5ghz + : ar9170_rf_init[i]._2ghz); + + ar9170_regwrite_finish(); + err = ar9170_regwrite_result(); + if (err) + printk(KERN_ERR "%s: rf init failed\n", + wiphy_name(ar->hw->wiphy)); + return err; +} + +static int ar9170_init_rf_bank4_pwr(struct ar9170 *ar, bool band5ghz, + u32 freq, enum ar9170_bw bw) +{ + int err; + u32 d0, d1, td0, td1, fd0, fd1; + u8 chansel; + u8 refsel0 = 1, refsel1 = 0; + u8 lf_synth = 0; + + switch (bw) { + case AR9170_BW_40_ABOVE: + freq += 10; + break; + case AR9170_BW_40_BELOW: + freq -= 10; + break; + case AR9170_BW_20: + break; + case __AR9170_NUM_BW: + BUG(); + } + + if (band5ghz) { + if (freq % 10) { + chansel = (freq - 4800) / 5; + } else { + chansel = ((freq - 4800) / 10) * 2; + refsel0 = 0; + refsel1 = 1; + } + chansel = byte_rev_table[chansel]; + } else { + if (freq == 2484) { + chansel = 10 + (freq - 2274) / 5; + lf_synth = 1; + } else + chansel = 16 + (freq - 2272) / 5; + chansel *= 4; + chansel = byte_rev_table[chansel]; + } + + d1 = chansel; + d0 = 0x21 | + refsel0 << 3 | + refsel1 << 2 | + lf_synth << 1; + td0 = d0 & 0x1f; + td1 = d1 & 0x1f; + fd0 = td1 << 5 | td0; + + td0 = (d0 >> 5) & 0x7; + td1 = (d1 >> 5) & 0x7; + fd1 = td1 << 5 | td0; + + ar9170_regwrite_begin(ar); + + ar9170_regwrite(0x1c58b0, fd0); + ar9170_regwrite(0x1c58e8, fd1); + + ar9170_regwrite_finish(); + err = ar9170_regwrite_result(); + if (err) + return err; + + msleep(10); + + return 0; +} + +struct ar9170_phy_freq_params { + u8 coeff_exp; + u16 coeff_man; + u8 coeff_exp_shgi; + u16 coeff_man_shgi; +}; + +struct ar9170_phy_freq_entry { + u16 freq; + struct ar9170_phy_freq_params params[__AR9170_NUM_BW]; +}; + +/* NB: must be in sync with channel tables in main! */ +static const struct ar9170_phy_freq_entry ar9170_phy_freq_params[] = { +/* + * freq, + * 20MHz, + * 40MHz (below), + * 40Mhz (above), + */ + { 2412, { + { 3, 21737, 3, 19563, }, + { 3, 21827, 3, 19644, }, + { 3, 21647, 3, 19482, }, + } }, + { 2417, { + { 3, 21692, 3, 19523, }, + { 3, 21782, 3, 19604, }, + { 3, 21602, 3, 19442, }, + } }, + { 2422, { + { 3, 21647, 3, 19482, }, + { 3, 21737, 3, 19563, }, + { 3, 21558, 3, 19402, }, + } }, + { 2427, { + { 3, 21602, 3, 19442, }, + { 3, 21692, 3, 19523, }, + { 3, 21514, 3, 19362, }, + } }, + { 2432, { + { 3, 21558, 3, 19402, }, + { 3, 21647, 3, 19482, }, + { 3, 21470, 3, 19323, }, + } }, + { 2437, { + { 3, 21514, 3, 19362, }, + { 3, 21602, 3, 19442, }, + { 3, 21426, 3, 19283, }, + } }, + { 2442, { + { 3, 21470, 3, 19323, }, + { 3, 21558, 3, 19402, }, + { 3, 21382, 3, 19244, }, + } }, + { 2447, { + { 3, 21426, 3, 19283, }, + { 3, 21514, 3, 19362, }, + { 3, 21339, 3, 19205, }, + } }, + { 2452, { + { 3, 21382, 3, 19244, }, + { 3, 21470, 3, 19323, }, + { 3, 21295, 3, 19166, }, + } }, + { 2457, { + { 3, 21339, 3, 19205, }, + { 3, 21426, 3, 19283, }, + { 3, 21252, 3, 19127, }, + } }, + { 2462, { + { 3, 21295, 3, 19166, }, + { 3, 21382, 3, 19244, }, + { 3, 21209, 3, 19088, }, + } }, + { 2467, { + { 3, 21252, 3, 19127, }, + { 3, 21339, 3, 19205, }, + { 3, 21166, 3, 19050, }, + } }, + { 2472, { + { 3, 21209, 3, 19088, }, + { 3, 21295, 3, 19166, }, + { 3, 21124, 3, 19011, }, + } }, + { 2484, { + { 3, 21107, 3, 18996, }, + { 3, 21192, 3, 19073, }, + { 3, 21022, 3, 18920, }, + } }, + { 4920, { + { 4, 21313, 4, 19181, }, + { 4, 21356, 4, 19220, }, + { 4, 21269, 4, 19142, }, + } }, + { 4940, { + { 4, 21226, 4, 19104, }, + { 4, 21269, 4, 19142, }, + { 4, 21183, 4, 19065, }, + } }, + { 4960, { + { 4, 21141, 4, 19027, }, + { 4, 21183, 4, 19065, }, + { 4, 21098, 4, 18988, }, + 
} }, + { 4980, { + { 4, 21056, 4, 18950, }, + { 4, 21098, 4, 18988, }, + { 4, 21014, 4, 18912, }, + } }, + { 5040, { + { 4, 20805, 4, 18725, }, + { 4, 20846, 4, 18762, }, + { 4, 20764, 4, 18687, }, + } }, + { 5060, { + { 4, 20723, 4, 18651, }, + { 4, 20764, 4, 18687, }, + { 4, 20682, 4, 18614, }, + } }, + { 5080, { + { 4, 20641, 4, 18577, }, + { 4, 20682, 4, 18614, }, + { 4, 20601, 4, 18541, }, + } }, + { 5180, { + { 4, 20243, 4, 18219, }, + { 4, 20282, 4, 18254, }, + { 4, 20204, 4, 18183, }, + } }, + { 5200, { + { 4, 20165, 4, 18148, }, + { 4, 20204, 4, 18183, }, + { 4, 20126, 4, 18114, }, + } }, + { 5220, { + { 4, 20088, 4, 18079, }, + { 4, 20126, 4, 18114, }, + { 4, 20049, 4, 18044, }, + } }, + { 5240, { + { 4, 20011, 4, 18010, }, + { 4, 20049, 4, 18044, }, + { 4, 19973, 4, 17976, }, + } }, + { 5260, { + { 4, 19935, 4, 17941, }, + { 4, 19973, 4, 17976, }, + { 4, 19897, 4, 17907, }, + } }, + { 5280, { + { 4, 19859, 4, 17873, }, + { 4, 19897, 4, 17907, }, + { 4, 19822, 4, 17840, }, + } }, + { 5300, { + { 4, 19784, 4, 17806, }, + { 4, 19822, 4, 17840, }, + { 4, 19747, 4, 17772, }, + } }, + { 5320, { + { 4, 19710, 4, 17739, }, + { 4, 19747, 4, 17772, }, + { 4, 19673, 4, 17706, }, + } }, + { 5500, { + { 4, 19065, 4, 17159, }, + { 4, 19100, 4, 17190, }, + { 4, 19030, 4, 17127, }, + } }, + { 5520, { + { 4, 18996, 4, 17096, }, + { 4, 19030, 4, 17127, }, + { 4, 18962, 4, 17065, }, + } }, + { 5540, { + { 4, 18927, 4, 17035, }, + { 4, 18962, 4, 17065, }, + { 4, 18893, 4, 17004, }, + } }, + { 5560, { + { 4, 18859, 4, 16973, }, + { 4, 18893, 4, 17004, }, + { 4, 18825, 4, 16943, }, + } }, + { 5580, { + { 4, 18792, 4, 16913, }, + { 4, 18825, 4, 16943, }, + { 4, 18758, 4, 16882, }, + } }, + { 5600, { + { 4, 18725, 4, 16852, }, + { 4, 18758, 4, 16882, }, + { 4, 18691, 4, 16822, }, + } }, + { 5620, { + { 4, 18658, 4, 16792, }, + { 4, 18691, 4, 16822, }, + { 4, 18625, 4, 16762, }, + } }, + { 5640, { + { 4, 18592, 4, 16733, }, + { 4, 18625, 4, 16762, }, + { 4, 18559, 4, 16703, }, + } }, + { 5660, { + { 4, 18526, 4, 16673, }, + { 4, 18559, 4, 16703, }, + { 4, 18493, 4, 16644, }, + } }, + { 5680, { + { 4, 18461, 4, 16615, }, + { 4, 18493, 4, 16644, }, + { 4, 18428, 4, 16586, }, + } }, + { 5700, { + { 4, 18396, 4, 16556, }, + { 4, 18428, 4, 16586, }, + { 4, 18364, 4, 16527, }, + } }, + { 5745, { + { 4, 18252, 4, 16427, }, + { 4, 18284, 4, 16455, }, + { 4, 18220, 4, 16398, }, + } }, + { 5765, { + { 4, 18189, 5, 32740, }, + { 4, 18220, 4, 16398, }, + { 4, 18157, 5, 32683, }, + } }, + { 5785, { + { 4, 18126, 5, 32626, }, + { 4, 18157, 5, 32683, }, + { 4, 18094, 5, 32570, }, + } }, + { 5805, { + { 4, 18063, 5, 32514, }, + { 4, 18094, 5, 32570, }, + { 4, 18032, 5, 32458, }, + } }, + { 5825, { + { 4, 18001, 5, 32402, }, + { 4, 18032, 5, 32458, }, + { 4, 17970, 5, 32347, }, + } }, + { 5170, { + { 4, 20282, 4, 18254, }, + { 4, 20321, 4, 18289, }, + { 4, 20243, 4, 18219, }, + } }, + { 5190, { + { 4, 20204, 4, 18183, }, + { 4, 20243, 4, 18219, }, + { 4, 20165, 4, 18148, }, + } }, + { 5210, { + { 4, 20126, 4, 18114, }, + { 4, 20165, 4, 18148, }, + { 4, 20088, 4, 18079, }, + } }, + { 5230, { + { 4, 20049, 4, 18044, }, + { 4, 20088, 4, 18079, }, + { 4, 20011, 4, 18010, }, + } }, +}; + +static const struct ar9170_phy_freq_params * +ar9170_get_hw_dyn_params(struct ieee80211_channel *channel, + enum ar9170_bw bw) +{ + unsigned int chanidx = 0; + u16 freq = 2412; + + if (channel) { + chanidx = channel->hw_value; + freq = channel->center_freq; + } + + BUG_ON(chanidx >= ARRAY_SIZE(ar9170_phy_freq_params)); + + 
BUILD_BUG_ON(__AR9170_NUM_BW != 3); + + WARN_ON(ar9170_phy_freq_params[chanidx].freq != freq); + + return &ar9170_phy_freq_params[chanidx].params[bw]; +} + + +int ar9170_init_rf(struct ar9170 *ar) +{ + const struct ar9170_phy_freq_params *freqpar; + __le32 cmd[7]; + int err; + + err = ar9170_init_rf_banks_0_7(ar, false); + if (err) + return err; + + err = ar9170_init_rf_bank4_pwr(ar, false, 2412, AR9170_BW_20); + if (err) + return err; + + freqpar = ar9170_get_hw_dyn_params(NULL, AR9170_BW_20); + + cmd[0] = cpu_to_le32(2412 * 1000); + cmd[1] = cpu_to_le32(0); + cmd[2] = cpu_to_le32(1); + cmd[3] = cpu_to_le32(freqpar->coeff_exp); + cmd[4] = cpu_to_le32(freqpar->coeff_man); + cmd[5] = cpu_to_le32(freqpar->coeff_exp_shgi); + cmd[6] = cpu_to_le32(freqpar->coeff_man_shgi); + + /* RF_INIT echoes the command back to us */ + err = ar->exec_cmd(ar, AR9170_CMD_RF_INIT, + sizeof(cmd), (u8 *)cmd, + sizeof(cmd), (u8 *)cmd); + if (err) + return err; + + msleep(1000); + + return ar9170_echo_test(ar, 0xaabbccdd); +} + +static int ar9170_find_freq_idx(int nfreqs, u8 *freqs, u8 f) +{ + int idx = nfreqs - 2; + + while (idx >= 0) { + if (f >= freqs[idx]) + return idx; + idx--; + } + + return 0; +} + +static s32 ar9170_interpolate_s32(s32 x, s32 x1, s32 y1, s32 x2, s32 y2) +{ + /* nothing to interpolate, it's horizontal */ + if (y2 == y1) + return y1; + + /* check if we hit one of the edges */ + if (x == x1) + return y1; + if (x == x2) + return y2; + + /* x1 == x2 is bad, hopefully == x */ + if (x2 == x1) + return y1; + + return y1 + (((y2 - y1) * (x - x1)) / (x2 - x1)); +} + +static u8 ar9170_interpolate_u8(u8 x, u8 x1, u8 y1, u8 x2, u8 y2) +{ +#define SHIFT 8 + s32 y; + + y = ar9170_interpolate_s32(x << SHIFT, + x1 << SHIFT, y1 << SHIFT, + x2 << SHIFT, y2 << SHIFT); + + /* + * XXX: unwrap this expression + * Isn't it just DIV_ROUND_UP(y, 1<> SHIFT) + ((y & (1<<(SHIFT-1))) >> (SHIFT - 1)); +#undef SHIFT +} + +static int ar9170_set_power_cal(struct ar9170 *ar, u32 freq, enum ar9170_bw bw) +{ + struct ar9170_calibration_target_power_legacy *ctpl; + struct ar9170_calibration_target_power_ht *ctph; + u8 *ctpres; + int ntargets; + int idx, i, n; + u8 ackpower, ackchains, f; + u8 pwr_freqs[AR5416_MAX_NUM_TGT_PWRS]; + + if (freq < 3000) + f = freq - 2300; + else + f = (freq - 4800)/5; + + /* + * cycle through the various modes + * + * legacy modes first: 5G, 2G CCK, 2G OFDM + */ + for (i = 0; i < 3; i++) { + switch (i) { + case 0: /* 5 GHz legacy */ + ctpl = &ar->eeprom.cal_tgt_pwr_5G[0]; + ntargets = AR5416_NUM_5G_TARGET_PWRS; + ctpres = ar->power_5G_leg; + break; + case 1: /* 2.4 GHz CCK */ + ctpl = &ar->eeprom.cal_tgt_pwr_2G_cck[0]; + ntargets = AR5416_NUM_2G_CCK_TARGET_PWRS; + ctpres = ar->power_2G_cck; + break; + case 2: /* 2.4 GHz OFDM */ + ctpl = &ar->eeprom.cal_tgt_pwr_2G_ofdm[0]; + ntargets = AR5416_NUM_2G_OFDM_TARGET_PWRS; + ctpres = ar->power_2G_ofdm; + break; + default: + BUG(); + } + + for (n = 0; n < ntargets; n++) { + if (ctpl[n].freq == 0xff) + break; + pwr_freqs[n] = ctpl[n].freq; + } + ntargets = n; + idx = ar9170_find_freq_idx(ntargets, pwr_freqs, f); + for (n = 0; n < 4; n++) + ctpres[n] = ar9170_interpolate_u8( + f, + ctpl[idx + 0].freq, + ctpl[idx + 0].power[n], + ctpl[idx + 1].freq, + ctpl[idx + 1].power[n]); + } + + /* + * HT modes now: 5G HT20, 5G HT40, 2G CCK, 2G OFDM, 2G HT20, 2G HT40 + */ + for (i = 0; i < 4; i++) { + switch (i) { + case 0: /* 5 GHz HT 20 */ + ctph = &ar->eeprom.cal_tgt_pwr_5G_ht20[0]; + ntargets = AR5416_NUM_5G_TARGET_PWRS; + ctpres = ar->power_5G_ht20; + break; + case 
1: /* 5 GHz HT 40 */ + ctph = &ar->eeprom.cal_tgt_pwr_5G_ht40[0]; + ntargets = AR5416_NUM_5G_TARGET_PWRS; + ctpres = ar->power_5G_ht40; + break; + case 2: /* 2.4 GHz HT 20 */ + ctph = &ar->eeprom.cal_tgt_pwr_2G_ht20[0]; + ntargets = AR5416_NUM_2G_OFDM_TARGET_PWRS; + ctpres = ar->power_2G_ht20; + break; + case 3: /* 2.4 GHz HT 40 */ + ctph = &ar->eeprom.cal_tgt_pwr_2G_ht40[0]; + ntargets = AR5416_NUM_2G_OFDM_TARGET_PWRS; + ctpres = ar->power_2G_ht40; + break; + default: + BUG(); + } + + for (n = 0; n < ntargets; n++) { + if (ctph[n].freq == 0xff) + break; + pwr_freqs[n] = ctph[n].freq; + } + ntargets = n; + idx = ar9170_find_freq_idx(ntargets, pwr_freqs, f); + for (n = 0; n < 8; n++) + ctpres[n] = ar9170_interpolate_u8( + f, + ctph[idx + 0].freq, + ctph[idx + 0].power[n], + ctph[idx + 1].freq, + ctph[idx + 1].power[n]); + } + + /* set ACK/CTS TX power */ + ar9170_regwrite_begin(ar); + + if (ar->eeprom.tx_mask != 1) + ackchains = AR9170_TX_PHY_TXCHAIN_2; + else + ackchains = AR9170_TX_PHY_TXCHAIN_1; + + if (freq < 3000) + ackpower = ar->power_2G_ofdm[0] & 0x3f; + else + ackpower = ar->power_5G_leg[0] & 0x3f; + + ar9170_regwrite(0x1c3694, ackpower << 20 | ackchains << 26); + ar9170_regwrite(0x1c3bb4, ackpower << 5 | ackchains << 11 | + ackpower << 21 | ackchains << 27); + + ar9170_regwrite_finish(); + return ar9170_regwrite_result(); +} + +static int ar9170_calc_noise_dbm(u32 raw_noise) +{ + if (raw_noise & 0x100) + return ~((raw_noise & 0x0ff) >> 1); + else + return (raw_noise & 0xff) >> 1; +} + +int ar9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel, + enum ar9170_rf_init_mode rfi, enum ar9170_bw bw) +{ + const struct ar9170_phy_freq_params *freqpar; + u32 cmd, tmp, offs; + __le32 vals[8]; + int i, err; + bool bandswitch; + + /* clear BB heavy clip enable */ + err = ar9170_write_reg(ar, 0x1c59e0, 0x200); + if (err) + return err; + + /* may be NULL at first setup */ + if (ar->channel) + bandswitch = ar->channel->band != channel->band; + else + bandswitch = true; + + /* HW workaround */ + if (!ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] && + channel->center_freq <= 2417) + bandswitch = true; + + err = ar->exec_cmd(ar, AR9170_CMD_FREQ_START, 0, NULL, 0, NULL); + if (err) + return err; + + if (rfi != AR9170_RFI_NONE || bandswitch) { + u32 val = 0x400; + + if (rfi == AR9170_RFI_COLD) + val = 0x800; + + /* warm/cold reset BB/ADDA */ + err = ar9170_write_reg(ar, 0x1d4004, val); + if (err) + return err; + + err = ar9170_write_reg(ar, 0x1d4004, 0x0); + if (err) + return err; + + err = ar9170_init_phy(ar, channel->band); + if (err) + return err; + + err = ar9170_init_rf_banks_0_7(ar, + channel->band == IEEE80211_BAND_5GHZ); + if (err) + return err; + + cmd = AR9170_CMD_RF_INIT; + } else { + cmd = AR9170_CMD_FREQUENCY; + } + + err = ar9170_init_rf_bank4_pwr(ar, + channel->band == IEEE80211_BAND_5GHZ, + channel->center_freq, bw); + if (err) + return err; + + switch (bw) { + case AR9170_BW_20: + tmp = 0x240; + offs = 0; + break; + case AR9170_BW_40_BELOW: + tmp = 0x2c4; + offs = 3; + break; + case AR9170_BW_40_ABOVE: + tmp = 0x2d4; + offs = 1; + break; + default: + BUG(); + return -ENOSYS; + } + + if (0 /* 2 streams capable */) + tmp |= 0x100; + + err = ar9170_write_reg(ar, 0x1c5804, tmp); + if (err) + return err; + + err = ar9170_set_power_cal(ar, channel->center_freq, bw); + if (err) + return err; + + freqpar = ar9170_get_hw_dyn_params(channel, bw); + + vals[0] = cpu_to_le32(channel->center_freq * 1000); + vals[1] = cpu_to_le32(bw == AR9170_BW_20 ? 
0 : 1); + vals[2] = cpu_to_le32(offs << 2 | 1); + vals[3] = cpu_to_le32(freqpar->coeff_exp); + vals[4] = cpu_to_le32(freqpar->coeff_man); + vals[5] = cpu_to_le32(freqpar->coeff_exp_shgi); + vals[6] = cpu_to_le32(freqpar->coeff_man_shgi); + vals[7] = cpu_to_le32(1000); + + err = ar->exec_cmd(ar, cmd, sizeof(vals), (u8 *)vals, + sizeof(vals), (u8 *)vals); + if (err) + return err; + + for (i = 0; i < 2; i++) { + ar->noise[i] = ar9170_calc_noise_dbm( + (le32_to_cpu(vals[2 + i]) >> 19) & 0x1ff); + + ar->noise[i + 2] = ar9170_calc_noise_dbm( + (le32_to_cpu(vals[5 + i]) >> 23) & 0x1ff); + } + + ar->channel = channel; + return 0; +} -- cgit v0.10.2 From b63a2cb30405777033d58045c562a3b04d87d702 Mon Sep 17 00:00:00 2001 From: Christian Lamparter Date: Sat, 21 Mar 2009 23:05:48 +0100 Subject: ar9170: ar9170: USB frontend driver USB frontend driver code for Atheros' AR9170 modules. Signed-off-by: Christian Lamparter Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ar9170/usb.c b/drivers/net/wireless/ar9170/usb.c new file mode 100644 index 0000000..ede511e --- /dev/null +++ b/drivers/net/wireless/ar9170/usb.c @@ -0,0 +1,747 @@ +/* + * Atheros AR9170 driver + * + * USB - frontend + * + * Copyright 2008, Johannes Berg + * Copyright 2009, Christian Lamparter + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, see + * http://www.gnu.org/licenses/. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * Copyright (c) 2007-2008 Atheros Communications, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include +#include "ar9170.h" +#include "cmd.h" +#include "hw.h" +#include "usb.h" + +MODULE_AUTHOR("Christian Lamparter "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("USB Driver for Atheros AR9170 based devices"); +MODULE_FIRMWARE("ar9170-1.fw"); +MODULE_FIRMWARE("ar9170-2.fw"); + +static struct usb_device_id ar9170_usb_ids[] = { + /* Atheros 9170 */ + { USB_DEVICE(0x0cf3, 0x9170) }, + /* Atheros TG121N */ + { USB_DEVICE(0x0cf3, 0x1001) }, + /* D-Link DWA 160A */ + { USB_DEVICE(0x07d1, 0x3c10) }, + /* Netgear WNDA3100 */ + { USB_DEVICE(0x0846, 0x9010) }, + /* Netgear WN111 v2 */ + { USB_DEVICE(0x0846, 0x9001) }, + /* Zydas ZD1221 */ + { USB_DEVICE(0x0ace, 0x1221) }, + /* Z-Com UB81 BG */ + { USB_DEVICE(0x0cde, 0x0023) }, + /* Z-Com UB82 ABG */ + { USB_DEVICE(0x0cde, 0x0026) }, + /* Arcadyan WN7512 */ + { USB_DEVICE(0x083a, 0xf522) }, + /* Planex GWUS300 */ + { USB_DEVICE(0x2019, 0x5304) }, + /* IO-Data WNGDNUS2 */ + { USB_DEVICE(0x04bb, 0x093f) }, + + /* terminate */ + {} +}; +MODULE_DEVICE_TABLE(usb, ar9170_usb_ids); + +static void ar9170_usb_tx_urb_complete_free(struct urb *urb) +{ + struct sk_buff *skb = urb->context; + struct ar9170_usb *aru = (struct ar9170_usb *) + usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0)); + + if (!aru) { + dev_kfree_skb_irq(skb); + return ; + } + + ar9170_handle_tx_status(&aru->common, skb, false, + AR9170_TX_STATUS_COMPLETE); +} + +static void ar9170_usb_tx_urb_complete(struct urb *urb) +{ +} + +static void ar9170_usb_irq_completed(struct urb *urb) +{ + struct ar9170_usb *aru = urb->context; + + switch (urb->status) { + /* everything is fine */ + case 0: + break; + + /* disconnect */ + case -ENOENT: + case -ECONNRESET: + case -ENODEV: + case -ESHUTDOWN: + goto free; + + default: + goto resubmit; + } + + print_hex_dump_bytes("ar9170 irq: ", DUMP_PREFIX_OFFSET, + urb->transfer_buffer, urb->actual_length); + +resubmit: + usb_anchor_urb(urb, &aru->rx_submitted); + if (usb_submit_urb(urb, GFP_ATOMIC)) { + usb_unanchor_urb(urb); + goto free; + } + + return; + +free: + usb_buffer_free(aru->udev, 64, urb->transfer_buffer, urb->transfer_dma); +} + +static void ar9170_usb_rx_completed(struct urb *urb) +{ + struct sk_buff *skb = urb->context; + struct ar9170_usb *aru = (struct ar9170_usb *) + usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0)); + int err; + + if (!aru) + goto free; + + switch (urb->status) { + /* everything is fine */ + case 0: + break; + + /* disconnect */ + case -ENOENT: + case -ECONNRESET: + case -ENODEV: + case -ESHUTDOWN: + goto free; + + default: + goto resubmit; + } + + skb_put(skb, urb->actual_length); + ar9170_rx(&aru->common, skb); + +resubmit: + skb_reset_tail_pointer(skb); + skb_trim(skb, 0); + + usb_anchor_urb(urb, &aru->rx_submitted); + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err) { + usb_unanchor_urb(urb); + dev_kfree_skb_irq(skb); + } + + return ; + +free: + dev_kfree_skb_irq(skb); + return; +} + +static int ar9170_usb_prep_rx_urb(struct ar9170_usb *aru, + struct urb *urb, gfp_t gfp) +{ + struct sk_buff *skb; + + skb = __dev_alloc_skb(AR9170_MAX_RX_BUFFER_SIZE + 32, gfp); + if (!skb) + return -ENOMEM; + + /* reserve some space for mac80211's radiotap */ + skb_reserve(skb, 32); + + usb_fill_bulk_urb(urb, aru->udev, + usb_rcvbulkpipe(aru->udev, AR9170_EP_RX), + skb->data, min(skb_tailroom(skb), + AR9170_MAX_RX_BUFFER_SIZE), + ar9170_usb_rx_completed, skb); + + return 0; +} + +static int ar9170_usb_alloc_rx_irq_urb(struct ar9170_usb *aru) +{ + struct urb *urb = NULL; + void *ibuf; + int err 
= -ENOMEM; + + /* initialize interrupt endpoint */ + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) + goto out; + + ibuf = usb_buffer_alloc(aru->udev, 64, GFP_KERNEL, &urb->transfer_dma); + if (!ibuf) + goto out; + + usb_fill_int_urb(urb, aru->udev, + usb_rcvintpipe(aru->udev, AR9170_EP_IRQ), ibuf, + 64, ar9170_usb_irq_completed, aru, 1); + urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + + usb_anchor_urb(urb, &aru->rx_submitted); + err = usb_submit_urb(urb, GFP_KERNEL); + if (err) { + usb_unanchor_urb(urb); + usb_buffer_free(aru->udev, 64, urb->transfer_buffer, + urb->transfer_dma); + } + +out: + usb_free_urb(urb); + return err; +} + +static int ar9170_usb_alloc_rx_bulk_urbs(struct ar9170_usb *aru) +{ + struct urb *urb; + int i; + int err = -EINVAL; + + for (i = 0; i < AR9170_NUM_RX_URBS; i++) { + err = -ENOMEM; + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) + goto err_out; + + err = ar9170_usb_prep_rx_urb(aru, urb, GFP_KERNEL); + if (err) { + usb_free_urb(urb); + goto err_out; + } + + usb_anchor_urb(urb, &aru->rx_submitted); + err = usb_submit_urb(urb, GFP_KERNEL); + if (err) { + usb_unanchor_urb(urb); + dev_kfree_skb_any((void *) urb->transfer_buffer); + usb_free_urb(urb); + goto err_out; + } + usb_free_urb(urb); + } + + /* the device now waiting for a firmware. */ + aru->common.state = AR9170_IDLE; + return 0; + +err_out: + + usb_kill_anchored_urbs(&aru->rx_submitted); + return err; +} + +static void ar9170_usb_cancel_urbs(struct ar9170_usb *aru) +{ + int ret; + + aru->common.state = AR9170_UNKNOWN_STATE; + + usb_unlink_anchored_urbs(&aru->tx_submitted); + + /* give the LED OFF command and the deauth frame a chance to air. */ + ret = usb_wait_anchor_empty_timeout(&aru->tx_submitted, + msecs_to_jiffies(100)); + if (ret == 0) + dev_err(&aru->udev->dev, "kill pending tx urbs.\n"); + usb_poison_anchored_urbs(&aru->tx_submitted); + + usb_poison_anchored_urbs(&aru->rx_submitted); +} + +static int ar9170_usb_exec_cmd(struct ar9170 *ar, enum ar9170_cmd cmd, + unsigned int plen, void *payload, + unsigned int outlen, void *out) +{ + struct ar9170_usb *aru = (void *) ar; + struct urb *urb = NULL; + unsigned long flags; + int err = -ENOMEM; + + if (unlikely(!IS_ACCEPTING_CMD(ar))) + return -EPERM; + + if (WARN_ON(plen > AR9170_MAX_CMD_LEN - 4)) + return -EINVAL; + + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (unlikely(!urb)) + goto err_free; + + ar->cmdbuf[0] = cpu_to_le32(plen); + ar->cmdbuf[0] |= cpu_to_le32(cmd << 8); + /* writing multiple regs fills this buffer already */ + if (plen && payload != (u8 *)(&ar->cmdbuf[1])) + memcpy(&ar->cmdbuf[1], payload, plen); + + spin_lock_irqsave(&aru->common.cmdlock, flags); + aru->readbuf = (u8 *)out; + aru->readlen = outlen; + spin_unlock_irqrestore(&aru->common.cmdlock, flags); + + usb_fill_int_urb(urb, aru->udev, + usb_sndbulkpipe(aru->udev, AR9170_EP_CMD), + aru->common.cmdbuf, plen + 4, + ar9170_usb_tx_urb_complete, NULL, 1); + + usb_anchor_urb(urb, &aru->tx_submitted); + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err) { + usb_unanchor_urb(urb); + usb_free_urb(urb); + goto err_unbuf; + } + usb_free_urb(urb); + + err = wait_for_completion_timeout(&aru->cmd_wait, HZ); + if (err == 0) { + err = -ETIMEDOUT; + goto err_unbuf; + } + + if (outlen >= 0 && aru->readlen != outlen) { + err = -EMSGSIZE; + goto err_unbuf; + } + + return 0; + +err_unbuf: + /* Maybe the device was removed in the second we were waiting? 
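For a concrete picture of the command header assembled a few lines up: with an 8-byte payload and a hypothetical command id of 0x04, the first little-endian word written to the CMD endpoint becomes

	ar->cmdbuf[0] = cpu_to_le32(8 | (0x04 << 8));	/* = 0x00000408 */

i.e. the transfer starts with the bytes 0x08 0x04 0x00 0x00, followed by the eight payload bytes that were copied into &ar->cmdbuf[1].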
*/ + if (IS_STARTED(ar)) { + dev_err(&aru->udev->dev, "no command feedback " + "received (%d).\n", err); + + /* provide some maybe useful debug information */ + print_hex_dump_bytes("ar9170 cmd: ", DUMP_PREFIX_NONE, + aru->common.cmdbuf, plen + 4); + dump_stack(); + } + + /* invalidate to avoid completing the next prematurely */ + spin_lock_irqsave(&aru->common.cmdlock, flags); + aru->readbuf = NULL; + aru->readlen = 0; + spin_unlock_irqrestore(&aru->common.cmdlock, flags); + +err_free: + + return err; +} + +static int ar9170_usb_tx(struct ar9170 *ar, struct sk_buff *skb, + bool txstatus_needed, unsigned int extra_len) +{ + struct ar9170_usb *aru = (struct ar9170_usb *) ar; + struct urb *urb; + int err; + + if (unlikely(!IS_STARTED(ar))) { + /* Seriously, what were you drink... err... thinking!? */ + return -EPERM; + } + + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (unlikely(!urb)) + return -ENOMEM; + + usb_fill_bulk_urb(urb, aru->udev, + usb_sndbulkpipe(aru->udev, AR9170_EP_TX), + skb->data, skb->len + extra_len, (txstatus_needed ? + ar9170_usb_tx_urb_complete : + ar9170_usb_tx_urb_complete_free), skb); + urb->transfer_flags |= URB_ZERO_PACKET; + + usb_anchor_urb(urb, &aru->tx_submitted); + err = usb_submit_urb(urb, GFP_ATOMIC); + if (unlikely(err)) + usb_unanchor_urb(urb); + + usb_free_urb(urb); + return err; +} + +static void ar9170_usb_callback_cmd(struct ar9170 *ar, u32 len , void *buffer) +{ + struct ar9170_usb *aru = (void *) ar; + unsigned long flags; + u32 in, out; + + if (!buffer) + return ; + + in = le32_to_cpup((__le32 *)buffer); + out = le32_to_cpu(ar->cmdbuf[0]); + + /* mask off length byte */ + out &= ~0xFF; + + if (aru->readlen >= 0) { + /* add expected length */ + out |= aru->readlen; + } else { + /* add obtained length */ + out |= in & 0xFF; + } + + /* + * Some commands (e.g: AR9170_CMD_FREQUENCY) have a variable response + * length and we cannot predict the correct length in advance. + * So we only check if we provided enough space for the data. + */ + if (unlikely(out < in)) { + dev_warn(&aru->udev->dev, "received invalid command response " + "got %d bytes, instead of %d bytes " + "and the resp length is %d bytes\n", + in, out, len); + print_hex_dump_bytes("ar9170 invalid resp: ", + DUMP_PREFIX_OFFSET, buffer, len); + /* + * Do not complete, then the command times out, + * and we get a stack trace from there. 
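Seen from the common code, a full round trip through exec_cmd() and ar9170_usb_callback_cmd() looks roughly like the echo test used by ar9170_init_rf() and the probe path. The command id name below is an assumption (the real one lives in cmd.h, which is not part of this patch); the rest simply follows the callback contract above:

static int ar9170_echo_test_sketch(struct ar9170 *ar, u32 v)
{
	__le32 want = cpu_to_le32(v);
	__le32 echoed = 0;
	int err;

	/* exec_cmd() prepends the 4-byte header, submits the URB on the CMD
	 * endpoint and sleeps on cmd_wait until the callback has copied the
	 * response payload into 'echoed'. */
	err = ar->exec_cmd(ar, AR9170_CMD_ECHO /* assumed id */,
			   sizeof(want), &want,
			   sizeof(echoed), &echoed);
	if (err)
		return err;

	return (echoed == want) ? 0 : -EIO;
}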
+ */ + return ; + } + + spin_lock_irqsave(&aru->common.cmdlock, flags); + if (aru->readbuf && len > 0) { + memcpy(aru->readbuf, buffer + 4, len - 4); + aru->readbuf = NULL; + } + complete(&aru->cmd_wait); + spin_unlock_irqrestore(&aru->common.cmdlock, flags); +} + +static int ar9170_usb_upload(struct ar9170_usb *aru, const void *data, + size_t len, u32 addr, bool complete) +{ + int transfer, err; + u8 *buf = kmalloc(4096, GFP_KERNEL); + + if (!buf) + return -ENOMEM; + + while (len) { + transfer = min_t(int, len, 4096); + memcpy(buf, data, transfer); + + err = usb_control_msg(aru->udev, usb_sndctrlpipe(aru->udev, 0), + 0x30 /* FW DL */, 0x40 | USB_DIR_OUT, + addr >> 8, 0, buf, transfer, 1000); + + if (err < 0) { + kfree(buf); + return err; + } + + len -= transfer; + data += transfer; + addr += transfer; + } + kfree(buf); + + if (complete) { + err = usb_control_msg(aru->udev, usb_sndctrlpipe(aru->udev, 0), + 0x31 /* FW DL COMPLETE */, + 0x40 | USB_DIR_OUT, 0, 0, NULL, 0, 5000); + } + + return 0; +} + +static int ar9170_usb_request_firmware(struct ar9170_usb *aru) +{ + int err = 0; + + err = request_firmware(&aru->init_values, "ar9170-1.fw", + &aru->udev->dev); + if (err) { + dev_err(&aru->udev->dev, "file with init values not found.\n"); + return err; + } + + err = request_firmware(&aru->firmware, "ar9170-2.fw", &aru->udev->dev); + if (err) { + release_firmware(aru->init_values); + dev_err(&aru->udev->dev, "firmware file not found.\n"); + return err; + } + + return err; +} + +static int ar9170_usb_reset(struct ar9170_usb *aru) +{ + int ret, lock = (aru->intf->condition != USB_INTERFACE_BINDING); + + if (lock) { + ret = usb_lock_device_for_reset(aru->udev, aru->intf); + if (ret < 0) { + dev_err(&aru->udev->dev, "unable to lock device " + "for reset (%d).\n", ret); + return ret; + } + } + + ret = usb_reset_device(aru->udev); + if (lock) + usb_unlock_device(aru->udev); + + /* let it rest - for a second - */ + msleep(1000); + + return ret; +} + +static int ar9170_usb_upload_firmware(struct ar9170_usb *aru) +{ + int err; + + /* First, upload initial values to device RAM */ + err = ar9170_usb_upload(aru, aru->init_values->data, + aru->init_values->size, 0x102800, false); + if (err) { + dev_err(&aru->udev->dev, "firmware part 1 " + "upload failed (%d).\n", err); + return err; + } + + /* Then, upload the firmware itself and start it */ + return ar9170_usb_upload(aru, aru->firmware->data, aru->firmware->size, + 0x200000, true); +} + +static int ar9170_usb_init_transport(struct ar9170_usb *aru) +{ + struct ar9170 *ar = (void *) &aru->common; + int err; + + ar9170_regwrite_begin(ar); + + /* Set USB Rx stream mode MAX packet number to 2 */ + ar9170_regwrite(AR9170_USB_REG_MAX_AGG_UPLOAD, 0x4); + + /* Set USB Rx stream mode timeout to 10us */ + ar9170_regwrite(AR9170_USB_REG_UPLOAD_TIME_CTL, 0x80); + + ar9170_regwrite_finish(); + + err = ar9170_regwrite_result(); + if (err) + dev_err(&aru->udev->dev, "USB setup failed (%d).\n", err); + + return err; +} + +static void ar9170_usb_stop(struct ar9170 *ar) +{ + struct ar9170_usb *aru = (void *) ar; + int ret; + + if (IS_ACCEPTING_CMD(ar)) + aru->common.state = AR9170_STOPPED; + + /* lets wait a while until the tx - queues are dried out */ + ret = usb_wait_anchor_empty_timeout(&aru->tx_submitted, + msecs_to_jiffies(1000)); + if (ret == 0) + dev_err(&aru->udev->dev, "kill pending tx urbs.\n"); + + usb_poison_anchored_urbs(&aru->tx_submitted); + + /* + * Note: + * So far we freed all tx urbs, but we won't dare to touch any rx urbs. 
+ * Else we would end up with a unresponsive device... + */ +} + +static int ar9170_usb_open(struct ar9170 *ar) +{ + struct ar9170_usb *aru = (void *) ar; + int err; + + usb_unpoison_anchored_urbs(&aru->tx_submitted); + err = ar9170_usb_init_transport(aru); + if (err) { + usb_poison_anchored_urbs(&aru->tx_submitted); + return err; + } + + aru->common.state = AR9170_IDLE; + return 0; +} + +static int ar9170_usb_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct ar9170_usb *aru; + struct ar9170 *ar; + struct usb_device *udev; + int err; + + aru = ar9170_alloc(sizeof(*aru)); + if (IS_ERR(aru)) { + err = PTR_ERR(aru); + goto out; + } + + udev = interface_to_usbdev(intf); + usb_get_dev(udev); + aru->udev = udev; + aru->intf = intf; + ar = &aru->common; + + usb_set_intfdata(intf, aru); + SET_IEEE80211_DEV(ar->hw, &udev->dev); + + init_usb_anchor(&aru->rx_submitted); + init_usb_anchor(&aru->tx_submitted); + init_completion(&aru->cmd_wait); + + aru->common.stop = ar9170_usb_stop; + aru->common.open = ar9170_usb_open; + aru->common.tx = ar9170_usb_tx; + aru->common.exec_cmd = ar9170_usb_exec_cmd; + aru->common.callback_cmd = ar9170_usb_callback_cmd; + + err = ar9170_usb_reset(aru); + if (err) + goto err_unlock; + + err = ar9170_usb_request_firmware(aru); + if (err) + goto err_unlock; + + err = ar9170_usb_alloc_rx_irq_urb(aru); + if (err) + goto err_freefw; + + err = ar9170_usb_alloc_rx_bulk_urbs(aru); + if (err) + goto err_unrx; + + err = ar9170_usb_upload_firmware(aru); + if (err) { + err = ar9170_echo_test(&aru->common, 0x60d43110); + if (err) { + /* force user invention, by disabling the device */ + err = usb_driver_set_configuration(aru->udev, -1); + dev_err(&aru->udev->dev, "device is in a bad state. " + "please reconnect it!\n"); + goto err_unrx; + } + } + + err = ar9170_usb_open(ar); + if (err) + goto err_unrx; + + err = ar9170_register(ar, &udev->dev); + + ar9170_usb_stop(ar); + if (err) + goto err_unrx; + + return 0; + +err_unrx: + ar9170_usb_cancel_urbs(aru); + +err_freefw: + release_firmware(aru->init_values); + release_firmware(aru->firmware); + +err_unlock: + usb_set_intfdata(intf, NULL); + usb_put_dev(udev); + ieee80211_free_hw(ar->hw); +out: + return err; +} + +static void ar9170_usb_disconnect(struct usb_interface *intf) +{ + struct ar9170_usb *aru = usb_get_intfdata(intf); + + if (!aru) + return; + + aru->common.state = AR9170_IDLE; + ar9170_unregister(&aru->common); + ar9170_usb_cancel_urbs(aru); + + release_firmware(aru->init_values); + release_firmware(aru->firmware); + + usb_put_dev(aru->udev); + usb_set_intfdata(intf, NULL); + ieee80211_free_hw(aru->common.hw); +} + +static struct usb_driver ar9170_driver = { + .name = "ar9170usb", + .probe = ar9170_usb_probe, + .disconnect = ar9170_usb_disconnect, + .id_table = ar9170_usb_ids, + .soft_unbind = 1, +}; + +static int __init ar9170_init(void) +{ + return usb_register(&ar9170_driver); +} + +static void __exit ar9170_exit(void) +{ + usb_deregister(&ar9170_driver); +} + +module_init(ar9170_init); +module_exit(ar9170_exit); diff --git a/drivers/net/wireless/ar9170/usb.h b/drivers/net/wireless/ar9170/usb.h new file mode 100644 index 0000000..f585292 --- /dev/null +++ b/drivers/net/wireless/ar9170/usb.h @@ -0,0 +1,74 @@ +/* + * Atheros AR9170 USB driver + * + * Driver specific definitions + * + * Copyright 2008, Johannes Berg + * Copyright 2009, Christian Lamparter + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as 
published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, see + * http://www.gnu.org/licenses/. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * Copyright (c) 2007-2008 Atheros Communications, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +#ifndef __USB_H +#define __USB_H + +#include +#include +#include +#include +#include +#include +#include +#include "eeprom.h" +#include "hw.h" +#include "ar9170.h" + +#define AR9170_NUM_RX_URBS 16 + +struct firmware; + +struct ar9170_usb { + struct ar9170 common; + struct usb_device *udev; + struct usb_interface *intf; + + struct usb_anchor rx_submitted; + struct usb_anchor tx_submitted; + + spinlock_t cmdlock; + struct completion cmd_wait; + int readlen; + u8 *readbuf; + + const struct firmware *init_values; + const struct firmware *firmware; +}; + +#endif /* __USB_H */ -- cgit v0.10.2 From 75ca88c9701bf8f65fc1f009f10dd64b0378b977 Mon Sep 17 00:00:00 2001 From: Christian Lamparter Date: Sat, 21 Mar 2009 23:11:49 +0100 Subject: ar9170: update Makefile, Kconfig and MAINTAINERS This patch update all necessary Makefile and Kconfig files. Now you can then enable ar9170 in the kernel configuration under: Device Drivers ---> [*] Network device support ---> Wireless LAN ---> [*] Wireless LAN (IEEE 802.11) Atheros AR9170 support Atheros AR9170 USB support Signed-off-by: Christian Lamparter Signed-off-by: John W. 
Linville diff --git a/MAINTAINERS b/MAINTAINERS index bdadcb3..64c89c2 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -765,6 +765,14 @@ L: linux-wireless@vger.kernel.org L: ath9k-devel@lists.ath9k.org S: Supported +ATHEROS AR9170 WIRELESS DRIVER +P: Christian Lamparter +M: chunkeey@web.de +L: linux-wireless@vger.kernel.org +W: http://wireless.kernel.org/en/users/Drivers/ar9170 +S: Maintained +F: drivers/net/wireless/ar9170/ + ATI_REMOTE2 DRIVER P: Ville Syrjala M: syrjala@sci.fi diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index 612fffe..8a08235 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -485,6 +485,7 @@ config MWL8K source "drivers/net/wireless/p54/Kconfig" source "drivers/net/wireless/ath5k/Kconfig" source "drivers/net/wireless/ath9k/Kconfig" +source "drivers/net/wireless/ar9170/Kconfig" source "drivers/net/wireless/ipw2x00/Kconfig" source "drivers/net/wireless/iwlwifi/Kconfig" source "drivers/net/wireless/hostap/Kconfig" diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile index d780487..5e7c9ac 100644 --- a/drivers/net/wireless/Makefile +++ b/drivers/net/wireless/Makefile @@ -57,5 +57,6 @@ obj-$(CONFIG_P54_COMMON) += p54/ obj-$(CONFIG_ATH5K) += ath5k/ obj-$(CONFIG_ATH9K) += ath9k/ +obj-$(CONFIG_AR9170_COMMON) += ar9170/ obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o diff --git a/drivers/net/wireless/ar9170/Kconfig b/drivers/net/wireless/ar9170/Kconfig new file mode 100644 index 0000000..f661187 --- /dev/null +++ b/drivers/net/wireless/ar9170/Kconfig @@ -0,0 +1,28 @@ +config AR9170_COMMON + tristate "Atheros AR9170 support" + depends on WLAN_80211 && MAC80211 && EXPERIMENTAL + help + This is common code for AR9170 based devices. + This module does nothing by itself - the USB/(SPI) frontends + also need to be enabled in order to support any devices. + + Say Y if you have the hardware, or M to build a module called + ar9170common. + +config AR9170_USB + tristate "Atheros AR9170 USB support" + depends on AR9170_COMMON && USB + select FW_LOADER + help + This is a driver for the Atheros "otus" 802.11n USB devices. + + These devices require additional firmware (2 files). + For now, these files can be downloaded from here: + http://wireless.kernel.org/en/users/Drivers/ar9170 + + If you choose to build a module, it'll be called ar9170usb. + +config AR9170_LEDS + bool + depends on AR9170_COMMON && MAC80211_LEDS && (LEDS_CLASS = y || LEDS_CLASS = AR9170_COMMON) + default y diff --git a/drivers/net/wireless/ar9170/Makefile b/drivers/net/wireless/ar9170/Makefile new file mode 100644 index 0000000..59b174d --- /dev/null +++ b/drivers/net/wireless/ar9170/Makefile @@ -0,0 +1,5 @@ +ar9170common-objs += main.o cmd.o mac.o phy.o led.o +ar9170usb-objs += usb.o + +obj-$(CONFIG_AR9170_COMMON) += ar9170common.o +obj-$(CONFIG_AR9170_USB) += ar9170usb.o -- cgit v0.10.2 From af83debf5bb44257082d4489ac86123a0cadf6d3 Mon Sep 17 00:00:00 2001 From: Tulio Magno Quites Machado Filho Date: Sun, 22 Mar 2009 01:41:13 +0100 Subject: ath5k: Support LED's on Acer Extensa 5620z Add vendor ID for Quanta Microsystems and update the led table with the reported device. Reported-by: Scott Barnes Signed-off-by: Tulio Magno Quites Machado Filho Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath5k/led.c b/drivers/net/wireless/ath5k/led.c index 0686e12..19555fb 100644 --- a/drivers/net/wireless/ath5k/led.c +++ b/drivers/net/wireless/ath5k/led.c @@ -65,6 +65,8 @@ static const struct pci_device_id ath5k_led_devices[] = { { ATH_SDEVICE(PCI_VENDOR_ID_AMBIT, 0x0422), ATH_LED(1, 1) }, /* E-machines E510 (tuliom@gmail.com) */ { ATH_SDEVICE(PCI_VENDOR_ID_AMBIT, 0x0428), ATH_LED(3, 0) }, + /* Acer Extensa 5620z (nekoreeve@gmail.com) */ + { ATH_SDEVICE(PCI_VENDOR_ID_QMI, 0x0105), ATH_LED(3, 0) }, { } }; diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 097f410..05dfa7c 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2271,6 +2271,8 @@ #define PCI_DEVICE_ID_KORENIX_JETCARDF0 0x1600 #define PCI_DEVICE_ID_KORENIX_JETCARDF1 0x16ff +#define PCI_VENDOR_ID_QMI 0x1a32 + #define PCI_VENDOR_ID_TEKRAM 0x1de1 #define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29 -- cgit v0.10.2 From 3cf335d527ba6af80f4143f3c9e5136afdb143af Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Sun, 22 Mar 2009 21:57:06 +0200 Subject: mac80211: decrease execution of the associated timer Currently the timer is triggering every two seconds (IEEE80211_MONITORING_INTERVAL). Decrease the timer to only trigger during data idle periods to avoid waking up CPU unnecessary. The timer will still trigger during idle periods, that needs to be fixed later. There's also a functional change that probe requests are sent only when the data path is idle, earlier they were sent also while there was activity on the data path. This is also preparation for the beacon filtering support. Thanks to Johannes Berg for the idea. Signed-off-by: Kalle Valo Signed-off-by: John W. Linville diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 564167f..055bb77 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1083,6 +1083,8 @@ void ieee80211_dynamic_ps_timer(unsigned long data); void ieee80211_send_nullfunc(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, int powersave); +void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, + struct ieee80211_hdr *hdr); void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw, enum queue_stop_reason reason); diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index c05be09..209abb0 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -909,6 +909,21 @@ static void ieee80211_associate(struct ieee80211_sub_if_data *sdata) mod_timer(&ifmgd->timer, jiffies + IEEE80211_ASSOC_TIMEOUT); } +void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, + struct ieee80211_hdr *hdr) +{ + /* + * We can postpone the mgd.timer whenever receiving unicast frames + * from AP because we know that the connection is working both ways + * at that time. But multicast frames (and hence also beacons) must + * be ignored here, because we need to trigger the timer during + * data idle periods for sending the periodical probe request to + * the AP. 
+ */ + if (!is_multicast_ether_addr(hdr->addr1)) + mod_timer(&sdata->u.mgd.timer, + jiffies + IEEE80211_MONITORING_INTERVAL); +} static void ieee80211_associated(struct ieee80211_sub_if_data *sdata) { diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 47d395a..dbfb284 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -856,6 +856,9 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) if (!(rx->flags & IEEE80211_RX_RA_MATCH)) return RX_CONTINUE; + if (rx->sdata->vif.type == NL80211_IFTYPE_STATION) + ieee80211_sta_rx_notify(rx->sdata, hdr); + sta->rx_fragments++; sta->rx_bytes += rx->skb->len; sta->last_signal = rx->status->signal; -- cgit v0.10.2 From 15b7b0629c8213905926394dc73d600e0ca250ce Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Sun, 22 Mar 2009 21:57:14 +0200 Subject: mac80211: track beacons separately from the rx path activity Separate beacon and rx path tracking in preparation for the beacon filtering support. At the same time change ieee80211_associated() to look a bit simpler. Probe requests are now sent only after IEEE80211_PROBE_IDLE_TIME, which is now set to 60 seconds. Signed-off-by: Kalle Valo Signed-off-by: John W. Linville diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 055bb77..8a617a7 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -308,6 +308,7 @@ struct ieee80211_if_managed { unsigned long request; unsigned long last_probe; + unsigned long last_beacon; unsigned int flags; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 209abb0..8f30f4d 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -30,7 +30,7 @@ #define IEEE80211_ASSOC_TIMEOUT (HZ / 5) #define IEEE80211_ASSOC_MAX_TRIES 3 #define IEEE80211_MONITORING_INTERVAL (2 * HZ) -#define IEEE80211_PROBE_INTERVAL (60 * HZ) +#define IEEE80211_PROBE_IDLE_TIME (60 * HZ) #define IEEE80211_RETRY_AUTH_INTERVAL (1 * HZ) /* utils */ @@ -930,7 +930,7 @@ static void ieee80211_associated(struct ieee80211_sub_if_data *sdata) struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_local *local = sdata->local; struct sta_info *sta; - int disassoc; + bool disassoc = false; /* TODO: start monitoring current AP signal quality and number of * missed beacons. 
Scan other channels every now and then and search @@ -945,36 +945,39 @@ static void ieee80211_associated(struct ieee80211_sub_if_data *sdata) if (!sta) { printk(KERN_DEBUG "%s: No STA entry for own AP %pM\n", sdata->dev->name, ifmgd->bssid); - disassoc = 1; - } else { - disassoc = 0; - if (time_after(jiffies, - sta->last_rx + IEEE80211_MONITORING_INTERVAL)) { - if (ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL) { - printk(KERN_DEBUG "%s: No ProbeResp from " - "current AP %pM - assume out of " - "range\n", - sdata->dev->name, ifmgd->bssid); - disassoc = 1; - } else - ieee80211_send_probe_req(sdata, ifmgd->bssid, - ifmgd->ssid, - ifmgd->ssid_len, - NULL, 0); - ifmgd->flags ^= IEEE80211_STA_PROBEREQ_POLL; - } else { - ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL; - if (time_after(jiffies, ifmgd->last_probe + - IEEE80211_PROBE_INTERVAL)) { - ifmgd->last_probe = jiffies; - ieee80211_send_probe_req(sdata, ifmgd->bssid, - ifmgd->ssid, - ifmgd->ssid_len, - NULL, 0); - } - } + disassoc = true; + goto unlock; + } + + if ((ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL) && + time_after(jiffies, sta->last_rx + IEEE80211_MONITORING_INTERVAL)) { + printk(KERN_DEBUG "%s: no probe response from AP %pM " + "- disassociating\n", + sdata->dev->name, ifmgd->bssid); + disassoc = true; + ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL; + goto unlock; + } + + if (time_after(jiffies, + ifmgd->last_beacon + IEEE80211_MONITORING_INTERVAL)) { + printk(KERN_DEBUG "%s: beacon loss from AP %pM " + "- sending probe request\n", + sdata->dev->name, ifmgd->bssid); + ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL; + ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid, + ifmgd->ssid_len, NULL, 0); + goto unlock; + + } + + if (time_after(jiffies, sta->last_rx + IEEE80211_PROBE_IDLE_TIME)) { + ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL; + ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid, + ifmgd->ssid_len, NULL, 0); } + unlock: rcu_read_unlock(); if (disassoc) @@ -1374,6 +1377,12 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, bss_conf->assoc_capability = capab_info; ieee80211_set_associated(sdata, changed); + /* + * initialise the time of last beacon to be the association time, + * otherwise beacon loss check will trigger immediately + */ + ifmgd->last_beacon = jiffies; + ieee80211_associated(sdata); cfg80211_send_rx_assoc(sdata->dev, (u8 *) mgmt, len); } @@ -1422,9 +1431,12 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata, size_t len, struct ieee80211_rx_status *rx_status) { + struct ieee80211_if_managed *ifmgd; size_t baselen; struct ieee802_11_elems elems; + ifmgd = &sdata->u.mgd; + if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN)) return; /* ignore ProbeResp to foreign address */ @@ -1439,11 +1451,14 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata, /* direct probe may be part of the association flow */ if (test_and_clear_bit(IEEE80211_STA_REQ_DIRECT_PROBE, - &sdata->u.mgd.request)) { + &ifmgd->request)) { printk(KERN_DEBUG "%s direct probe responded\n", sdata->dev->name); ieee80211_authenticate(sdata); } + + if (ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL) + ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL; } static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index dbfb284..eff59f3 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -850,7 +850,11 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) * Mesh beacons will update last_rx when 
if they are found to * match the current local configuration when processed. */ - sta->last_rx = jiffies; + if (rx->sdata->vif.type == NL80211_IFTYPE_STATION && + ieee80211_is_beacon(hdr->frame_control)) { + rx->sdata->u.mgd.last_beacon = jiffies; + } else + sta->last_rx = jiffies; } if (!(rx->flags & IEEE80211_RX_RA_MATCH)) -- cgit v0.10.2 From 9050bdd8589c373e01e41ddbd9a192de2ff01ef0 Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Sun, 22 Mar 2009 21:57:21 +0200 Subject: mac80211: disable power save when scanning When software scanning we need to disable power save so that all possible probe responses and beacons are received. For hardware scanning assume that hardware will take care of that and document that assumption. Signed-off-by: Kalle Valo Signed-off-by: John W. Linville diff --git a/include/net/mac80211.h b/include/net/mac80211.h index daa539a..174dc1d 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1307,11 +1307,13 @@ enum ieee80211_ampdu_mlme_action { * * @hw_scan: Ask the hardware to service the scan request, no need to start * the scan state machine in stack. The scan must honour the channel - * configuration done by the regulatory agent in the wiphy's registered - * bands. When the scan finishes, ieee80211_scan_completed() must be - * called; note that it also must be called when the scan cannot finish - * because the hardware is turned off! Anything else is a bug! - * Returns a negative error code which will be seen in userspace. + * configuration done by the regulatory agent in the wiphy's + * registered bands. The hardware (or the driver) needs to make sure + * that power save is disabled. When the scan finishes, + * ieee80211_scan_completed() must be called; note that it also must + * be called when the scan cannot finish because the hardware is + * turned off! Anything else is a bug! Returns a negative error code + * which will be seen in userspace. * * @sw_scan_start: Notifier function that is called just before a software scan * is started. Can be NULL, if the driver doesn't need this notification. diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 46f35dc..3bf9839 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -214,6 +214,66 @@ void ieee80211_scan_failed(struct ieee80211_local *local) local->scan_req = NULL; } +/* + * inform AP that we will go to sleep so that it will buffer the frames + * while we scan + */ +static void ieee80211_scan_ps_enable(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + bool ps = false; + + /* FIXME: what to do when local->pspolling is true? */ + + del_timer_sync(&local->dynamic_ps_timer); + cancel_work_sync(&local->dynamic_ps_enable_work); + + if (local->hw.conf.flags & IEEE80211_CONF_PS) { + ps = true; + local->hw.conf.flags &= ~IEEE80211_CONF_PS; + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); + } + + if (!ps || !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)) + /* + * If power save was enabled, no need to send a nullfunc + * frame because AP knows that we are sleeping. But if the + * hardware is creating the nullfunc frame for power save + * status (ie. IEEE80211_HW_PS_NULLFUNC_STACK is not + * enabled) and power save was enabled, the firmware just + * sent a null frame with power save disabled. So we need + * to send a new nullfunc frame to inform the AP that we + * are again sleeping. 
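Taken together with the callers, the net effect is a power-save bracket around every software scan; schematically (this mirrors the ieee80211_start_scan() and ieee80211_scan_completed() hunks that follow):

	netif_tx_stop_all_queues(sdata->dev);
	ieee80211_scan_ps_enable(sdata);	/* tell the AP we went to sleep */
	/* ... go off-channel, send probe requests, collect results ... */
	ieee80211_scan_ps_disable(sdata);	/* wake up again, or re-arm PS */
	netif_tx_wake_all_queues(sdata->dev);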
+ */ + ieee80211_send_nullfunc(local, sdata, 1); +} + +/* inform AP that we are awake again, unless power save is enabled */ +static void ieee80211_scan_ps_disable(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + + if (!local->powersave) + ieee80211_send_nullfunc(local, sdata, 0); + else { + /* + * In !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware + * will send a nullfunc frame with the powersave bit set + * even though the AP already knows that we are sleeping. + * This could be avoided by sending a null frame with power + * save bit disabled before enabling the power save, but + * this doesn't gain anything. + * + * When IEEE80211_HW_PS_NULLFUNC_STACK is enabled, no need + * to send a nullfunc frame because AP already knows that + * we are sleeping, let's just enable power save mode in + * hardware. + */ + local->hw.conf.flags |= IEEE80211_CONF_PS; + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); + } +} + void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) { struct ieee80211_local *local = hw_to_local(hw); @@ -268,7 +328,7 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) /* Tell AP we're back */ if (sdata->vif.type == NL80211_IFTYPE_STATION) { if (sdata->u.mgd.flags & IEEE80211_STA_ASSOCIATED) { - ieee80211_send_nullfunc(local, sdata, 0); + ieee80211_scan_ps_disable(sdata); netif_tx_wake_all_queues(sdata->dev); } } else @@ -441,7 +501,7 @@ int ieee80211_start_scan(struct ieee80211_sub_if_data *scan_sdata, if (sdata->vif.type == NL80211_IFTYPE_STATION) { if (sdata->u.mgd.flags & IEEE80211_STA_ASSOCIATED) { netif_tx_stop_all_queues(sdata->dev); - ieee80211_send_nullfunc(local, sdata, 1); + ieee80211_scan_ps_enable(sdata); } } else netif_tx_stop_all_queues(sdata->dev); -- cgit v0.10.2 From a08c1c1ac0c26229ca1ca45d554b209a56edc8be Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Sun, 22 Mar 2009 21:57:28 +0200 Subject: cfg80211: add feature to hold bss In beacon filtering there needs to be a way to not expire the BSS even when no beacons are received. Add an interface to cfg80211 to hold BSS and make sure that it's not expired. Signed-off-by: Kalle Valo Signed-off-by: John W. Linville diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index dca4a6b..5389afd 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -539,6 +539,7 @@ enum cfg80211_signal_type { * is no guarantee that these are well-formed!) * @len_information_elements: total length of the information elements * @signal: signal strength value (type depends on the wiphy's signal_type) + * @hold: BSS should not expire * @free_priv: function pointer to free private data * @priv: private area for driver use, has at least wiphy->bss_priv_size bytes */ @@ -940,4 +941,21 @@ void cfg80211_send_rx_deauth(struct net_device *dev, const u8 *buf, void cfg80211_send_rx_disassoc(struct net_device *dev, const u8 *buf, size_t len); +/** + * cfg80211_hold_bss - exclude bss from expiration + * @bss: bss which should not expire + * + * In a case when the BSS is not updated but it shouldn't expire this + * function can be used to mark the BSS to be excluded from expiration. + */ +void cfg80211_hold_bss(struct cfg80211_bss *bss); + +/** + * cfg80211_unhold_bss - remove expiration exception from the BSS + * @bss: bss which can expire again + * + * This function marks the BSS to be expirable again. 
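Usage is symmetric around the association lifetime; the mac80211 patch further below applies it to the current AP's entry exactly like this:

	cfg80211_hold_bss(&bss->cbss);		/* on association: keep the entry */
	/* ... associated; filtered beacons may no longer refresh it ... */
	cfg80211_unhold_bss(&bss->cbss);	/* on disassociation: expirable again */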
+ */ +void cfg80211_unhold_bss(struct cfg80211_bss *bss); + #endif /* __NET_CFG80211_H */ diff --git a/net/wireless/core.h b/net/wireless/core.h index 6acd483..97a6fd8 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -90,6 +90,8 @@ struct cfg80211_internal_bss { struct rb_node rbn; unsigned long ts; struct kref ref; + bool hold; + /* must be last because of priv member */ struct cfg80211_bss pub; }; diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 280dbcd..2a00e36 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -80,7 +80,8 @@ void cfg80211_bss_expire(struct cfg80211_registered_device *dev) bool expired = false; list_for_each_entry_safe(bss, tmp, &dev->bss_list, list) { - if (!time_after(jiffies, bss->ts + IEEE80211_SCAN_RESULT_EXPIRE)) + if (bss->hold || + !time_after(jiffies, bss->ts + IEEE80211_SCAN_RESULT_EXPIRE)) continue; list_del(&bss->list); rb_erase(&bss->rbn, &dev->bss_tree); @@ -471,6 +472,30 @@ void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub) } EXPORT_SYMBOL(cfg80211_unlink_bss); +void cfg80211_hold_bss(struct cfg80211_bss *pub) +{ + struct cfg80211_internal_bss *bss; + + if (!pub) + return; + + bss = container_of(pub, struct cfg80211_internal_bss, pub); + bss->hold = true; +} +EXPORT_SYMBOL(cfg80211_hold_bss); + +void cfg80211_unhold_bss(struct cfg80211_bss *pub) +{ + struct cfg80211_internal_bss *bss; + + if (!pub) + return; + + bss = container_of(pub, struct cfg80211_internal_bss, pub); + bss->hold = false; +} +EXPORT_SYMBOL(cfg80211_unhold_bss); + #ifdef CONFIG_WIRELESS_EXT int cfg80211_wext_siwscan(struct net_device *dev, struct iw_request_info *info, -- cgit v0.10.2 From 04de83815993714a7ba2618f637fa1092a5f664b Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Sun, 22 Mar 2009 21:57:35 +0200 Subject: mac80211: add beacon filtering support Add IEEE80211_HW_BEACON_FILTERING flag so that driver inform that it supports beacon filtering. Drivers need to call the new function ieee80211_beacon_loss() to notify about beacon loss. Signed-off-by: Kalle Valo Signed-off-by: John W. Linville diff --git a/Documentation/DocBook/mac80211.tmpl b/Documentation/DocBook/mac80211.tmpl index 8af6d96..fbeaffc 100644 --- a/Documentation/DocBook/mac80211.tmpl +++ b/Documentation/DocBook/mac80211.tmpl @@ -227,6 +227,12 @@ usage should require reading the full document. !Pinclude/net/mac80211.h Powersave support + + Beacon filter support +!Pinclude/net/mac80211.h Beacon filter support +!Finclude/net/mac80211.h ieee80211_beacon_loss + + Multiple queues and QoS support TBD diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 174dc1d..d881ed8 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -882,6 +882,10 @@ enum ieee80211_tkip_key_type { * * @IEEE80211_HW_MFP_CAPABLE: * Hardware supports management frame protection (MFP, IEEE 802.11w). + * + * @IEEE80211_HW_BEACON_FILTER: + * Hardware supports dropping of irrelevant beacon frames to + * avoid waking up cpu. */ enum ieee80211_hw_flags { IEEE80211_HW_RX_INCLUDES_FCS = 1<<1, @@ -897,6 +901,7 @@ enum ieee80211_hw_flags { IEEE80211_HW_PS_NULLFUNC_STACK = 1<<11, IEEE80211_HW_SUPPORTS_DYNAMIC_PS = 1<<12, IEEE80211_HW_MFP_CAPABLE = 1<<13, + IEEE80211_HW_BEACON_FILTER = 1<<14, }; /** @@ -1121,6 +1126,24 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw, */ /** + * DOC: Beacon filter support + * + * Some hardware have beacon filter support to reduce host cpu wakeups + * which will reduce system power consumption. 
It usuallly works so that + * the firmware creates a checksum of the beacon but omits all constantly + * changing elements (TSF, TIM etc). Whenever the checksum changes the + * beacon is forwarded to the host, otherwise it will be just dropped. That + * way the host will only receive beacons where some relevant information + * (for example ERP protection or WMM settings) have changed. + * + * Beacon filter support is informed with %IEEE80211_HW_BEACON_FILTER flag. + * The driver needs to enable beacon filter support whenever power save is + * enabled, that is %IEEE80211_CONF_PS is set. When power save is enabled, + * the stack will not check for beacon miss at all and the driver needs to + * notify about complete loss of beacons with ieee80211_beacon_loss(). + */ + +/** * DOC: Frame filtering * * mac80211 requires to see many management frames for proper @@ -1970,6 +1993,16 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw, const u8 *ra, struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_hw *hw, const u8 *addr); +/** + * ieee80211_beacon_loss - inform hardware does not receive beacons + * + * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf. + * + * When beacon filtering is enabled with IEEE80211_HW_BEACON_FILTERING and + * IEEE80211_CONF_PS is set, the driver needs to inform whenever the + * hardware is not receiving beacons with this function. + */ +void ieee80211_beacon_loss(struct ieee80211_vif *vif); /* Rate control API */ diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 8a617a7..acba78e 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -275,6 +275,7 @@ struct ieee80211_if_managed { struct timer_list chswitch_timer; struct work_struct work; struct work_struct chswitch_work; + struct work_struct beacon_loss_work; u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN]; @@ -1086,6 +1087,7 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local, int powersave); void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, struct ieee80211_hdr *hdr); +void ieee80211_beacon_loss_work(struct work_struct *work); void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw, enum queue_stop_reason reason); diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index dd2a276f..91e8e1b 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -477,6 +477,9 @@ static int ieee80211_stop(struct net_device *dev) */ cancel_work_sync(&sdata->u.mgd.work); cancel_work_sync(&sdata->u.mgd.chswitch_work); + + cancel_work_sync(&sdata->u.mgd.beacon_loss_work); + /* * When we get here, the interface is marked down. 
* Call synchronize_rcu() to wait for the RX path diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 8f30f4d..7ecda9d 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -610,6 +610,8 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, bss_info_changed |= ieee80211_handle_bss_capability(sdata, bss->cbss.capability, bss->has_erp_value, bss->erp_value); + cfg80211_hold_bss(&bss->cbss); + ieee80211_rx_bss_put(local, bss); } @@ -751,6 +753,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_local *local = sdata->local; + struct ieee80211_conf *conf = &local_to_hw(local)->conf; + struct ieee80211_bss *bss; struct sta_info *sta; u32 changed = 0, config_changed = 0; @@ -774,6 +778,15 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, ieee80211_sta_tear_down_BA_sessions(sta); + bss = ieee80211_rx_bss_get(local, ifmgd->bssid, + conf->channel->center_freq, + ifmgd->ssid, ifmgd->ssid_len); + + if (bss) { + cfg80211_unhold_bss(&bss->cbss); + ieee80211_rx_bss_put(local, bss); + } + if (self_disconnected) { if (deauth) ieee80211_send_deauth_disassoc(sdata, @@ -925,6 +938,33 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, jiffies + IEEE80211_MONITORING_INTERVAL); } +void ieee80211_beacon_loss_work(struct work_struct *work) +{ + struct ieee80211_sub_if_data *sdata = + container_of(work, struct ieee80211_sub_if_data, + u.mgd.beacon_loss_work); + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + + printk(KERN_DEBUG "%s: driver reports beacon loss from AP %pM " + "- sending probe request\n", sdata->dev->name, + sdata->u.mgd.bssid); + + ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL; + ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid, + ifmgd->ssid_len, NULL, 0); + + mod_timer(&ifmgd->timer, jiffies + IEEE80211_MONITORING_INTERVAL); +} + +void ieee80211_beacon_loss(struct ieee80211_vif *vif) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + + queue_work(sdata->local->hw.workqueue, + &sdata->u.mgd.beacon_loss_work); +} +EXPORT_SYMBOL(ieee80211_beacon_loss); + static void ieee80211_associated(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; @@ -959,7 +999,13 @@ static void ieee80211_associated(struct ieee80211_sub_if_data *sdata) goto unlock; } - if (time_after(jiffies, + /* + * Beacon filtering is only enabled with power save and then the + * stack should not check for beacon loss. 
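From a driver's point of view the contract is small: advertise the capability, and report when the firmware stops seeing beacons while power save is active. A minimal sketch, assuming a firmware-generated beacon-miss event; struct example_priv and the two function names are made up, only the flags and ieee80211_beacon_loss() come from mac80211:

#include <net/mac80211.h>

struct example_priv {
	struct ieee80211_hw *hw;
	struct ieee80211_vif *vif;
};

static int example_setup_hw(struct example_priv *priv)
{
	/* the firmware filters beacons for us while in power save */
	priv->hw->flags |= IEEE80211_HW_BEACON_FILTER;
	/* ... the usual preparation before ieee80211_register_hw() ... */
	return 0;
}

static void example_fw_beacon_miss_event(struct example_priv *priv)
{
	/* beacon filtering is only active while IEEE80211_CONF_PS is set */
	if (priv->hw->conf.flags & IEEE80211_CONF_PS)
		ieee80211_beacon_loss(priv->vif);
}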
+ */ + if (!((local->hw.flags & IEEE80211_HW_BEACON_FILTER) && + (local->hw.conf.flags & IEEE80211_CONF_PS)) && + time_after(jiffies, ifmgd->last_beacon + IEEE80211_MONITORING_INTERVAL)) { printk(KERN_DEBUG "%s: beacon loss from AP %pM " "- sending probe request\n", @@ -1869,6 +1915,7 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata) ifmgd = &sdata->u.mgd; INIT_WORK(&ifmgd->work, ieee80211_sta_work); INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work); + INIT_WORK(&ifmgd->beacon_loss_work, ieee80211_beacon_loss_work); setup_timer(&ifmgd->timer, ieee80211_sta_timer, (unsigned long) sdata); setup_timer(&ifmgd->chswitch_timer, ieee80211_chswitch_timer, -- cgit v0.10.2 From 4a48e2a484e5cf99da4795cf2d6916e057d533ad Mon Sep 17 00:00:00 2001 From: Christian Lamparter Date: Mon, 23 Mar 2009 12:15:43 +0100 Subject: ar9170: simplify & deBUG tx_status queueing and reporting This patch simplifies the tx_status report code by using four tx_queues per station instead of only one. (the skb lookup should be in O(1) now :-p ). Also, it fixes a really obvious copy&paste bug in the janitor work code and adds back a few spilled bits to the hardware definition header about QoS. Signed-off-by: Christian Lamparter Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ar9170/ar9170.h b/drivers/net/wireless/ar9170/ar9170.h index c49d713..f4fb2e9 100644 --- a/drivers/net/wireless/ar9170/ar9170.h +++ b/drivers/net/wireless/ar9170/ar9170.h @@ -159,7 +159,7 @@ struct ar9170 { }; struct ar9170_sta_info { - struct sk_buff_head tx_status; + struct sk_buff_head tx_status[__AR9170_NUM_TXQ]; }; #define IS_STARTED(a) (a->state >= AR9170_STARTED) diff --git a/drivers/net/wireless/ar9170/hw.h b/drivers/net/wireless/ar9170/hw.h index ad205f8..13091bd 100644 --- a/drivers/net/wireless/ar9170/hw.h +++ b/drivers/net/wireless/ar9170/hw.h @@ -397,10 +397,21 @@ struct ar9170_cmd_response { }; } __packed; +/* QoS */ + /* mac80211 queue to HW/FW map */ static const u8 ar9170_qos_hwmap[4] = { 3, 2, 0, 1 }; /* HW/FW queue to mac80211 map */ static const u8 ar9170_qos_mac80211map[4] = { 2, 3, 1, 0 }; +enum ar9170_txq { + AR9170_TXQ_BE, + AR9170_TXQ_BK, + AR9170_TXQ_VI, + AR9170_TXQ_VO, + + __AR9170_NUM_TXQ, +}; + #endif /* __AR9170_HW_H */ diff --git a/drivers/net/wireless/ar9170/main.c b/drivers/net/wireless/ar9170/main.c index a3b5323..f8c2357 100644 --- a/drivers/net/wireless/ar9170/main.c +++ b/drivers/net/wireless/ar9170/main.c @@ -277,54 +277,6 @@ static struct sk_buff *ar9170_find_skb_in_queue(struct ar9170 *ar, return NULL; } -static struct sk_buff *ar9170_find_skb_in_queue_by_mac(struct ar9170 *ar, - const u8 *mac, - struct sk_buff_head *q) -{ - unsigned long flags; - struct sk_buff *skb; - - spin_lock_irqsave(&q->lock, flags); - skb_queue_walk(q, skb) { - struct ar9170_tx_control *txc = (void *) skb->data; - struct ieee80211_hdr *hdr = (void *) txc->frame_data; - - if (compare_ether_addr(ieee80211_get_DA(hdr), mac)) - continue; - - __skb_unlink(skb, q); - spin_unlock_irqrestore(&q->lock, flags); - return skb; - } - spin_unlock_irqrestore(&q->lock, flags); - return NULL; -} - -static struct sk_buff *ar9170_find_skb_in_queue_by_txcq(struct ar9170 *ar, - const u32 queue, - struct sk_buff_head *q) -{ - unsigned long flags; - struct sk_buff *skb; - - spin_lock_irqsave(&q->lock, flags); - skb_queue_walk(q, skb) { - struct ar9170_tx_control *txc = (void *) skb->data; - u32 txc_queue = (le32_to_cpu(txc->phy_control) & - AR9170_TX_PHY_QOS_MASK) >> - AR9170_TX_PHY_QOS_SHIFT; - - if (queue != 
txc_queue) - continue; - - __skb_unlink(skb, q); - spin_unlock_irqrestore(&q->lock, flags); - return skb; - } - spin_unlock_irqrestore(&q->lock, flags); - return NULL; -} - static struct sk_buff *ar9170_find_queued_skb(struct ar9170 *ar, const u8 *mac, const u32 queue) { @@ -344,21 +296,21 @@ static struct sk_buff *ar9170_find_queued_skb(struct ar9170 *ar, const u8 *mac, if (likely(sta)) { struct ar9170_sta_info *sta_priv = (void *) sta->drv_priv; - skb = ar9170_find_skb_in_queue_by_txcq(ar, queue, - &sta_priv->tx_status); - } else { - /* STA is not in database (yet/anymore). */ + skb = skb_dequeue(&sta_priv->tx_status[queue]); + rcu_read_unlock(); + if (likely(skb)) + return skb; + } else + rcu_read_unlock(); - /* scan the waste bin for likely candidates */ + /* scan the waste queue for candidates */ + skb = ar9170_find_skb_in_queue(ar, mac, queue, + &ar->global_tx_status_waste); + if (!skb) { + /* so it still _must_ be in the global list. */ skb = ar9170_find_skb_in_queue(ar, mac, queue, - &ar->global_tx_status_waste); - if (!skb) { - /* so it still _must_ be in the global list. */ - skb = ar9170_find_skb_in_queue(ar, mac, queue, - &ar->global_tx_status); - } + &ar->global_tx_status); } - rcu_read_unlock(); #ifdef AR9170_QUEUE_DEBUG if (unlikely((!skb) && net_ratelimit())) { @@ -367,7 +319,6 @@ static struct sk_buff *ar9170_find_queued_skb(struct ar9170 *ar, const u8 *mac, wiphy_name(ar->hw->wiphy), mac, queue); } #endif /* AR9170_QUEUE_DEBUG */ - return skb; } @@ -381,9 +332,13 @@ static struct sk_buff *ar9170_find_queued_skb(struct ar9170 *ar, const u8 *mac, static void ar9170_tx_status_janitor(struct work_struct *work) { struct ar9170 *ar = container_of(work, struct ar9170, - filter_config_work); + tx_status_janitor.work); struct sk_buff *skb; + if (unlikely(!IS_STARTED(ar))) + return ; + + mutex_lock(&ar->mutex); /* recycle the garbage back to mac80211... one by one. 
*/ while ((skb = skb_dequeue(&ar->global_tx_status_waste))) { #ifdef AR9170_QUEUE_DEBUG @@ -395,8 +350,6 @@ static void ar9170_tx_status_janitor(struct work_struct *work) AR9170_TX_STATUS_FAILED); } - - while ((skb = skb_dequeue(&ar->global_tx_status))) { #ifdef AR9170_QUEUE_DEBUG printk(KERN_DEBUG "%s: moving frame into waste queue =>\n", @@ -411,6 +364,8 @@ static void ar9170_tx_status_janitor(struct work_struct *work) if (skb_queue_len(&ar->global_tx_status_waste) > 0) queue_delayed_work(ar->hw->workqueue, &ar->tx_status_janitor, msecs_to_jiffies(100)); + + mutex_unlock(&ar->mutex); } static void ar9170_handle_command_response(struct ar9170 *ar, @@ -1012,7 +967,7 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) if (info->control.sta) { sta_info = (void *) info->control.sta->drv_priv; - skb_queue_tail(&sta_info->tx_status, skb); + skb_queue_tail(&sta_info->tx_status[queue], skb); } else { skb_queue_tail(&ar->global_tx_status, skb); @@ -1025,7 +980,7 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) err = ar->tx(ar, skb, tx_status, 0); if (unlikely(tx_status && err)) { if (info->control.sta) - skb_unlink(skb, &sta_info->tx_status); + skb_unlink(skb, &sta_info->tx_status[queue]); else skb_unlink(skb, &ar->global_tx_status); } @@ -1479,55 +1434,35 @@ static void ar9170_sta_notify(struct ieee80211_hw *hw, struct ar9170 *ar = hw->priv; struct ar9170_sta_info *info = (void *) sta->drv_priv; struct sk_buff *skb; + unsigned int i; switch (cmd) { case STA_NOTIFY_ADD: - skb_queue_head_init(&info->tx_status); - - /* - * do we already have a frame that needs tx_status - * from this station in our queue? - * If so then transfer them to the new station queue. - */ - - /* preserve the correct order - check the waste bin first */ - while ((skb = ar9170_find_skb_in_queue_by_mac(ar, sta->addr, - &ar->global_tx_status_waste))) - skb_queue_tail(&info->tx_status, skb); - - /* now the still pending frames */ - while ((skb = ar9170_find_skb_in_queue_by_mac(ar, sta->addr, - &ar->global_tx_status))) - skb_queue_tail(&info->tx_status, skb); - -#ifdef AR9170_QUEUE_DEBUG - printk(KERN_DEBUG "%s: STA[%pM] has %d queued frames =>\n", - wiphy_name(ar->hw->wiphy), sta->addr, - skb_queue_len(&info->tx_status)); - - ar9170_dump_station_tx_status_queue(ar, &info->tx_status); -#endif /* AR9170_QUEUE_DEBUG */ - - + for (i = 0; i < ar->hw->queues; i++) + skb_queue_head_init(&info->tx_status[i]); break; case STA_NOTIFY_REMOVE: /* * transfer all outstanding frames that need a tx_status - * reports to the fallback queue + * reports to the global tx_status queue */ - while ((skb = skb_dequeue(&ar->global_tx_status))) { + for (i = 0; i < ar->hw->queues; i++) { + while ((skb = skb_dequeue(&info->tx_status[i]))) { #ifdef AR9170_QUEUE_DEBUG - printk(KERN_DEBUG "%s: queueing frame in global " - "tx_status queue =>\n", - wiphy_name(ar->hw->wiphy)); + printk(KERN_DEBUG "%s: queueing frame in " + "global tx_status queue =>\n", + wiphy_name(ar->hw->wiphy)); - ar9170_print_txheader(ar, skb); + ar9170_print_txheader(ar, skb); #endif /* AR9170_QUEUE_DEBUG */ - skb_queue_tail(&ar->global_tx_status_waste, skb); + skb_queue_tail(&ar->global_tx_status, skb); + } } + queue_delayed_work(ar->hw->workqueue, &ar->tx_status_janitor, + msecs_to_jiffies(100)); break; default: @@ -1571,7 +1506,7 @@ static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue, int ret; mutex_lock(&ar->mutex); - if ((param) && !(queue > 4)) { + if ((param) && !(queue > ar->hw->queues)) { memcpy(&ar->edcf[ar9170_qos_hwmap[queue]], param, 
sizeof(*param)); @@ -1635,7 +1570,7 @@ void *ar9170_alloc(size_t priv_size) IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_NOISE_DBM; - ar->hw->queues = 4; + ar->hw->queues = __AR9170_NUM_TXQ; ar->hw->extra_tx_headroom = 8; ar->hw->sta_data_size = sizeof(struct ar9170_sta_info); -- cgit v0.10.2 From 2b874e83c970b45c328ab12239b066a43505454c Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 23 Mar 2009 14:10:22 +0100 Subject: mac80211: rate control status only for controlled packets This patch changes mac80211 to not notify the rate control algorithm's tx_status() method when reporting status for a packet that didn't go through the rate control algorithm's get_rate() method. Signed-off-by: Johannes Berg Signed-off-by: John W. Linville diff --git a/include/net/mac80211.h b/include/net/mac80211.h index d881ed8..6f3bc4c 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -245,6 +245,9 @@ struct ieee80211_bss_conf { * @IEEE80211_TX_CTL_RATE_CTRL_PROBE: internal to mac80211, can be * set by rate control algorithms to indicate probe rate, will * be cleared for fragmented frames (except on the last fragment) + * @IEEE80211_TX_INTFL_RCALGO: mac80211 internal flag, do not test or + * set this flag in the driver; indicates that the rate control + * algorithm was used and should be notified of TX status */ enum mac80211_tx_control_flags { IEEE80211_TX_CTL_REQ_TX_STATUS = BIT(0), @@ -260,6 +263,7 @@ enum mac80211_tx_control_flags { IEEE80211_TX_STAT_AMPDU = BIT(10), IEEE80211_TX_STAT_AMPDU_NO_BACK = BIT(11), IEEE80211_TX_CTL_RATE_CTRL_PROBE = BIT(12), + IEEE80211_TX_INTFL_RCALGO = BIT(13), }; /** diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c index 3fa7ab2..4641f00 100644 --- a/net/mac80211/rate.c +++ b/net/mac80211/rate.c @@ -219,10 +219,12 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata, info->control.rates[i].count = 1; } - if (sta && sdata->force_unicast_rateidx > -1) + if (sta && sdata->force_unicast_rateidx > -1) { info->control.rates[0].idx = sdata->force_unicast_rateidx; - else + } else { ref->ops->get_rate(ref->priv, ista, priv_sta, txrc); + info->flags |= IEEE80211_TX_INTFL_RCALGO; + } /* * try to enforce the maximum rate the user wanted diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h index b9164c9..2ab5ad9 100644 --- a/net/mac80211/rate.h +++ b/net/mac80211/rate.h @@ -44,8 +44,10 @@ static inline void rate_control_tx_status(struct ieee80211_local *local, struct rate_control_ref *ref = local->rate_ctrl; struct ieee80211_sta *ista = &sta->sta; void *priv_sta = sta->rate_ctrl_priv; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb); + if (likely(info->flags & IEEE80211_TX_INTFL_RCALGO)) + ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb); } -- cgit v0.10.2 From a1bfa0eb98e2fedd05a64a4a8943ea8f6f7c5469 Mon Sep 17 00:00:00 2001 From: Christian Lamparter Date: Mon, 23 Mar 2009 15:49:33 +0100 Subject: p54: Kconfig maintenance This patch updates p54's Kconfig entry and removes the out-dated device list. Signed-off-by: Christian Lamparter Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/p54/Kconfig b/drivers/net/wireless/p54/Kconfig index 7d6e9d1..b45d6a4 100644 --- a/drivers/net/wireless/p54/Kconfig +++ b/drivers/net/wireless/p54/Kconfig @@ -1,9 +1,10 @@ config P54_COMMON tristate "Softmac Prism54 support" - depends on MAC80211 && WLAN_80211 && FW_LOADER && EXPERIMENTAL + depends on MAC80211 && WLAN_80211 && EXPERIMENTAL + select FW_LOADER ---help--- - This is common code for isl38xx based cards. - This module does nothing by itself - the USB/PCI frontends + This is common code for isl38xx/stlc45xx based modules. + This module does nothing by itself - the USB/PCI/SPI front-ends also need to be enabled in order to support any devices. These devices require softmac firmware which can be found at @@ -17,31 +18,6 @@ config P54_USB select CRC32 ---help--- This driver is for USB isl38xx based wireless cards. - These are USB based adapters found in devices such as: - - 3COM 3CRWE254G72 - SMC 2862W-G - Accton 802.11g WN4501 USB - Siemens Gigaset USB - Netgear WG121 - Netgear WG111 - Medion 40900, Roper Europe - Shuttle PN15, Airvast WM168g, IOGear GWU513 - Linksys WUSB54G - Linksys WUSB54G Portable - DLink DWL-G120 Spinnaker - DLink DWL-G122 - Belkin F5D7050 ver 1000 - Cohiba Proto board - SMC 2862W-G version 2 - U.S. Robotics U5 802.11g Adapter - FUJITSU E-5400 USB D1700 - Sagem XG703A - DLink DWL-G120 Cohiba - Spinnaker Proto board - Linksys WUSB54AG - Inventel UR054G - Spinnaker DUT These devices require softmac firmware which can be found at http://prism54.org/ -- cgit v0.10.2 From a3c0b87c4f21911fb7185902dd13f0e3cd7f33f7 Mon Sep 17 00:00:00 2001 From: Lorenzo Nava Date: Sun, 22 Mar 2009 19:15:41 +0100 Subject: b43: fix b43_plcp_get_bitrate_idx_ofdm return type This patch fixes the return type of b43_plcp_get_bitrate_idx_ofdm. If the plcp contains an error, the function return value is 255 instead of -1, and the packet was not dropped. This causes a warning in __ieee80211_rx function because rate idx is out of range. Cc: stable@kernel.org Signed-off-by: Lorenzo Nava Signed-off-by: Michael Buesch Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c index 0f53c7e..a63d888 100644 --- a/drivers/net/wireless/b43/xmit.c +++ b/drivers/net/wireless/b43/xmit.c @@ -50,7 +50,7 @@ static int b43_plcp_get_bitrate_idx_cck(struct b43_plcp_hdr6 *plcp) } /* Extract the bitrate index out of an OFDM PLCP header. */ -static u8 b43_plcp_get_bitrate_idx_ofdm(struct b43_plcp_hdr6 *plcp, bool aphy) +static int b43_plcp_get_bitrate_idx_ofdm(struct b43_plcp_hdr6 *plcp, bool aphy) { int base = aphy ? 0 : 4; -- cgit v0.10.2 From b726604706ad88d8b28bc487e45e710f58cc19ee Mon Sep 17 00:00:00 2001 From: Bob Copeland Date: Mon, 2 Mar 2009 21:55:18 -0500 Subject: ath5k: warn and correct rate for unknown hw rate indexes ath5k sets up a mapping table from the hardware rate index to the rate index used by mac80211; however, we have seen some received frames with incorrect rate indexes. Such frames normally get dropped with a warning in __ieee80211_rx(), but it doesn't include enough information to track down the error. This patch adds a warning to hw_to_driver_rix for any lookups that result in a rate index of -1, then returns a valid rate so the frame can be processed. Changes-licensed-under: 3-Clause-BSD Signed-off-by: Bob Copeland Cc: stable@kernel.org Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c index f28b86c..6580df2 100644 --- a/drivers/net/wireless/ath5k/base.c +++ b/drivers/net/wireless/ath5k/base.c @@ -1088,9 +1088,18 @@ ath5k_mode_setup(struct ath5k_softc *sc) static inline int ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix) { - WARN(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES, - "hw_rix out of bounds: %x\n", hw_rix); - return sc->rate_idx[sc->curband->band][hw_rix]; + int rix; + + /* return base rate on errors */ + if (WARN(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES, + "hw_rix out of bounds: %x\n", hw_rix)) + return 0; + + rix = sc->rate_idx[sc->curband->band][hw_rix]; + if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix)) + rix = 0; + + return rix; } /***************\ diff --git a/drivers/net/wireless/ath5k/base.h b/drivers/net/wireless/ath5k/base.h index 20e0d14b..8229561 100644 --- a/drivers/net/wireless/ath5k/base.h +++ b/drivers/net/wireless/ath5k/base.h @@ -112,7 +112,7 @@ struct ath5k_softc { struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS]; struct ieee80211_channel channels[ATH_CHAN_MAX]; struct ieee80211_rate rates[IEEE80211_NUM_BANDS][AR5K_MAX_RATES]; - u8 rate_idx[IEEE80211_NUM_BANDS][AR5K_MAX_RATES]; + s8 rate_idx[IEEE80211_NUM_BANDS][AR5K_MAX_RATES]; enum nl80211_iftype opmode; struct ath5k_hw *ah; /* Atheros HW */ -- cgit v0.10.2 From 14344b81ec264efbe59de0183f5ba38650a479a6 Mon Sep 17 00:00:00 2001 From: Ivo van Doorn Date: Sat, 21 Mar 2009 00:00:57 +0100 Subject: rt2x00: New USB ID for rt73usb Signed-off-by: Ivo van Doorn Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c index 24fdfdf..420fff4 100644 --- a/drivers/net/wireless/rt2x00/rt73usb.c +++ b/drivers/net/wireless/rt2x00/rt73usb.c @@ -2425,6 +2425,8 @@ static struct usb_device_id rt73usb_device_table[] = { { USB_DEVICE(0x0df6, 0x9712), USB_DEVICE_DATA(&rt73usb_ops) }, /* Surecom */ { USB_DEVICE(0x0769, 0x31f3), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Tilgin */ + { USB_DEVICE(0x6933, 0x5001), USB_DEVICE_DATA(&rt73usb_ops) }, /* Philips */ { USB_DEVICE(0x0471, 0x200a), USB_DEVICE_DATA(&rt73usb_ops) }, /* Planex */ -- cgit v0.10.2 From 051b919188650fe4c93ca8701183ae88439388f6 Mon Sep 17 00:00:00 2001 From: "Luis R. Rodriguez" Date: Mon, 23 Mar 2009 18:25:01 -0400 Subject: ath9k: fix dma mapping leak of rx buffer upon rmmod We were claiming DMA buffers on the RX tasklet but never upon a simple module removal. Cc: stable@kernel.org Signed-off-by: FUJITA Tomonori Signed-off-by: Luis R. Rodriguez Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath9k/recv.c b/drivers/net/wireless/ath9k/recv.c index 917bac7..71cb18d 100644 --- a/drivers/net/wireless/ath9k/recv.c +++ b/drivers/net/wireless/ath9k/recv.c @@ -344,8 +344,13 @@ void ath_rx_cleanup(struct ath_softc *sc) list_for_each_entry(bf, &sc->rx.rxbuf, list) { skb = bf->bf_mpdu; - if (skb) + if (skb) { + dma_unmap_single(sc->dev, + bf->bf_buf_addr, + sc->rx.bufsize, + DMA_FROM_DEVICE); dev_kfree_skb(skb); + } } if (sc->rx.rxdma.dd_desc_len != 0) -- cgit v0.10.2 From de00c04ecbb482507ace2197782123446a1cfdca Mon Sep 17 00:00:00 2001 From: Christian Lamparter Date: Tue, 24 Mar 2009 16:21:55 +0100 Subject: ar9170: single module build This patch restores all-in-one module build procedure for ar9170. Signed-off-by: Christian Lamparter Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile index 5e7c9ac..50e7fba 100644 --- a/drivers/net/wireless/Makefile +++ b/drivers/net/wireless/Makefile @@ -57,6 +57,6 @@ obj-$(CONFIG_P54_COMMON) += p54/ obj-$(CONFIG_ATH5K) += ath5k/ obj-$(CONFIG_ATH9K) += ath9k/ -obj-$(CONFIG_AR9170_COMMON) += ar9170/ +obj-$(CONFIG_AR9170_USB) += ar9170/ obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o diff --git a/drivers/net/wireless/ar9170/Kconfig b/drivers/net/wireless/ar9170/Kconfig index f661187..de4281f 100644 --- a/drivers/net/wireless/ar9170/Kconfig +++ b/drivers/net/wireless/ar9170/Kconfig @@ -1,17 +1,6 @@ -config AR9170_COMMON - tristate "Atheros AR9170 support" - depends on WLAN_80211 && MAC80211 && EXPERIMENTAL - help - This is common code for AR9170 based devices. - This module does nothing by itself - the USB/(SPI) frontends - also need to be enabled in order to support any devices. - - Say Y if you have the hardware, or M to build a module called - ar9170common. - config AR9170_USB - tristate "Atheros AR9170 USB support" - depends on AR9170_COMMON && USB + tristate "Atheros AR9170 802.11n USB support" + depends on USB && MAC80211 && WLAN_80211 && EXPERIMENTAL select FW_LOADER help This is a driver for the Atheros "otus" 802.11n USB devices. @@ -24,5 +13,5 @@ config AR9170_USB config AR9170_LEDS bool - depends on AR9170_COMMON && MAC80211_LEDS && (LEDS_CLASS = y || LEDS_CLASS = AR9170_COMMON) + depends on AR9170_USB && MAC80211_LEDS && (LEDS_CLASS = y || LEDS_CLASS = AR9170_USB) default y diff --git a/drivers/net/wireless/ar9170/Makefile b/drivers/net/wireless/ar9170/Makefile index 59b174d..8d91c7e 100644 --- a/drivers/net/wireless/ar9170/Makefile +++ b/drivers/net/wireless/ar9170/Makefile @@ -1,5 +1,3 @@ -ar9170common-objs += main.o cmd.o mac.o phy.o led.o -ar9170usb-objs += usb.o +ar9170usb-objs := usb.o main.o cmd.o mac.o phy.o led.o -obj-$(CONFIG_AR9170_COMMON) += ar9170common.o obj-$(CONFIG_AR9170_USB) += ar9170usb.o diff --git a/drivers/net/wireless/ar9170/cmd.c b/drivers/net/wireless/ar9170/cmd.c index fd5625c..f57a620 100644 --- a/drivers/net/wireless/ar9170/cmd.c +++ b/drivers/net/wireless/ar9170/cmd.c @@ -127,4 +127,3 @@ int ar9170_echo_test(struct ar9170 *ar, u32 v) return 0; } -EXPORT_SYMBOL_GPL(ar9170_echo_test); diff --git a/drivers/net/wireless/ar9170/main.c b/drivers/net/wireless/ar9170/main.c index f8c2357..5996ff9 100644 --- a/drivers/net/wireless/ar9170/main.c +++ b/drivers/net/wireless/ar9170/main.c @@ -37,14 +37,6 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -/* - * BIG FAT TODO: - * - * By the looks of things: these devices share a lot of things like - * EEPROM layout/design and PHY code with other Atheros WIFI products. - * So this driver/library will eventually become ath9k code... 
or vice versa ;-) - */ - #include #include #include @@ -56,9 +48,6 @@ static int modparam_nohwcrypt; module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); -MODULE_AUTHOR("Johannes Berg "); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Atheros shared code for AR9170 wireless devices"); #define RATE(_bitrate, _hw_rate, _txpidx, _flags) { \ .bitrate = (_bitrate), \ @@ -247,7 +236,6 @@ void ar9170_handle_tx_status(struct ar9170 *ar, struct sk_buff *skb, skb_pull(skb, sizeof(struct ar9170_tx_control)); ieee80211_tx_status_irqsafe(ar->hw, skb); } -EXPORT_SYMBOL_GPL(ar9170_handle_tx_status); static struct sk_buff *ar9170_find_skb_in_queue(struct ar9170 *ar, const u8 *mac, @@ -630,13 +618,6 @@ static void ar9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len) ieee80211_rx_irqsafe(ar->hw, skb, &status); } -/* - * TODO: - * It looks like AR9170 supports more than just the USB transport interface. - * Unfortunately, there is no available information what parts of the - * precendent and following code fragments is device specific and what not. - * For now, everything stays here, until some SPI chips pop up. - */ void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb) { unsigned int i, tlen, resplen; @@ -694,7 +675,6 @@ void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb) printk(KERN_ERR "%s: buffer remains!\n", wiphy_name(ar->hw->wiphy)); } -EXPORT_SYMBOL_GPL(ar9170_rx); #define AR9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop) \ do { \ @@ -1582,7 +1562,6 @@ void *ar9170_alloc(size_t priv_size) return ar; } -EXPORT_SYMBOL_GPL(ar9170_alloc); static int ar9170_read_eeprom(struct ar9170 *ar) { @@ -1680,7 +1659,6 @@ err_unreg: err_out: return err; } -EXPORT_SYMBOL_GPL(ar9170_register); void ar9170_unregister(struct ar9170 *ar) { @@ -1691,21 +1669,3 @@ void ar9170_unregister(struct ar9170 *ar) ieee80211_unregister_hw(ar->hw); mutex_destroy(&ar->mutex); } -EXPORT_SYMBOL_GPL(ar9170_unregister); - -static int __init ar9170_init(void) -{ - if (modparam_nohwcrypt) - printk(KERN_INFO "ar9170: cryptographic acceleration " - "disabled.\n"); - - return 0; -} - -static void __exit ar9170_exit(void) -{ - -} - -module_init(ar9170_init); -module_exit(ar9170_exit); diff --git a/drivers/net/wireless/ar9170/usb.c b/drivers/net/wireless/ar9170/usb.c index ede511e..ad29684 100644 --- a/drivers/net/wireless/ar9170/usb.c +++ b/drivers/net/wireless/ar9170/usb.c @@ -47,9 +47,10 @@ #include "hw.h" #include "usb.h" +MODULE_AUTHOR("Johannes Berg "); MODULE_AUTHOR("Christian Lamparter "); MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("USB Driver for Atheros AR9170 based devices"); +MODULE_DESCRIPTION("Atheros AR9170 802.11n USB wireless"); MODULE_FIRMWARE("ar9170-1.fw"); MODULE_FIRMWARE("ar9170-2.fw"); -- cgit v0.10.2 From 5a0fe8ac70f81b5b91156736066e6445d0dcc61f Mon Sep 17 00:00:00 2001 From: Bob Copeland Date: Mon, 23 Mar 2009 23:35:37 -0400 Subject: ath5k: properly drop packets from ops->tx We shouldn't return NETDEV_TX_BUSY from the TX callback, especially after we've mucked with the sk_buffs. Drop the packets and return NETDEV_TX_OK. Changes-licensed-under: 3-Clause-BSD Signed-off-by: Bob Copeland Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c index 6580df2..5d57d77 100644 --- a/drivers/net/wireless/ath5k/base.c +++ b/drivers/net/wireless/ath5k/base.c @@ -2562,7 +2562,7 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) if (skb_headroom(skb) < padsize) { ATH5K_ERR(sc, "tx hdrlen not %%4: %d not enough" " headroom to pad %d\n", hdrlen, padsize); - return NETDEV_TX_BUSY; + goto drop_packet; } skb_push(skb, padsize); memmove(skb->data, skb->data+padsize, hdrlen); @@ -2573,7 +2573,7 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) ATH5K_ERR(sc, "no further txbuf available, dropping packet\n"); spin_unlock_irqrestore(&sc->txbuflock, flags); ieee80211_stop_queue(hw, skb_get_queue_mapping(skb)); - return NETDEV_TX_BUSY; + goto drop_packet; } bf = list_first_entry(&sc->txbuf, struct ath5k_buf, list); list_del(&bf->list); @@ -2590,10 +2590,12 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) list_add_tail(&bf->list, &sc->txbuf); sc->txbuf_len++; spin_unlock_irqrestore(&sc->txbuflock, flags); - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; + goto drop_packet; } + return NETDEV_TX_OK; +drop_packet: + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } -- cgit v0.10.2 From 3832c287f11ba001bbe48e9be8c59cb9f71f6b43 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 24 Mar 2009 08:46:57 +0100 Subject: mac80211: fix RX path My previous patch ("mac80211: remove mixed-cell and userspace MLME code") was too obvious to me, so obvious that a stupid bug crept in. The IBSS RX function must be invoked for IBSS, of course, not anything != IBSS. Reported-by: Larry Finger Signed-off-by: Johannes Berg Tested-by: Larry Finger Signed-off-by: John W. Linville diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index eff59f3..64ebe66 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1884,7 +1884,7 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) if (ieee80211_vif_is_mesh(&sdata->vif)) return ieee80211_mesh_rx_mgmt(sdata, rx->skb, rx->status); - if (sdata->vif.type != NL80211_IFTYPE_ADHOC) + if (sdata->vif.type == NL80211_IFTYPE_ADHOC) return ieee80211_ibss_rx_mgmt(sdata, rx->skb, rx->status); if (sdata->vif.type == NL80211_IFTYPE_STATION) -- cgit v0.10.2 From 4bbf4d56583dd52c429d88f43cb614bdbe5deea6 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 24 Mar 2009 09:35:46 +0100 Subject: cfg80211: fix locking in nl80211_set_wiphy Luis reports that there's a circular locking dependency; this is because cfg80211_dev_rename() will acquire the cfg80211_mutex while the device mutex is held, while this normally is done the other way around. The solution is to open-code the device-getting in nl80211_set_wiphy and require holding the mutex around cfg80211_dev_rename rather than acquiring it within. Also fix a bug -- rtnl locking is expected by drivers so we need to provide it. Reported-by: Luis R. Rodriguez Signed-off-by: Johannes Berg Signed-off-by: John W. Linville diff --git a/net/wireless/core.c b/net/wireless/core.c index 17fe390..d1f5565 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -87,7 +87,7 @@ struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx) } /* requires cfg80211_mutex to be held! 
*/ -static struct cfg80211_registered_device * +struct cfg80211_registered_device * __cfg80211_drv_from_info(struct genl_info *info) { int ifindex; @@ -176,13 +176,14 @@ void cfg80211_put_dev(struct cfg80211_registered_device *drv) mutex_unlock(&drv->mtx); } +/* requires cfg80211_mutex to be held */ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev, char *newname) { struct cfg80211_registered_device *drv; int wiphy_idx, taken = -1, result, digits; - mutex_lock(&cfg80211_mutex); + assert_cfg80211_lock(); /* prohibit calling the thing phy%d when %d is not its number */ sscanf(newname, PHY_NAME "%d%n", &wiphy_idx, &taken); @@ -195,30 +196,23 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev, * deny the name if it is phy where is printed * without leading zeroes. taken == strlen(newname) here */ - result = -EINVAL; if (taken == strlen(PHY_NAME) + digits) - goto out_unlock; + return -EINVAL; } /* Ignore nop renames */ - result = 0; if (strcmp(newname, dev_name(&rdev->wiphy.dev)) == 0) - goto out_unlock; + return 0; /* Ensure another device does not already have this name. */ - list_for_each_entry(drv, &cfg80211_drv_list, list) { - result = -EINVAL; + list_for_each_entry(drv, &cfg80211_drv_list, list) if (strcmp(newname, dev_name(&drv->wiphy.dev)) == 0) - goto out_unlock; - } + return -EINVAL; - /* this will only check for collisions in sysfs - * which is not even always compiled in. - */ result = device_rename(&rdev->wiphy.dev, newname); if (result) - goto out_unlock; + return result; if (rdev->wiphy.debugfsdir && !debugfs_rename(rdev->wiphy.debugfsdir->d_parent, @@ -228,13 +222,9 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev, printk(KERN_ERR "cfg80211: failed to rename debugfs dir to %s!\n", newname); - result = 0; -out_unlock: - mutex_unlock(&cfg80211_mutex); - if (result == 0) - nl80211_notify_dev_rename(rdev); + nl80211_notify_dev_rename(rdev); - return result; + return 0; } /* exported functions */ diff --git a/net/wireless/core.h b/net/wireless/core.h index 97a6fd8..d43daa2 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -99,6 +99,9 @@ struct cfg80211_internal_bss { struct cfg80211_registered_device *cfg80211_drv_by_wiphy_idx(int wiphy_idx); int get_wiphy_idx(struct wiphy *wiphy); +struct cfg80211_registered_device * +__cfg80211_drv_from_info(struct genl_info *info); + /* * This function returns a pointer to the driver * that the genl_info item that is passed refers to. 
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 8808431..353e1a4 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -366,16 +366,26 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) int result = 0, rem_txq_params = 0; struct nlattr *nl_txq_params; - rdev = cfg80211_get_dev_from_info(info); - if (IS_ERR(rdev)) - return PTR_ERR(rdev); + rtnl_lock(); + + mutex_lock(&cfg80211_mutex); + + rdev = __cfg80211_drv_from_info(info); + if (IS_ERR(rdev)) { + result = PTR_ERR(rdev); + goto unlock; + } - if (info->attrs[NL80211_ATTR_WIPHY_NAME]) { + mutex_lock(&rdev->mtx); + + if (info->attrs[NL80211_ATTR_WIPHY_NAME]) result = cfg80211_dev_rename( rdev, nla_data(info->attrs[NL80211_ATTR_WIPHY_NAME])); - if (result) - goto bad_res; - } + + mutex_unlock(&cfg80211_mutex); + + if (result) + goto bad_res; if (info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS]) { struct ieee80211_txq_params txq_params; @@ -471,7 +481,9 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) bad_res: - cfg80211_put_dev(rdev); + mutex_unlock(&rdev->mtx); + unlock: + rtnl_unlock(); return result; } -- cgit v0.10.2 From dd970e43d86c253ff159d9668499aaf42d175722 Mon Sep 17 00:00:00 2001 From: Michael Buesch Date: Tue, 24 Mar 2009 10:36:48 +0100 Subject: b43: Add BCM4307 PCI-ID 00:09.0 Network controller [0280]: Broadcom Corporation BCM4307 Ethernet Controller [14e4:4306] (rev 03) Subsystem: Broadcom Corporation BCM4307 Ethernet Controller [14e4:4306] Flags: bus master, fast devsel, latency 32, IRQ 10 Memory at d7000000 (32-bit, non-prefetchable) [size=8K] Kernel driver in use: b43-pci-bridge Kernel modules: ssb Reported-by: yoann Signed-off-by: Michael Buesch Signed-off-by: John W. Linville diff --git a/drivers/ssb/b43_pci_bridge.c b/drivers/ssb/b43_pci_bridge.c index 27a6775..ef9c6a0 100644 --- a/drivers/ssb/b43_pci_bridge.c +++ b/drivers/ssb/b43_pci_bridge.c @@ -18,6 +18,7 @@ static const struct pci_device_id b43_pci_bridge_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4301) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4306) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4307) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4311) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4312) }, -- cgit v0.10.2 From 08df05aa9b25f3079585855506022bb33a011183 Mon Sep 17 00:00:00 2001 From: Wey-Yi Guy Date: Tue, 24 Mar 2009 10:02:54 -0700 Subject: iwlwifi: show current driver status in user readable format change the display of current driver status bit to user readable format for better and easier debugging Signed-off-by: Wey-Yi Guy Signed-off-by: Reinette Chatre Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c index 36cfecc..64eb585 100644 --- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c +++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c @@ -425,6 +425,56 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf, return ret; } +static ssize_t iwl_dbgfs_status_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = (struct iwl_priv *)file->private_data; + char buf[512]; + int pos = 0; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n", + test_bit(STATUS_HCMD_ACTIVE, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n", + test_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n", + test_bit(STATUS_INT_ENABLED, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n", + test_bit(STATUS_RF_KILL_HW, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_SW:\t %d\n", + test_bit(STATUS_RF_KILL_SW, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n", + test_bit(STATUS_INIT, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n", + test_bit(STATUS_ALIVE, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n", + test_bit(STATUS_READY, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_TEMPERATURE:\t %d\n", + test_bit(STATUS_TEMPERATURE, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n", + test_bit(STATUS_GEO_CONFIGURED, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n", + test_bit(STATUS_EXIT_PENDING, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_IN_SUSPEND:\t %d\n", + test_bit(STATUS_IN_SUSPEND, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n", + test_bit(STATUS_STATISTICS, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n", + test_bit(STATUS_SCANNING, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n", + test_bit(STATUS_SCAN_ABORTING, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n", + test_bit(STATUS_SCAN_HW, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n", + test_bit(STATUS_POWER_PMI, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n", + test_bit(STATUS_FW_ERROR, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_MODE_PENDING:\t %d\n", + test_bit(STATUS_MODE_PENDING, &priv->status)); + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + DEBUGFS_READ_WRITE_FILE_OPS(sram); DEBUGFS_WRITE_FILE_OPS(log_event); DEBUGFS_READ_FILE_OPS(eeprom); @@ -432,6 +482,7 @@ DEBUGFS_READ_FILE_OPS(stations); DEBUGFS_READ_FILE_OPS(rx_statistics); DEBUGFS_READ_FILE_OPS(tx_statistics); DEBUGFS_READ_FILE_OPS(channels); +DEBUGFS_READ_FILE_OPS(status); /* * Create the debugfs files and directories @@ -466,7 +517,7 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name) DEBUGFS_ADD_FILE(rx_statistics, data); DEBUGFS_ADD_FILE(tx_statistics, data); DEBUGFS_ADD_FILE(channels, data); - DEBUGFS_ADD_X32(status, data, (u32 *)&priv->status); + DEBUGFS_ADD_FILE(status, 
data); DEBUGFS_ADD_BOOL(disable_sensitivity, rf, &priv->disable_sens_cal); DEBUGFS_ADD_BOOL(disable_chain_noise, rf, &priv->disable_chain_noise_cal); @@ -496,6 +547,7 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv) DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_log_event); DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_stations); DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_channels); + DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_status); DEBUGFS_REMOVE(priv->dbgfs->dir_data); DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_sensitivity); DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_chain_noise); -- cgit v0.10.2 From 2de8e0d999b8790861cd3749bec2236ccc1c8110 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 23 Mar 2009 17:28:35 +0100 Subject: mac80211: rewrite fragmentation Fragmentation currently uses an allocated array to store the fragment skbs, and then keeps track of which have been sent and which are still pending etc. This is rather complicated; make it simpler by just chaining the fragments into skb->next and removing from that list when sent. Also simplifies all code that needs to touch fragments, since it now only needs to walk the skb->next list. This is a prerequisite for fixing the stored packet code, which I need to do for proper aggregation packet storing. Signed-off-by: Johannes Berg Reviewed-by: Luis R. Rodriguez Signed-off-by: John W. Linville diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index acba78e..785f636 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -149,11 +149,6 @@ struct ieee80211_tx_data { struct ieee80211_channel *channel; - /* Extra fragments (in addition to the first fragment - * in skb) */ - struct sk_buff **extra_frag; - int num_extra_frag; - u16 ethertype; unsigned int flags; }; @@ -191,8 +186,6 @@ struct ieee80211_rx_data { struct ieee80211_tx_stored_packet { struct sk_buff *skb; - struct sk_buff **extra_frag; - int num_extra_frag; }; struct beacon_data { diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index f3f240c..51bf49c 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -34,8 +34,7 @@ #define IEEE80211_TX_OK 0 #define IEEE80211_TX_AGAIN 1 -#define IEEE80211_TX_FRAG_AGAIN 2 -#define IEEE80211_TX_PENDING 3 +#define IEEE80211_TX_PENDING 2 /* misc utils */ @@ -702,17 +701,62 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx) return TX_CONTINUE; } +static int ieee80211_fragment(struct ieee80211_local *local, + struct sk_buff *skb, int hdrlen, + int frag_threshold) +{ + struct sk_buff *tail = skb, *tmp; + int per_fragm = frag_threshold - hdrlen - FCS_LEN; + int pos = hdrlen + per_fragm; + int rem = skb->len - hdrlen - per_fragm; + + if (WARN_ON(rem < 0)) + return -EINVAL; + + while (rem) { + int fraglen = per_fragm; + + if (fraglen > rem) + fraglen = rem; + rem -= fraglen; + tmp = dev_alloc_skb(local->tx_headroom + + frag_threshold + + IEEE80211_ENCRYPT_HEADROOM + + IEEE80211_ENCRYPT_TAILROOM); + if (!tmp) + return -ENOMEM; + tail->next = tmp; + tail = tmp; + skb_reserve(tmp, local->tx_headroom + + IEEE80211_ENCRYPT_HEADROOM); + /* copy control information */ + memcpy(tmp->cb, skb->cb, sizeof(tmp->cb)); + skb_copy_queue_mapping(tmp, skb); + tmp->priority = skb->priority; + tmp->do_not_encrypt = skb->do_not_encrypt; + tmp->dev = skb->dev; + tmp->iif = skb->iif; + + /* copy header and data */ + memcpy(skb_put(tmp, hdrlen), skb->data, hdrlen); + memcpy(skb_put(tmp, fraglen), skb->data + pos, fraglen); + + pos += fraglen; + } + + skb->len = hdrlen 
+ per_fragm; + return 0; +} + static ieee80211_tx_result debug_noinline ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) { - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; - size_t hdrlen, per_fragm, num_fragm, payload_len, left; - struct sk_buff **frags, *first, *frag; - int i; - u16 seq; - u8 *pos; + struct sk_buff *skb = tx->skb; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (void *)skb->data; int frag_threshold = tx->local->fragmentation_threshold; + int hdrlen; + int fragnum; if (!(tx->flags & IEEE80211_TX_FRAGMENTED)) return TX_CONTINUE; @@ -725,58 +769,35 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU)) return TX_DROP; - first = tx->skb; - hdrlen = ieee80211_hdrlen(hdr->frame_control); - payload_len = first->len - hdrlen; - per_fragm = frag_threshold - hdrlen - FCS_LEN; - num_fragm = DIV_ROUND_UP(payload_len, per_fragm); - - frags = kzalloc(num_fragm * sizeof(struct sk_buff *), GFP_ATOMIC); - if (!frags) - goto fail; - - hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREFRAGS); - seq = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ; - pos = first->data + hdrlen + per_fragm; - left = payload_len - per_fragm; - for (i = 0; i < num_fragm - 1; i++) { - struct ieee80211_hdr *fhdr; - size_t copylen; - - if (left <= 0) - goto fail; - /* reserve enough extra head and tail room for possible - * encryption */ - frag = frags[i] = - dev_alloc_skb(tx->local->tx_headroom + - frag_threshold + - IEEE80211_ENCRYPT_HEADROOM + - IEEE80211_ENCRYPT_TAILROOM); - if (!frag) - goto fail; - - /* Make sure that all fragments use the same priority so - * that they end up using the same TX queue */ - frag->priority = first->priority; + /* internal error, why is TX_FRAGMENTED set? */ + if (WARN_ON(skb->len <= frag_threshold)) + return TX_DROP; - skb_reserve(frag, tx->local->tx_headroom + - IEEE80211_ENCRYPT_HEADROOM); + /* + * Now fragment the frame. This will allocate all the fragments and + * chain them (using skb as the first fragment) to skb->next. + * During transmission, we will remove the successfully transmitted + * fragments from this list. When the low-level driver rejects one + * of the fragments then we will simply pretend to accept the skb + * but store it away as pending. + */ + if (ieee80211_fragment(tx->local, skb, hdrlen, frag_threshold)) + return TX_DROP; - /* copy TX information */ - info = IEEE80211_SKB_CB(frag); - memcpy(info, first->cb, sizeof(frag->cb)); + /* update duration/seq/flags of fragments */ + fragnum = 0; + do { + int next_len; + const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS); - /* copy/fill in 802.11 header */ - fhdr = (struct ieee80211_hdr *) skb_put(frag, hdrlen); - memcpy(fhdr, first->data, hdrlen); - fhdr->seq_ctrl = cpu_to_le16(seq | ((i + 1) & IEEE80211_SCTL_FRAG)); + hdr = (void *)skb->data; + info = IEEE80211_SKB_CB(skb); - if (i == num_fragm - 2) { - /* clear MOREFRAGS bit for the last fragment */ - fhdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREFRAGS); - } else { + if (skb->next) { + hdr->frame_control |= morefrags; + next_len = skb->next->len; /* * No multi-rate retries for fragmented frames, that * would completely throw off the NAV at other STAs. 
@@ -787,37 +808,16 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) info->control.rates[4].idx = -1; BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 5); info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE; + } else { + hdr->frame_control &= ~morefrags; + next_len = 0; } - - /* copy data */ - copylen = left > per_fragm ? per_fragm : left; - memcpy(skb_put(frag, copylen), pos, copylen); - - skb_copy_queue_mapping(frag, first); - - frag->do_not_encrypt = first->do_not_encrypt; - frag->dev = first->dev; - frag->iif = first->iif; - - pos += copylen; - left -= copylen; - } - skb_trim(first, hdrlen + per_fragm); - - tx->num_extra_frag = num_fragm - 1; - tx->extra_frag = frags; + hdr->duration_id = ieee80211_duration(tx, 0, next_len); + hdr->seq_ctrl |= cpu_to_le16(fragnum & IEEE80211_SCTL_FRAG); + fragnum++; + } while ((skb = skb->next)); return TX_CONTINUE; - - fail: - if (frags) { - for (i = 0; i < num_fragm - 1; i++) - if (frags[i]) - dev_kfree_skb(frags[i]); - kfree(frags); - } - I802_DEBUG_INC(tx->local->tx_handlers_drop_fragment); - return TX_DROP; } static ieee80211_tx_result debug_noinline @@ -845,27 +845,19 @@ ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx) static ieee80211_tx_result debug_noinline ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx) { - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; - int next_len, i; - int group_addr = is_multicast_ether_addr(hdr->addr1); - - if (!(tx->flags & IEEE80211_TX_FRAGMENTED)) { - hdr->duration_id = ieee80211_duration(tx, group_addr, 0); - return TX_CONTINUE; - } - - hdr->duration_id = ieee80211_duration(tx, group_addr, - tx->extra_frag[0]->len); + struct sk_buff *skb = tx->skb; + struct ieee80211_hdr *hdr; + int next_len; + bool group_addr; - for (i = 0; i < tx->num_extra_frag; i++) { - if (i + 1 < tx->num_extra_frag) - next_len = tx->extra_frag[i + 1]->len; - else - next_len = 0; + do { + hdr = (void *) skb->data; + next_len = skb->next ? 
skb->next->len : 0; + group_addr = is_multicast_ether_addr(hdr->addr1); - hdr = (struct ieee80211_hdr *)tx->extra_frag[i]->data; - hdr->duration_id = ieee80211_duration(tx, 0, next_len); - } + hdr->duration_id = + ieee80211_duration(tx, group_addr, next_len); + } while ((skb = skb->next)); return TX_CONTINUE; } @@ -873,19 +865,16 @@ ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx) static ieee80211_tx_result debug_noinline ieee80211_tx_h_stats(struct ieee80211_tx_data *tx) { - int i; + struct sk_buff *skb = tx->skb; if (!tx->sta) return TX_CONTINUE; tx->sta->tx_packets++; - tx->sta->tx_fragments++; - tx->sta->tx_bytes += tx->skb->len; - if (tx->extra_frag) { - tx->sta->tx_fragments += tx->num_extra_frag; - for (i = 0; i < tx->num_extra_frag; i++) - tx->sta->tx_bytes += tx->extra_frag[i]->len; - } + do { + tx->sta->tx_fragments++; + tx->sta->tx_bytes += skb->len; + } while ((skb = skb->next)); return TX_CONTINUE; } @@ -1099,45 +1088,36 @@ static int ieee80211_tx_prepare(struct ieee80211_local *local, return 0; } -static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb, +static int __ieee80211_tx(struct ieee80211_local *local, struct ieee80211_tx_data *tx) { + struct sk_buff *skb = tx->skb, *next; struct ieee80211_tx_info *info; - int ret, i; + int ret; + bool fragm = false; - if (skb) { + local->mdev->trans_start = jiffies; + + while (skb) { if (ieee80211_queue_stopped(&local->hw, skb_get_queue_mapping(skb))) return IEEE80211_TX_PENDING; - ret = local->ops->tx(local_to_hw(local), skb); - if (ret) - return IEEE80211_TX_AGAIN; - local->mdev->trans_start = jiffies; - ieee80211_led_tx(local, 1); - } - if (tx->extra_frag) { - for (i = 0; i < tx->num_extra_frag; i++) { - if (!tx->extra_frag[i]) - continue; - info = IEEE80211_SKB_CB(tx->extra_frag[i]); + if (fragm) { + info = IEEE80211_SKB_CB(skb); info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT | IEEE80211_TX_CTL_FIRST_FRAGMENT); - if (ieee80211_queue_stopped(&local->hw, - skb_get_queue_mapping(tx->extra_frag[i]))) - return IEEE80211_TX_FRAG_AGAIN; - - ret = local->ops->tx(local_to_hw(local), - tx->extra_frag[i]); - if (ret) - return IEEE80211_TX_FRAG_AGAIN; - local->mdev->trans_start = jiffies; - ieee80211_led_tx(local, 1); - tx->extra_frag[i] = NULL; } - kfree(tx->extra_frag); - tx->extra_frag = NULL; + + next = skb->next; + ret = local->ops->tx(local_to_hw(local), skb); + if (ret != NETDEV_TX_OK) + return IEEE80211_TX_AGAIN; + tx->skb = skb = next; + ieee80211_led_tx(local, 1); + fragm = true; } + return IEEE80211_TX_OK; } @@ -1149,7 +1129,6 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx) { struct sk_buff *skb = tx->skb; ieee80211_tx_result res = TX_DROP; - int i; #define CALL_TXH(txh) \ res = txh(tx); \ @@ -1173,11 +1152,13 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx) txh_done: if (unlikely(res == TX_DROP)) { I802_DEBUG_INC(tx->local->tx_handlers_drop); - dev_kfree_skb(skb); - for (i = 0; i < tx->num_extra_frag; i++) - if (tx->extra_frag[i]) - dev_kfree_skb(tx->extra_frag[i]); - kfree(tx->extra_frag); + while (skb) { + struct sk_buff *next; + + next = skb->next; + dev_kfree_skb(skb); + skb = next; + } return -1; } else if (unlikely(res == TX_QUEUED)) { I802_DEBUG_INC(tx->local->tx_handlers_queued); @@ -1194,7 +1175,7 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb) struct ieee80211_tx_data tx; ieee80211_tx_result res_prepare; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - int ret, i; + int ret; u16 queue; queue = skb_get_queue_mapping(skb); 
@@ -1225,7 +1206,7 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb) goto out; retry: - ret = __ieee80211_tx(local, skb, &tx); + ret = __ieee80211_tx(local, &tx); if (ret) { struct ieee80211_tx_stored_packet *store; @@ -1240,9 +1221,6 @@ retry: store = &local->pending_packet[queue]; - if (ret == IEEE80211_TX_FRAG_AGAIN) - skb = NULL; - set_bit(queue, local->queues_pending); smp_mb(); /* @@ -1260,22 +1238,23 @@ retry: clear_bit(queue, local->queues_pending); goto retry; } - store->skb = skb; - store->extra_frag = tx.extra_frag; - store->num_extra_frag = tx.num_extra_frag; + store->skb = tx.skb; } out: rcu_read_unlock(); return 0; drop: - if (skb) - dev_kfree_skb(skb); - for (i = 0; i < tx.num_extra_frag; i++) - if (tx.extra_frag[i]) - dev_kfree_skb(tx.extra_frag[i]); - kfree(tx.extra_frag); rcu_read_unlock(); + + skb = tx.skb; + while (skb) { + struct sk_buff *next; + + next = skb->next; + dev_kfree_skb(skb); + skb = next; + } return 0; } @@ -1810,17 +1789,21 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, */ void ieee80211_clear_tx_pending(struct ieee80211_local *local) { - int i, j; - struct ieee80211_tx_stored_packet *store; + struct sk_buff *skb; + int i; for (i = 0; i < local->hw.queues; i++) { if (!test_bit(i, local->queues_pending)) continue; - store = &local->pending_packet[i]; - kfree_skb(store->skb); - for (j = 0; j < store->num_extra_frag; j++) - kfree_skb(store->extra_frag[j]); - kfree(store->extra_frag); + + skb = local->pending_packet[i].skb; + while (skb) { + struct sk_buff *next; + + next = skb->next; + dev_kfree_skb(skb); + skb = next; + } clear_bit(i, local->queues_pending); } } @@ -1854,14 +1837,11 @@ void ieee80211_tx_pending(unsigned long data) netif_start_subqueue(local->mdev, i); store = &local->pending_packet[i]; - tx.extra_frag = store->extra_frag; - tx.num_extra_frag = store->num_extra_frag; tx.flags = 0; - ret = __ieee80211_tx(local, store->skb, &tx); - if (ret) { - if (ret == IEEE80211_TX_FRAG_AGAIN) - store->skb = NULL; - } else { + tx.skb = store->skb; + ret = __ieee80211_tx(local, &tx); + store->skb = tx.skb; + if (!ret) { clear_bit(i, local->queues_pending); ieee80211_wake_queue(&local->hw, i); } diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 444bb14..021166c 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -166,18 +166,13 @@ int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr) void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx) { - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data; - - hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); - if (tx->extra_frag) { - struct ieee80211_hdr *fhdr; - int i; - for (i = 0; i < tx->num_extra_frag; i++) { - fhdr = (struct ieee80211_hdr *) - tx->extra_frag[i]->data; - fhdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); - } - } + struct sk_buff *skb = tx->skb; + struct ieee80211_hdr *hdr; + + do { + hdr = (struct ieee80211_hdr *) skb->data; + hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); + } while ((skb = skb->next)); } int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c index 7043ddc..ef73105 100644 --- a/net/mac80211/wep.c +++ b/net/mac80211/wep.c @@ -329,24 +329,17 @@ static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) ieee80211_tx_result ieee80211_crypto_wep_encrypt(struct ieee80211_tx_data *tx) { - int i; + struct sk_buff *skb; ieee80211_tx_set_protected(tx); - if (wep_encrypt_skb(tx, tx->skb) < 
0) { - I802_DEBUG_INC(tx->local->tx_handlers_drop_wep); - return TX_DROP; - } - - if (tx->extra_frag) { - for (i = 0; i < tx->num_extra_frag; i++) { - if (wep_encrypt_skb(tx, tx->extra_frag[i])) { - I802_DEBUG_INC(tx->local-> - tx_handlers_drop_wep); - return TX_DROP; - } + skb = tx->skb; + do { + if (wep_encrypt_skb(tx, skb) < 0) { + I802_DEBUG_INC(tx->local->tx_handlers_drop_wep); + return TX_DROP; } - } + } while ((skb = skb->next)); return TX_CONTINUE; } diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index 9101b48..4f8bfea 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c @@ -196,19 +196,13 @@ ieee80211_tx_result ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx) { struct sk_buff *skb = tx->skb; - int i; ieee80211_tx_set_protected(tx); - if (tkip_encrypt_skb(tx, skb) < 0) - return TX_DROP; - - if (tx->extra_frag) { - for (i = 0; i < tx->num_extra_frag; i++) { - if (tkip_encrypt_skb(tx, tx->extra_frag[i])) - return TX_DROP; - } - } + do { + if (tkip_encrypt_skb(tx, skb) < 0) + return TX_DROP; + } while ((skb = skb->next)); return TX_CONTINUE; } @@ -428,19 +422,13 @@ ieee80211_tx_result ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx) { struct sk_buff *skb = tx->skb; - int i; ieee80211_tx_set_protected(tx); - if (ccmp_encrypt_skb(tx, skb) < 0) - return TX_DROP; - - if (tx->extra_frag) { - for (i = 0; i < tx->num_extra_frag; i++) { - if (ccmp_encrypt_skb(tx, tx->extra_frag[i])) - return TX_DROP; - } - } + do { + if (ccmp_encrypt_skb(tx, skb) < 0) + return TX_DROP; + } while ((skb = skb->next)); return TX_CONTINUE; } -- cgit v0.10.2 From f0e72851f7ad108fed20426b46a18ab5fcd5729f Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 23 Mar 2009 17:28:36 +0100 Subject: mac80211: fix A-MPDU queue assignment Internally, mac80211 requires the skb's queue mapping to be set to the AC queue, not the virtual A-MPDU queue. This is not done correctly currently, this patch moves the code down to directly before the driver is invoked and adds a comment that it will be moved into the driver later. Since this requires __ieee80211_tx() to have the sta pointer, make sure to provide it in ieee80211_tx_pending(). Signed-off-by: Johannes Berg Reviewed-by: Luis R. Rodriguez Signed-off-by: John W. Linville diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 51bf49c..0d97cad 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1024,13 +1024,8 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx, spin_lock_irqsave(&tx->sta->lock, flags); state = &tx->sta->ampdu_mlme.tid_state_tx[tid]; - if (*state == HT_AGG_STATE_OPERATIONAL) { + if (*state == HT_AGG_STATE_OPERATIONAL) info->flags |= IEEE80211_TX_CTL_AMPDU; - if (local->hw.ampdu_queues) - skb_set_queue_mapping( - skb, tx->local->hw.queues + - tx->sta->tid_to_tx_q[tid]); - } spin_unlock_irqrestore(&tx->sta->lock, flags); } @@ -1103,10 +1098,29 @@ static int __ieee80211_tx(struct ieee80211_local *local, skb_get_queue_mapping(skb))) return IEEE80211_TX_PENDING; - if (fragm) { - info = IEEE80211_SKB_CB(skb); + info = IEEE80211_SKB_CB(skb); + + if (fragm) info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT | IEEE80211_TX_CTL_FIRST_FRAGMENT); + + /* + * Internally, we need to have the queue mapping point to + * the real AC queue, not the virtual A-MPDU queue. This + * now finally sets the queue to what the driver wants. + * We will later move this down into the only driver that + * needs it, iwlwifi. 
+ */ + if (tx->sta && local->hw.ampdu_queues && + info->flags & IEEE80211_TX_CTL_AMPDU) { + unsigned long flags; + u8 *qc = ieee80211_get_qos_ctl((void *) skb->data); + int tid = *qc & IEEE80211_QOS_CTL_TID_MASK; + + spin_lock_irqsave(&tx->sta->lock, flags); + skb_set_queue_mapping(skb, local->hw.queues + + tx->sta->tid_to_tx_q[tid]); + spin_unlock_irqrestore(&tx->sta->lock, flags); } next = skb->next; @@ -1817,9 +1831,11 @@ void ieee80211_tx_pending(unsigned long data) struct ieee80211_local *local = (struct ieee80211_local *)data; struct net_device *dev = local->mdev; struct ieee80211_tx_stored_packet *store; + struct ieee80211_hdr *hdr; struct ieee80211_tx_data tx; int i, ret; + rcu_read_lock(); netif_tx_lock_bh(dev); for (i = 0; i < local->hw.queues; i++) { /* Check that this queue is ok */ @@ -1839,6 +1855,8 @@ void ieee80211_tx_pending(unsigned long data) store = &local->pending_packet[i]; tx.flags = 0; tx.skb = store->skb; + hdr = (struct ieee80211_hdr *)tx.skb->data; + tx.sta = sta_info_get(local, hdr->addr1); ret = __ieee80211_tx(local, &tx); store->skb = tx.skb; if (!ret) { @@ -1847,6 +1865,7 @@ void ieee80211_tx_pending(unsigned long data) } } netif_tx_unlock_bh(dev); + rcu_read_unlock(); } /* functions for drivers to get certain frames */ -- cgit v0.10.2 From 2a577d98712a284a612dd51d69db5cb989810dc2 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 23 Mar 2009 17:28:37 +0100 Subject: mac80211: rework the pending packets code The pending packets code is quite incomprehensible, uses memory barriers nobody really understands, etc. This patch reworks it entirely, using the queue spinlock, proper stop bits and the skb queues themselves to indicate whether packets are pending or not (rather than a separate variable like before). Signed-off-by: Johannes Berg Reviewed-by: Luis R. Rodriguez Signed-off-by: John W. 
Linville diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 785f636..6ce62e5 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -184,10 +184,6 @@ struct ieee80211_rx_data { u16 tkip_iv16; }; -struct ieee80211_tx_stored_packet { - struct sk_buff *skb; -}; - struct beacon_data { u8 *head, *tail; int head_len, tail_len; @@ -583,6 +579,7 @@ enum queue_stop_reason { IEEE80211_QUEUE_STOP_REASON_CSA, IEEE80211_QUEUE_STOP_REASON_AGGREGATION, IEEE80211_QUEUE_STOP_REASON_SUSPEND, + IEEE80211_QUEUE_STOP_REASON_PENDING, }; struct ieee80211_master_priv { @@ -639,9 +636,7 @@ struct ieee80211_local { struct sta_info *sta_hash[STA_HASH_SIZE]; struct timer_list sta_cleanup; - unsigned long queues_pending[BITS_TO_LONGS(IEEE80211_MAX_QUEUES)]; - unsigned long queues_pending_run[BITS_TO_LONGS(IEEE80211_MAX_QUEUES)]; - struct ieee80211_tx_stored_packet pending_packet[IEEE80211_MAX_QUEUES]; + struct sk_buff_head pending[IEEE80211_MAX_QUEUES]; struct tasklet_struct tx_pending_tasklet; /* number of interfaces with corresponding IFF_ flags */ diff --git a/net/mac80211/main.c b/net/mac80211/main.c index dac68d4..a7430e9 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -781,6 +781,8 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, sta_info_init(local); + for (i = 0; i < IEEE80211_MAX_QUEUES; i++) + skb_queue_head_init(&local->pending[i]); tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending, (unsigned long)local); tasklet_disable(&local->tx_pending_tasklet); diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 0d97cad..ee1b77f 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1189,12 +1189,14 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb) struct ieee80211_tx_data tx; ieee80211_tx_result res_prepare; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - int ret; + struct sk_buff *next; + unsigned long flags; + int ret, retries; u16 queue; queue = skb_get_queue_mapping(skb); - WARN_ON(test_bit(queue, local->queues_pending)); + WARN_ON(!skb_queue_empty(&local->pending[queue])); if (unlikely(skb->len < 10)) { dev_kfree_skb(skb); @@ -1219,40 +1221,52 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb) if (invoke_tx_handlers(&tx)) goto out; -retry: + retries = 0; + retry: ret = __ieee80211_tx(local, &tx); - if (ret) { - struct ieee80211_tx_stored_packet *store; - + switch (ret) { + case IEEE80211_TX_OK: + break; + case IEEE80211_TX_AGAIN: /* * Since there are no fragmented frames on A-MPDU * queues, there's no reason for a driver to reject * a frame there, warn and drop it. */ - if (ret != IEEE80211_TX_PENDING) - if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU)) - goto drop; + if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU)) + goto drop; + /* fall through */ + case IEEE80211_TX_PENDING: + skb = tx.skb; - store = &local->pending_packet[queue]; + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); - set_bit(queue, local->queues_pending); - smp_mb(); - /* - * When the driver gets out of buffers during sending of - * fragments and calls ieee80211_stop_queue, the netif - * subqueue is stopped. There is, however, a small window - * in which the PENDING bit is not yet set. If a buffer - * gets available in that window (i.e. driver calls - * ieee80211_wake_queue), we would end up with ieee80211_tx - * called with the PENDING bit still set. Prevent this by - * continuing transmitting here when that situation is - * possible to have happened. 
- */ - if (!__netif_subqueue_stopped(local->mdev, queue)) { - clear_bit(queue, local->queues_pending); + if (__netif_subqueue_stopped(local->mdev, queue)) { + do { + next = skb->next; + skb->next = NULL; + skb_queue_tail(&local->pending[queue], skb); + } while ((skb = next)); + + /* + * Make sure nobody will enable the queue on us + * (without going through the tasklet) nor disable the + * netdev queue underneath the pending handling code. + */ + __set_bit(IEEE80211_QUEUE_STOP_REASON_PENDING, + &local->queue_stop_reasons[queue]); + + spin_unlock_irqrestore(&local->queue_stop_reason_lock, + flags); + } else { + spin_unlock_irqrestore(&local->queue_stop_reason_lock, + flags); + + retries++; + if (WARN(retries > 10, "tx refused but queue active")) + goto drop; goto retry; } - store->skb = tx.skb; } out: rcu_read_unlock(); @@ -1263,8 +1277,6 @@ retry: skb = tx.skb; while (skb) { - struct sk_buff *next; - next = skb->next; dev_kfree_skb(skb); skb = next; @@ -1803,23 +1815,10 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, */ void ieee80211_clear_tx_pending(struct ieee80211_local *local) { - struct sk_buff *skb; int i; - for (i = 0; i < local->hw.queues; i++) { - if (!test_bit(i, local->queues_pending)) - continue; - - skb = local->pending_packet[i].skb; - while (skb) { - struct sk_buff *next; - - next = skb->next; - dev_kfree_skb(skb); - skb = next; - } - clear_bit(i, local->queues_pending); - } + for (i = 0; i < local->hw.queues; i++) + skb_queue_purge(&local->pending[i]); } /* @@ -1830,40 +1829,57 @@ void ieee80211_tx_pending(unsigned long data) { struct ieee80211_local *local = (struct ieee80211_local *)data; struct net_device *dev = local->mdev; - struct ieee80211_tx_stored_packet *store; struct ieee80211_hdr *hdr; + unsigned long flags; struct ieee80211_tx_data tx; int i, ret; + bool next; rcu_read_lock(); netif_tx_lock_bh(dev); - for (i = 0; i < local->hw.queues; i++) { - /* Check that this queue is ok */ - if (__netif_subqueue_stopped(local->mdev, i) && - !test_bit(i, local->queues_pending_run)) - continue; - if (!test_bit(i, local->queues_pending)) { - clear_bit(i, local->queues_pending_run); - ieee80211_wake_queue(&local->hw, i); + for (i = 0; i < local->hw.queues; i++) { + /* + * If queue is stopped by something other than due to pending + * frames, or we have no pending frames, proceed to next queue. 
+ */ + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + next = false; + if (local->queue_stop_reasons[i] != + BIT(IEEE80211_QUEUE_STOP_REASON_PENDING) || + skb_queue_empty(&local->pending[i])) + next = true; + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); + + if (next) continue; - } - clear_bit(i, local->queues_pending_run); + /* + * start the queue now to allow processing our packets, + * we're under the tx lock here anyway so nothing will + * happen as a result of this + */ netif_start_subqueue(local->mdev, i); - store = &local->pending_packet[i]; - tx.flags = 0; - tx.skb = store->skb; - hdr = (struct ieee80211_hdr *)tx.skb->data; - tx.sta = sta_info_get(local, hdr->addr1); - ret = __ieee80211_tx(local, &tx); - store->skb = tx.skb; - if (!ret) { - clear_bit(i, local->queues_pending); - ieee80211_wake_queue(&local->hw, i); + while (!skb_queue_empty(&local->pending[i])) { + tx.flags = 0; + tx.skb = skb_dequeue(&local->pending[i]); + hdr = (struct ieee80211_hdr *)tx.skb->data; + tx.sta = sta_info_get(local, hdr->addr1); + + ret = __ieee80211_tx(local, &tx); + if (ret != IEEE80211_TX_OK) { + skb_queue_head(&local->pending[i], tx.skb); + break; + } } + + /* Start regular packet processing again. */ + if (skb_queue_empty(&local->pending[i])) + ieee80211_wake_queue_by_reason(&local->hw, i, + IEEE80211_QUEUE_STOP_REASON_PENDING); } + netif_tx_unlock_bh(dev); rcu_read_unlock(); } diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 021166c..0247d80 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -365,16 +365,16 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue, __clear_bit(reason, &local->queue_stop_reasons[queue]); + if (!skb_queue_empty(&local->pending[queue]) && + local->queue_stop_reasons[queue] == + BIT(IEEE80211_QUEUE_STOP_REASON_PENDING)) + tasklet_schedule(&local->tx_pending_tasklet); + if (local->queue_stop_reasons[queue] != 0) /* someone still has this queue stopped */ return; - if (test_bit(queue, local->queues_pending)) { - set_bit(queue, local->queues_pending_run); - tasklet_schedule(&local->tx_pending_tasklet); - } else { - netif_wake_subqueue(local->mdev, queue); - } + netif_wake_subqueue(local->mdev, queue); } void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, @@ -420,9 +420,15 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue, reason = IEEE80211_QUEUE_STOP_REASON_AGGREGATION; } - __set_bit(reason, &local->queue_stop_reasons[queue]); + /* + * Only stop if it was previously running, this is necessary + * for correct pending packets handling because there we may + * start (but not wake) the queue and rely on that. + */ + if (!local->queue_stop_reasons[queue]) + netif_stop_subqueue(local->mdev, queue); - netif_stop_subqueue(local->mdev, queue); + __set_bit(reason, &local->queue_stop_reasons[queue]); } void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue, -- cgit v0.10.2 From 1870cd71e87da1a1afb904f2c84086f487a07135 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 23 Mar 2009 17:28:38 +0100 Subject: mac80211: clean up __ieee80211_tx args __ieee80211_tx takes a struct ieee80211_tx_data argument, but only uses a few of its members, namely 'skb' and 'sta'. Make that explicit, so that less internal knowledge is required in ieee80211_tx_pending and the possibility of introducing errors here is removed. Signed-off-by: Johannes Berg Reviewed-by: Luis R. Rodriguez Signed-off-by: John W. 
Linville diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index ee1b77f..b909e40 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1084,9 +1084,10 @@ static int ieee80211_tx_prepare(struct ieee80211_local *local, } static int __ieee80211_tx(struct ieee80211_local *local, - struct ieee80211_tx_data *tx) + struct sk_buff **skbp, + struct sta_info *sta) { - struct sk_buff *skb = tx->skb, *next; + struct sk_buff *skb = *skbp, *next; struct ieee80211_tx_info *info; int ret; bool fragm = false; @@ -1111,23 +1112,23 @@ static int __ieee80211_tx(struct ieee80211_local *local, * We will later move this down into the only driver that * needs it, iwlwifi. */ - if (tx->sta && local->hw.ampdu_queues && + if (sta && local->hw.ampdu_queues && info->flags & IEEE80211_TX_CTL_AMPDU) { unsigned long flags; u8 *qc = ieee80211_get_qos_ctl((void *) skb->data); int tid = *qc & IEEE80211_QOS_CTL_TID_MASK; - spin_lock_irqsave(&tx->sta->lock, flags); + spin_lock_irqsave(&sta->lock, flags); skb_set_queue_mapping(skb, local->hw.queues + - tx->sta->tid_to_tx_q[tid]); - spin_unlock_irqrestore(&tx->sta->lock, flags); + sta->tid_to_tx_q[tid]); + spin_unlock_irqrestore(&sta->lock, flags); } next = skb->next; ret = local->ops->tx(local_to_hw(local), skb); if (ret != NETDEV_TX_OK) return IEEE80211_TX_AGAIN; - tx->skb = skb = next; + *skbp = skb = next; ieee80211_led_tx(local, 1); fragm = true; } @@ -1223,7 +1224,7 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb) retries = 0; retry: - ret = __ieee80211_tx(local, &tx); + ret = __ieee80211_tx(local, &tx.skb, tx.sta); switch (ret) { case IEEE80211_TX_OK: break; @@ -1831,7 +1832,6 @@ void ieee80211_tx_pending(unsigned long data) struct net_device *dev = local->mdev; struct ieee80211_hdr *hdr; unsigned long flags; - struct ieee80211_tx_data tx; int i, ret; bool next; @@ -1862,14 +1862,15 @@ void ieee80211_tx_pending(unsigned long data) netif_start_subqueue(local->mdev, i); while (!skb_queue_empty(&local->pending[i])) { - tx.flags = 0; - tx.skb = skb_dequeue(&local->pending[i]); - hdr = (struct ieee80211_hdr *)tx.skb->data; - tx.sta = sta_info_get(local, hdr->addr1); + struct sk_buff *skb = skb_dequeue(&local->pending[i]); + struct sta_info *sta; + + hdr = (struct ieee80211_hdr *)skb->data; + sta = sta_info_get(local, hdr->addr1); - ret = __ieee80211_tx(local, &tx); + ret = __ieee80211_tx(local, &skb, sta); if (ret != IEEE80211_TX_OK) { - skb_queue_head(&local->pending[i], tx.skb); + skb_queue_head(&local->pending[i], skb); break; } } -- cgit v0.10.2 From b1720231ca07dee3382980f3b25e6581bd2e54e9 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 23 Mar 2009 17:28:39 +0100 Subject: mac80211: unify and fix TX aggregation start When TX aggregation becomes operational, we do a number of steps: 1) print a debug message 2) wake the virtual queue 3) notify the driver Unfortunately, 1) and 3) are only done if the driver is first to reply to the aggregation request, it is, however, possible that the remote station replies before the driver! Thus, unify the code for this and call the new function ieee80211_agg_tx_operational in both places where TX aggregation can become operational. Additionally, rename the driver notification from IEEE80211_AMPDU_TX_RESUME to IEEE80211_AMPDU_TX_OPERATIONAL. Signed-off-by: Johannes Berg Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath9k/main.c b/drivers/net/wireless/ath9k/main.c index c13e4e5..13d4e67 100644 --- a/drivers/net/wireless/ath9k/main.c +++ b/drivers/net/wireless/ath9k/main.c @@ -2730,7 +2730,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw, ieee80211_stop_tx_ba_cb_irqsafe(hw, sta->addr, tid); break; - case IEEE80211_AMPDU_TX_RESUME: + case IEEE80211_AMPDU_TX_OPERATIONAL: ath_tx_aggr_resume(sc, sta, tid); break; default: diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 6f3bc4c..07fe987 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1236,14 +1236,14 @@ enum ieee80211_filter_flags { * @IEEE80211_AMPDU_RX_STOP: stop Rx aggregation * @IEEE80211_AMPDU_TX_START: start Tx aggregation * @IEEE80211_AMPDU_TX_STOP: stop Tx aggregation - * @IEEE80211_AMPDU_TX_RESUME: resume TX aggregation + * @IEEE80211_AMPDU_TX_OPERATIONAL: TX aggregation has become operational */ enum ieee80211_ampdu_mlme_action { IEEE80211_AMPDU_RX_START, IEEE80211_AMPDU_RX_STOP, IEEE80211_AMPDU_TX_START, IEEE80211_AMPDU_TX_STOP, - IEEE80211_AMPDU_TX_RESUME, + IEEE80211_AMPDU_TX_OPERATIONAL, }; /** diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index e5776ef..fd718e2 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -404,6 +404,27 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid) } EXPORT_SYMBOL(ieee80211_start_tx_ba_session); +static void ieee80211_agg_tx_operational(struct ieee80211_local *local, + struct sta_info *sta, u16 tid) +{ +#ifdef CONFIG_MAC80211_HT_DEBUG + printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid); +#endif + + if (local->hw.ampdu_queues) { + /* + * Wake up the A-MPDU queue, we stopped it earlier, + * this will in turn wake the entire AC. + */ + ieee80211_wake_queue_by_reason(&local->hw, + local->hw.queues + sta->tid_to_tx_q[tid], + IEEE80211_QUEUE_STOP_REASON_AGGREGATION); + } + + local->ops->ampdu_action(&local->hw, IEEE80211_AMPDU_TX_OPERATIONAL, + &sta->sta, tid, NULL); +} + void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid) { struct ieee80211_local *local = hw_to_local(hw); @@ -446,20 +467,8 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid) *state |= HT_ADDBA_DRV_READY_MSK; - if (*state == HT_AGG_STATE_OPERATIONAL) { -#ifdef CONFIG_MAC80211_HT_DEBUG - printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid); -#endif - if (hw->ampdu_queues) { - /* - * Wake up this queue, we stopped it earlier, - * this will in turn wake the entire AC. - */ - ieee80211_wake_queue_by_reason(hw, - hw->queues + sta->tid_to_tx_q[tid], - IEEE80211_QUEUE_STOP_REASON_AGGREGATION); - } - } + if (*state == HT_AGG_STATE_OPERATIONAL) + ieee80211_agg_tx_operational(local, sta, tid); out: spin_unlock_bh(&sta->lock); @@ -646,9 +655,7 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local, struct ieee80211_mgmt *mgmt, size_t len) { - struct ieee80211_hw *hw = &local->hw; - u16 capab; - u16 tid, start_seq_num; + u16 capab, tid; u8 *state; capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab); @@ -682,26 +689,10 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local, *state |= HT_ADDBA_RECEIVED_MSK; - if (hw->ampdu_queues && *state != curstate && - *state == HT_AGG_STATE_OPERATIONAL) { - /* - * Wake up this queue, we stopped it earlier, - * this will in turn wake the entire AC. 
- */ - ieee80211_wake_queue_by_reason(hw, - hw->queues + sta->tid_to_tx_q[tid], - IEEE80211_QUEUE_STOP_REASON_AGGREGATION); - } - sta->ampdu_mlme.addba_req_num[tid] = 0; + if (*state != curstate && *state == HT_AGG_STATE_OPERATIONAL) + ieee80211_agg_tx_operational(local, sta, tid); - if (local->ops->ampdu_action) { - (void)local->ops->ampdu_action(hw, - IEEE80211_AMPDU_TX_RESUME, - &sta->sta, tid, &start_seq_num); - } -#ifdef CONFIG_MAC80211_HT_DEBUG - printk(KERN_DEBUG "Resuming TX aggregation for tid %d\n", tid); -#endif /* CONFIG_MAC80211_HT_DEBUG */ + sta->ampdu_mlme.addba_req_num[tid] = 0; } else { sta->ampdu_mlme.addba_req_num[tid]++; ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR); -- cgit v0.10.2 From a220858d30604902f650074bfac5a7598bc97ea4 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 23 Mar 2009 17:28:40 +0100 Subject: mac80211: add skb length sanity checking We just found a bug in zd1211rw where it would reject packets in the ->tx() method but leave them modified, which would cause retransmit attempts with completely bogus skbs, eventually leading to a panic due to not having enough headroom in those. This patch adds a sanity check to mac80211 to catch such driver mistakes; in this case we warn and drop the skb. Signed-off-by: Johannes Berg Signed-off-by: John W. Linville diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index b909e40..a0e00c6 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1089,7 +1089,7 @@ static int __ieee80211_tx(struct ieee80211_local *local, { struct sk_buff *skb = *skbp, *next; struct ieee80211_tx_info *info; - int ret; + int ret, len; bool fragm = false; local->mdev->trans_start = jiffies; @@ -1125,7 +1125,12 @@ static int __ieee80211_tx(struct ieee80211_local *local, } next = skb->next; + len = skb->len; ret = local->ops->tx(local_to_hw(local), skb); + if (WARN_ON(ret != NETDEV_TX_OK && skb->len != len)) { + dev_kfree_skb(skb); + ret = NETDEV_TX_OK; + } if (ret != NETDEV_TX_OK) return IEEE80211_TX_AGAIN; *skbp = skb = next; -- cgit v0.10.2 From cd8ffc800ce18e558335c4946b2217864fc16045 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 23 Mar 2009 17:28:41 +0100 Subject: mac80211: fix aggregation to not require queue stop Instead of stopping the entire AC queue when enabling aggregation (which was only done for hardware with aggregation queues) buffer the packets for each station, and release them to the pending skb queue once aggregation is turned on successfully. We get a little more code, but it becomes conceptually simpler and we can remove the entire virtual queue mechanism from mac80211 in a follow-up patch. This changes how mac80211 behaves towards drivers that support aggregation but have no hardware queues -- those drivers will now not be handed packets while the aggregation session is being established, but only after it has been fully established. Signed-off-by: Johannes Berg Signed-off-by: John W. Linville diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 07fe987..841f7f8 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -248,6 +248,9 @@ struct ieee80211_bss_conf { * @IEEE80211_TX_INTFL_RCALGO: mac80211 internal flag, do not test or * set this flag in the driver; indicates that the rate control * algorithm was used and should be notified of TX status + * @IEEE80211_TX_INTFL_NEED_TXPROCESSING: completely internal to mac80211, + * used to indicate that a pending frame requires TX processing before + * it can be sent out. 
*/ enum mac80211_tx_control_flags { IEEE80211_TX_CTL_REQ_TX_STATUS = BIT(0), @@ -264,6 +267,7 @@ enum mac80211_tx_control_flags { IEEE80211_TX_STAT_AMPDU_NO_BACK = BIT(11), IEEE80211_TX_CTL_RATE_CTRL_PROBE = BIT(12), IEEE80211_TX_INTFL_RCALGO = BIT(13), + IEEE80211_TX_INTFL_NEED_TXPROCESSING = BIT(14), }; /** diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index fd718e2..64b839b 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -132,16 +132,6 @@ static int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, state = &sta->ampdu_mlme.tid_state_tx[tid]; if (local->hw.ampdu_queues) { - if (initiator) { - /* - * Stop the AC queue to avoid issues where we send - * unaggregated frames already before the delba. - */ - ieee80211_stop_queue_by_reason(&local->hw, - local->hw.queues + sta->tid_to_tx_q[tid], - IEEE80211_QUEUE_STOP_REASON_AGGREGATION); - } - /* * Pretend the driver woke the queue, just in case * it disabled it before the session was stopped. @@ -158,6 +148,10 @@ static int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, /* HW shall not deny going back to legacy */ if (WARN_ON(ret)) { *state = HT_AGG_STATE_OPERATIONAL; + /* + * We may have pending packets get stuck in this case... + * Not bothering with a workaround for now. + */ } return ret; @@ -226,13 +220,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid) ra, tid); #endif /* CONFIG_MAC80211_HT_DEBUG */ - if (hw->ampdu_queues && ieee80211_ac_from_tid(tid) == 0) { -#ifdef CONFIG_MAC80211_HT_DEBUG - printk(KERN_DEBUG "rejecting on voice AC\n"); -#endif - return -EINVAL; - } - rcu_read_lock(); sta = sta_info_get(local, ra); @@ -267,6 +254,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid) } spin_lock_bh(&sta->lock); + spin_lock(&local->ampdu_lock); sdata = sta->sdata; @@ -308,21 +296,19 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid) ret = -ENOSPC; goto err_unlock_sta; } - - /* - * If we successfully allocate the session, we can't have - * anything going on on the queue this TID maps into, so - * stop it for now. This is a "virtual" stop using the same - * mechanism that drivers will use. - * - * XXX: queue up frames for this session in the sta_info - * struct instead to avoid hitting all other STAs. - */ - ieee80211_stop_queue_by_reason( - &local->hw, hw->queues + qn, - IEEE80211_QUEUE_STOP_REASON_AGGREGATION); } + /* + * While we're asking the driver about the aggregation, + * stop the AC queue so that we don't have to worry + * about frames that came in while we were doing that, + * which would require us to put them to the AC pending + * afterwards which just makes the code more complex. 
+ */ + ieee80211_stop_queue_by_reason( + &local->hw, ieee80211_ac_from_tid(tid), + IEEE80211_QUEUE_STOP_REASON_AGGREGATION); + /* prepare A-MPDU MLME for Tx aggregation */ sta->ampdu_mlme.tid_tx[tid] = kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC); @@ -336,6 +322,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid) goto err_return_queue; } + skb_queue_head_init(&sta->ampdu_mlme.tid_tx[tid]->pending); + /* Tx timer */ sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function = sta_addba_resp_timer_expired; @@ -362,6 +350,12 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid) } sta->tid_to_tx_q[tid] = qn; + /* Driver vetoed or OKed, but we can take packets again now */ + ieee80211_wake_queue_by_reason( + &local->hw, ieee80211_ac_from_tid(tid), + IEEE80211_QUEUE_STOP_REASON_AGGREGATION); + + spin_unlock(&local->ampdu_lock); spin_unlock_bh(&sta->lock); /* send an addBA request */ @@ -388,15 +382,16 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid) sta->ampdu_mlme.tid_tx[tid] = NULL; err_return_queue: if (qn >= 0) { - /* We failed, so start queue again right away. */ - ieee80211_wake_queue_by_reason(hw, hw->queues + qn, - IEEE80211_QUEUE_STOP_REASON_AGGREGATION); /* give queue back to pool */ spin_lock(&local->queue_stop_reason_lock); local->ampdu_ac_queue[qn] = -1; spin_unlock(&local->queue_stop_reason_lock); } + ieee80211_wake_queue_by_reason( + &local->hw, ieee80211_ac_from_tid(tid), + IEEE80211_QUEUE_STOP_REASON_AGGREGATION); err_unlock_sta: + spin_unlock(&local->ampdu_lock); spin_unlock_bh(&sta->lock); unlock: rcu_read_unlock(); @@ -404,6 +399,45 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid) } EXPORT_SYMBOL(ieee80211_start_tx_ba_session); +/* + * splice packets from the STA's pending to the local pending, + * requires a call to ieee80211_agg_splice_finish and holding + * local->ampdu_lock across both calls. + */ +static void ieee80211_agg_splice_packets(struct ieee80211_local *local, + struct sta_info *sta, u16 tid) +{ + unsigned long flags; + u16 queue = ieee80211_ac_from_tid(tid); + + ieee80211_stop_queue_by_reason( + &local->hw, queue, + IEEE80211_QUEUE_STOP_REASON_AGGREGATION); + + if (!skb_queue_empty(&sta->ampdu_mlme.tid_tx[tid]->pending)) { + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + /* mark queue as pending, it is stopped already */ + __set_bit(IEEE80211_QUEUE_STOP_REASON_PENDING, + &local->queue_stop_reasons[queue]); + /* copy over remaining packets */ + skb_queue_splice_tail_init( + &sta->ampdu_mlme.tid_tx[tid]->pending, + &local->pending[queue]); + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); + } +} + +static void ieee80211_agg_splice_finish(struct ieee80211_local *local, + struct sta_info *sta, u16 tid) +{ + u16 queue = ieee80211_ac_from_tid(tid); + + ieee80211_wake_queue_by_reason( + &local->hw, queue, + IEEE80211_QUEUE_STOP_REASON_AGGREGATION); +} + +/* caller must hold sta->lock */ static void ieee80211_agg_tx_operational(struct ieee80211_local *local, struct sta_info *sta, u16 tid) { @@ -411,15 +445,16 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local, printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid); #endif - if (local->hw.ampdu_queues) { - /* - * Wake up the A-MPDU queue, we stopped it earlier, - * this will in turn wake the entire AC. 
- */ - ieee80211_wake_queue_by_reason(&local->hw, - local->hw.queues + sta->tid_to_tx_q[tid], - IEEE80211_QUEUE_STOP_REASON_AGGREGATION); - } + spin_lock(&local->ampdu_lock); + ieee80211_agg_splice_packets(local, sta, tid); + /* + * NB: we rely on sta->lock being taken in the TX + * processing here when adding to the pending queue, + * otherwise we could only change the state of the + * session to OPERATIONAL _here_. + */ + ieee80211_agg_splice_finish(local, sta, tid); + spin_unlock(&local->ampdu_lock); local->ops->ampdu_action(&local->hw, IEEE80211_AMPDU_TX_OPERATIONAL, &sta->sta, tid, NULL); @@ -602,22 +637,19 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid) WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE); spin_lock_bh(&sta->lock); + spin_lock(&local->ampdu_lock); - if (*state & HT_AGG_STATE_INITIATOR_MSK && - hw->ampdu_queues) { - /* - * Wake up this queue, we stopped it earlier, - * this will in turn wake the entire AC. - */ - ieee80211_wake_queue_by_reason(hw, - hw->queues + sta->tid_to_tx_q[tid], - IEEE80211_QUEUE_STOP_REASON_AGGREGATION); - } + ieee80211_agg_splice_packets(local, sta, tid); *state = HT_AGG_STATE_IDLE; + /* from now on packets are no longer put onto sta->pending */ sta->ampdu_mlme.addba_req_num[tid] = 0; kfree(sta->ampdu_mlme.tid_tx[tid]); sta->ampdu_mlme.tid_tx[tid] = NULL; + + ieee80211_agg_splice_finish(local, sta, tid); + + spin_unlock(&local->ampdu_lock); spin_unlock_bh(&sta->lock); rcu_read_unlock(); diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 6ce62e5..32345b4 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -639,6 +639,14 @@ struct ieee80211_local { struct sk_buff_head pending[IEEE80211_MAX_QUEUES]; struct tasklet_struct tx_pending_tasklet; + /* + * This lock is used to prevent concurrent A-MPDU + * session start/stop processing, this thus also + * synchronises the ->ampdu_action() callback to + * drivers and limits it to one at a time. + */ + spinlock_t ampdu_lock; + /* number of interfaces with corresponding IFF_ flags */ atomic_t iff_allmultis, iff_promiscs; diff --git a/net/mac80211/main.c b/net/mac80211/main.c index a7430e9..756284e 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -795,6 +795,8 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, skb_queue_head_init(&local->skb_queue); skb_queue_head_init(&local->skb_queue_unreliable); + spin_lock_init(&local->ampdu_lock); + return local_to_hw(local); } EXPORT_SYMBOL(ieee80211_alloc_hw); diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 4ba3c54..dd3593c 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -239,6 +239,11 @@ void sta_info_destroy(struct sta_info *sta) tid_tx = sta->ampdu_mlme.tid_tx[i]; if (tid_tx) { del_timer_sync(&tid_tx->addba_resp_timer); + /* + * STA removed while aggregation session being + * started? Bit odd, but purge frames anyway. + */ + skb_queue_purge(&tid_tx->pending); kfree(tid_tx); } } diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 5b223b2..18fd5d1 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -73,11 +73,13 @@ enum ieee80211_sta_info_flags { * struct tid_ampdu_tx - TID aggregation information (Tx). * * @addba_resp_timer: timer for peer's response to addba request + * @pending: pending frames queue -- use sta's spinlock to protect * @ssn: Starting Sequence Number expected to be aggregated. 
* @dialog_token: dialog token for aggregation session */ struct tid_ampdu_tx { struct timer_list addba_resp_timer; + struct sk_buff_head pending; u16 ssn; u8 dialog_token; }; diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index a0e00c6..906ab78 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -984,9 +984,9 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx, struct ieee80211_hdr *hdr; struct ieee80211_sub_if_data *sdata; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - int hdrlen, tid; u8 *qc, *state; + bool queued = false; memset(tx, 0, sizeof(*tx)); tx->skb = skb; @@ -1013,20 +1013,53 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx, */ } + /* + * If this flag is set to true anywhere, and we get here, + * we are doing the needed processing, so remove the flag + * now. + */ + info->flags &= ~IEEE80211_TX_INTFL_NEED_TXPROCESSING; + hdr = (struct ieee80211_hdr *) skb->data; tx->sta = sta_info_get(local, hdr->addr1); - if (tx->sta && ieee80211_is_data_qos(hdr->frame_control)) { + if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) && + (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) { unsigned long flags; + struct tid_ampdu_tx *tid_tx; + qc = ieee80211_get_qos_ctl(hdr); tid = *qc & IEEE80211_QOS_CTL_TID_MASK; spin_lock_irqsave(&tx->sta->lock, flags); + /* + * XXX: This spinlock could be fairly expensive, but see the + * comment in agg-tx.c:ieee80211_agg_tx_operational(). + * One way to solve this would be to do something RCU-like + * for managing the tid_tx struct and using atomic bitops + * for the actual state -- by introducing an actual + * 'operational' bit that would be possible. It would + * require changing ieee80211_agg_tx_operational() to + * set that bit, and changing the way tid_tx is managed + * everywhere, including races between that bit and + * tid_tx going away (tid_tx being added can be easily + * committed to memory before the 'operational' bit). 
+ */ + tid_tx = tx->sta->ampdu_mlme.tid_tx[tid]; state = &tx->sta->ampdu_mlme.tid_state_tx[tid]; - if (*state == HT_AGG_STATE_OPERATIONAL) + if (*state == HT_AGG_STATE_OPERATIONAL) { info->flags |= IEEE80211_TX_CTL_AMPDU; + } else if (*state != HT_AGG_STATE_IDLE) { + /* in progress */ + queued = true; + info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; + __skb_queue_tail(&tid_tx->pending, skb); + } spin_unlock_irqrestore(&tx->sta->lock, flags); + + if (unlikely(queued)) + return TX_QUEUED; } if (is_multicast_ether_addr(hdr->addr1)) { @@ -1077,7 +1110,14 @@ static int ieee80211_tx_prepare(struct ieee80211_local *local, } if (unlikely(!dev)) return -ENODEV; - /* initialises tx with control */ + /* + * initialises tx with control + * + * return value is safe to ignore here because this function + * can only be invoked for multicast frames + * + * XXX: clean up + */ __ieee80211_tx_prepare(tx, skb, dev); dev_put(dev); return 0; @@ -1188,7 +1228,8 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx) return 0; } -static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb) +static void ieee80211_tx(struct net_device *dev, struct sk_buff *skb, + bool txpending) { struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); struct sta_info *sta; @@ -1202,11 +1243,11 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb) queue = skb_get_queue_mapping(skb); - WARN_ON(!skb_queue_empty(&local->pending[queue])); + WARN_ON(!txpending && !skb_queue_empty(&local->pending[queue])); if (unlikely(skb->len < 10)) { dev_kfree_skb(skb); - return 0; + return; } rcu_read_lock(); @@ -1214,10 +1255,13 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb) /* initialises tx */ res_prepare = __ieee80211_tx_prepare(&tx, skb, dev); - if (res_prepare == TX_DROP) { + if (unlikely(res_prepare == TX_DROP)) { dev_kfree_skb(skb); rcu_read_unlock(); - return 0; + return; + } else if (unlikely(res_prepare == TX_QUEUED)) { + rcu_read_unlock(); + return; } sta = tx.sta; @@ -1251,7 +1295,12 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb) do { next = skb->next; skb->next = NULL; - skb_queue_tail(&local->pending[queue], skb); + if (unlikely(txpending)) + skb_queue_head(&local->pending[queue], + skb); + else + skb_queue_tail(&local->pending[queue], + skb); } while ((skb = next)); /* @@ -1276,7 +1325,7 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb) } out: rcu_read_unlock(); - return 0; + return; drop: rcu_read_unlock(); @@ -1287,7 +1336,6 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb) dev_kfree_skb(skb); skb = next; } - return 0; } /* device xmit handlers */ @@ -1346,7 +1394,6 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev) FOUND_SDATA, UNKNOWN_ADDRESS, } monitor_iface = NOT_MONITOR; - int ret; if (skb->iif) odev = dev_get_by_index(&init_net, skb->iif); @@ -1360,7 +1407,7 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev) "originating device\n", dev->name); #endif dev_kfree_skb(skb); - return 0; + return NETDEV_TX_OK; } if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) && @@ -1389,7 +1436,7 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev) else if (mesh_nexthop_lookup(skb, osdata)) { dev_put(odev); - return 0; + return NETDEV_TX_OK; } if (memcmp(odev->dev_addr, hdr->addr4, ETH_ALEN) != 0) IEEE80211_IFSTA_MESH_CTR_INC(&osdata->u.mesh, @@ -1451,7 +1498,7 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, 
struct net_device *dev) if (ieee80211_skb_resize(osdata->local, skb, headroom, may_encrypt)) { dev_kfree_skb(skb); dev_put(odev); - return 0; + return NETDEV_TX_OK; } if (osdata->vif.type == NL80211_IFTYPE_AP_VLAN) @@ -1460,10 +1507,11 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev) u.ap); if (likely(monitor_iface != UNKNOWN_ADDRESS)) info->control.vif = &osdata->vif; - ret = ieee80211_tx(odev, skb); + + ieee80211_tx(odev, skb, false); dev_put(odev); - return ret; + return NETDEV_TX_OK; } int ieee80211_monitor_start_xmit(struct sk_buff *skb, @@ -1827,6 +1875,54 @@ void ieee80211_clear_tx_pending(struct ieee80211_local *local) skb_queue_purge(&local->pending[i]); } +static bool ieee80211_tx_pending_skb(struct ieee80211_local *local, + struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_sub_if_data *sdata; + struct sta_info *sta; + struct ieee80211_hdr *hdr; + struct net_device *dev; + int ret; + bool result = true; + + /* does interface still exist? */ + dev = dev_get_by_index(&init_net, skb->iif); + if (!dev) { + dev_kfree_skb(skb); + return true; + } + + /* validate info->control.vif against skb->iif */ + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + sdata = container_of(sdata->bss, + struct ieee80211_sub_if_data, + u.ap); + + if (unlikely(info->control.vif && info->control.vif != &sdata->vif)) { + dev_kfree_skb(skb); + result = true; + goto out; + } + + if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) { + ieee80211_tx(dev, skb, true); + } else { + hdr = (struct ieee80211_hdr *)skb->data; + sta = sta_info_get(local, hdr->addr1); + + ret = __ieee80211_tx(local, &skb, sta); + if (ret != IEEE80211_TX_OK) + result = false; + } + + out: + dev_put(dev); + + return result; +} + /* * Transmit all pending packets. Called from tasklet, locks master device * TX lock so that no new packets can come in. @@ -1835,9 +1931,8 @@ void ieee80211_tx_pending(unsigned long data) { struct ieee80211_local *local = (struct ieee80211_local *)data; struct net_device *dev = local->mdev; - struct ieee80211_hdr *hdr; unsigned long flags; - int i, ret; + int i; bool next; rcu_read_lock(); @@ -1868,13 +1963,8 @@ void ieee80211_tx_pending(unsigned long data) while (!skb_queue_empty(&local->pending[i])) { struct sk_buff *skb = skb_dequeue(&local->pending[i]); - struct sta_info *sta; - - hdr = (struct ieee80211_hdr *)skb->data; - sta = sta_info_get(local, hdr->addr1); - ret = __ieee80211_tx(local, &skb, sta); - if (ret != IEEE80211_TX_OK) { + if (!ieee80211_tx_pending_skb(local, skb)) { skb_queue_head(&local->pending[i], skb); break; } -- cgit v0.10.2 From e4e72fb4de93e3d4047a4ee3f08778422e17ed0d Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 23 Mar 2009 17:28:42 +0100 Subject: mac80211/iwlwifi: move virtual A-MDPU queue bookkeeping to iwlwifi This patch removes all the virtual A-MPDU-queue bookkeeping from mac80211. Curiously, iwlwifi already does its own bookkeeping, so it doesn't require much changes except where it needs to handle starting and stopping the queues in mac80211. To handle the queue stop/wake properly, we rewrite the software queue number for aggregation frames and internally to iwlwifi keep track of the queues that map into the same AC queue, and only talk to mac80211 about the AC queue. The implementation requires calling two new functions, iwl_stop_queue and iwl_wake_queue instead of the mac80211 counterparts. 
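The queue-number rewriting described above is easiest to see in isolation. The stand-alone C sketch below mirrors the bit layout of the iwl_virtual_agg_queue_num()/iwl_wake_queue()/iwl_stop_queue() helpers added in the iwl-helpers.h hunk further down; the function names and the demo main() here are illustrative only and are not part of the patch.

/*
 * Illustration of the software queue-number encoding:
 *   bit 7     : marks an aggregation queue
 *   bits 6..2 : hardware A-MPDU queue (0-31)
 *   bits 1..0 : AC queue (0-3), the only part reported to mac80211
 */
#include <assert.h>
#include <stdio.h>

typedef unsigned char u8;

static u8 encode_agg_queue(u8 ac, u8 hwq)
{
	assert(ac < 4);		/* only 2 bits available */
	assert(hwq < 32);	/* only 5 bits available */
	return 0x80 | (hwq << 2) | ac;
}

static void decode_queue(u8 queue, u8 *ac, u8 *hwq)
{
	*ac = queue;
	*hwq = queue;
	if (queue & 0x80) {	/* aggregation queue: unpack both fields */
		*ac = queue & 3;
		*hwq = (queue >> 2) & 0x1f;
	}
}

int main(void)
{
	u8 ac, hwq, q = encode_agg_queue(2, 17);

	decode_queue(q, &ac, &hwq);
	/* the driver stops/wakes per hwq but only tells mac80211 about the AC */
	printf("encoded 0x%02x -> ac %u, hw queue %u\n", q, ac, hwq);
	return 0;
}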
Signed-off-by: Johannes Berg Cc: Reinette Chattre Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c index d03f553..2399328 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c @@ -293,7 +293,7 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv, if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) && (txq_id != IWL_CMD_QUEUE_NUM) && priv->mac80211_registered) - ieee80211_wake_queue(priv->hw, txq_id); + iwl_wake_queue(priv, txq_id); } /** diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c index bd0140b..847a622 100644 --- a/drivers/net/wireless/iwlwifi/iwl-4965.c +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c @@ -2178,10 +2178,9 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv, (iwl_queue_space(&txq->q) > txq->q.low_mark) && (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) { if (agg->state == IWL_AGG_OFF) - ieee80211_wake_queue(priv->hw, txq_id); + iwl_wake_queue(priv, txq_id); else - ieee80211_wake_queue(priv->hw, - txq->swq_id); + iwl_wake_queue(priv, txq->swq_id); } } } else { @@ -2205,7 +2204,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv, if (priv->mac80211_registered && (iwl_queue_space(&txq->q) > txq->q.low_mark)) - ieee80211_wake_queue(priv->hw, txq_id); + iwl_wake_queue(priv, txq_id); } if (qc && likely(sta_id != IWL_INVALID_STATION)) diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index a3d9a95..e5ca251 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c @@ -1295,10 +1295,9 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv, (iwl_queue_space(&txq->q) > txq->q.low_mark) && (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) { if (agg->state == IWL_AGG_OFF) - ieee80211_wake_queue(priv->hw, txq_id); + iwl_wake_queue(priv, txq_id); else - ieee80211_wake_queue(priv->hw, - txq->swq_id); + iwl_wake_queue(priv, txq->swq_id); } } } else { @@ -1324,7 +1323,7 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv, if (priv->mac80211_registered && (iwl_queue_space(&txq->q) > txq->q.low_mark)) - ieee80211_wake_queue(priv->hw, txq_id); + iwl_wake_queue(priv, txq_id); } if (ieee80211_is_data_qos(tx_resp->frame_ctrl)) diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c index 4b1298c..c54fb93 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.c +++ b/drivers/net/wireless/iwlwifi/iwl-core.c @@ -1309,9 +1309,6 @@ int iwl_setup_mac(struct iwl_priv *priv) /* Default value; 4 EDCA QOS priorities */ hw->queues = 4; - /* queues to support 11n aggregation */ - if (priv->cfg->sku & IWL_SKU_N) - hw->ampdu_queues = priv->cfg->mod_params->num_of_ampdu_queues; hw->conf.beacon_int = 100; hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h index 0baae80..ec9a138 100644 --- a/drivers/net/wireless/iwlwifi/iwl-dev.h +++ b/drivers/net/wireless/iwlwifi/iwl-dev.h @@ -996,6 +996,12 @@ struct iwl_priv { u8 key_mapping_key; unsigned long ucode_key_table; + /* queue refcounts */ +#define IWL_MAX_HW_QUEUES 32 + unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)]; + /* for each AC */ + atomic_t queue_stop_count[4]; + /* Indication if ieee80211_ops->open has been called */ u8 is_open; diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h index 
fb64d29..a1328c3 100644 --- a/drivers/net/wireless/iwlwifi/iwl-helpers.h +++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h @@ -93,4 +93,56 @@ static inline int iwl_alloc_fw_desc(struct pci_dev *pci_dev, return (desc->v_addr != NULL) ? 0 : -ENOMEM; } +/* + * we have 8 bits used like this: + * + * 7 6 5 4 3 2 1 0 + * | | | | | | | | + * | | | | | | +-+-------- AC queue (0-3) + * | | | | | | + * | +-+-+-+-+------------ HW A-MPDU queue + * | + * +---------------------- indicates agg queue + */ +static inline u8 iwl_virtual_agg_queue_num(u8 ac, u8 hwq) +{ + BUG_ON(ac > 3); /* only have 2 bits */ + BUG_ON(hwq > 31); /* only have 5 bits */ + + return 0x80 | (hwq << 2) | ac; +} + +static inline void iwl_wake_queue(struct iwl_priv *priv, u8 queue) +{ + u8 ac = queue; + u8 hwq = queue; + + if (queue & 0x80) { + ac = queue & 3; + hwq = (queue >> 2) & 0x1f; + } + + if (test_and_clear_bit(hwq, priv->queue_stopped)) + if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0) + ieee80211_wake_queue(priv->hw, ac); +} + +static inline void iwl_stop_queue(struct iwl_priv *priv, u8 queue) +{ + u8 ac = queue; + u8 hwq = queue; + + if (queue & 0x80) { + ac = queue & 3; + hwq = (queue >> 2) & 0x1f; + } + + if (!test_and_set_bit(hwq, priv->queue_stopped)) + if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0) + ieee80211_stop_queue(priv->hw, ac); +} + +#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue +#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue + #endif /* __iwl_helpers_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c index b13862a..1f117a4 100644 --- a/drivers/net/wireless/iwlwifi/iwl-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c @@ -763,8 +763,10 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) hdr->seq_ctrl |= cpu_to_le16(seq_number); seq_number += 0x10; /* aggregation is on for this */ - if (info->flags & IEEE80211_TX_CTL_AMPDU) + if (info->flags & IEEE80211_TX_CTL_AMPDU) { txq_id = priv->stations[sta_id].tid[tid].agg.txq_id; + swq_id = iwl_virtual_agg_queue_num(swq_id, txq_id); + } priv->stations[sta_id].tid[tid].tfds_in_queue++; } @@ -895,7 +897,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) iwl_txq_update_write_ptr(priv, txq); spin_unlock_irqrestore(&priv->lock, flags); } else { - ieee80211_stop_queue(priv->hw, txq->swq_id); + iwl_stop_queue(priv, txq->swq_id); } } @@ -1433,7 +1435,7 @@ void iwl_rx_reply_compressed_ba(struct iwl_priv *priv, if ((iwl_queue_space(&txq->q) > txq->q.low_mark) && priv->mac80211_registered && (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) - ieee80211_wake_queue(priv->hw, txq->swq_id); + iwl_wake_queue(priv, txq->swq_id); iwl_txq_check_empty(priv, sta_id, tid, scd_flow); } diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index ede29b6..a71b08c 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c @@ -1168,7 +1168,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) spin_unlock_irqrestore(&priv->lock, flags); } - ieee80211_stop_queue(priv->hw, skb_get_queue_mapping(skb)); + iwl_stop_queue(priv, skb_get_queue_mapping(skb)); } return 0; diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 5511610..d4fdc8b 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -933,7 +933,6 @@ static int __init init_mac80211_hwsim(void) BIT(NL80211_IFTYPE_STATION) | 
BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_MESH_POINT); - hw->ampdu_queues = 1; hw->flags = IEEE80211_HW_MFP_CAPABLE; diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 841f7f8..3b83a80 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -93,12 +93,9 @@ struct ieee80211_ht_bss_info { * enum ieee80211_max_queues - maximum number of queues * * @IEEE80211_MAX_QUEUES: Maximum number of regular device queues. - * @IEEE80211_MAX_AMPDU_QUEUES: Maximum number of queues usable - * for A-MPDU operation. */ enum ieee80211_max_queues { IEEE80211_MAX_QUEUES = 4, - IEEE80211_MAX_AMPDU_QUEUES = 16, }; /** @@ -952,12 +949,6 @@ enum ieee80211_hw_flags { * data packets. WMM/QoS requires at least four, these * queues need to have configurable access parameters. * - * @ampdu_queues: number of available hardware transmit queues - * for A-MPDU packets, these have no access parameters - * because they're used only for A-MPDU frames. Note that - * mac80211 will not currently use any of the regular queues - * for aggregation. - * * @rate_control_algorithm: rate control algorithm for this hardware. * If unset (NULL), the default algorithm will be used. Must be * set before calling ieee80211_register_hw(). @@ -982,7 +973,6 @@ struct ieee80211_hw { int vif_data_size; int sta_data_size; u16 queues; - u16 ampdu_queues; u16 max_listen_interval; s8 max_signal; u8 max_rates; @@ -1372,8 +1362,8 @@ enum ieee80211_ampdu_mlme_action { * @get_tx_stats: Get statistics of the current TX queue status. This is used * to get number of currently queued packets (queue length), maximum queue * size (limit), and total number of packets sent using each TX queue - * (count). The 'stats' pointer points to an array that has hw->queues + - * hw->ampdu_queues items. + * (count). The 'stats' pointer points to an array that has hw->queues + * items. * * @get_tsf: Get the current TSF timer value from firmware/hardware. Currently, * this is only used for IBSS mode BSSID merging and debugging. Is not a diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 64b839b..947aaaa 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -131,14 +131,6 @@ static int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, state = &sta->ampdu_mlme.tid_state_tx[tid]; - if (local->hw.ampdu_queues) { - /* - * Pretend the driver woke the queue, just in case - * it disabled it before the session was stopped. 
- */ - ieee80211_wake_queue( - &local->hw, local->hw.queues + sta->tid_to_tx_q[tid]); - } *state = HT_AGG_STATE_REQ_STOP_BA_MSK | (initiator << HT_AGG_STATE_INITIATOR_SHIFT); @@ -206,7 +198,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid) struct sta_info *sta; struct ieee80211_sub_if_data *sdata; u8 *state; - int i, qn = -1, ret = 0; + int ret = 0; u16 start_seq_num; if (WARN_ON(!local->ops->ampdu_action)) @@ -275,29 +267,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid) goto err_unlock_sta; } - if (hw->ampdu_queues) { - spin_lock(&local->queue_stop_reason_lock); - /* reserve a new queue for this session */ - for (i = 0; i < local->hw.ampdu_queues; i++) { - if (local->ampdu_ac_queue[i] < 0) { - qn = i; - local->ampdu_ac_queue[qn] = - ieee80211_ac_from_tid(tid); - break; - } - } - spin_unlock(&local->queue_stop_reason_lock); - - if (qn < 0) { -#ifdef CONFIG_MAC80211_HT_DEBUG - printk(KERN_DEBUG "BA request denied - " - "queue unavailable for tid %d\n", tid); -#endif /* CONFIG_MAC80211_HT_DEBUG */ - ret = -ENOSPC; - goto err_unlock_sta; - } - } - /* * While we're asking the driver about the aggregation, * stop the AC queue so that we don't have to worry @@ -319,7 +288,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid) tid); #endif ret = -ENOMEM; - goto err_return_queue; + goto err_wake_queue; } skb_queue_head_init(&sta->ampdu_mlme.tid_tx[tid]->pending); @@ -348,7 +317,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid) *state = HT_AGG_STATE_IDLE; goto err_free; } - sta->tid_to_tx_q[tid] = qn; /* Driver vetoed or OKed, but we can take packets again now */ ieee80211_wake_queue_by_reason( @@ -380,13 +348,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid) err_free: kfree(sta->ampdu_mlme.tid_tx[tid]); sta->ampdu_mlme.tid_tx[tid] = NULL; - err_return_queue: - if (qn >= 0) { - /* give queue back to pool */ - spin_lock(&local->queue_stop_reason_lock); - local->ampdu_ac_queue[qn] = -1; - spin_unlock(&local->queue_stop_reason_lock); - } + err_wake_queue: ieee80211_wake_queue_by_reason( &local->hw, ieee80211_ac_from_tid(tid), IEEE80211_QUEUE_STOP_REASON_AGGREGATION); diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 32345b4..e6ed78c 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -594,12 +594,7 @@ struct ieee80211_local { const struct ieee80211_ops *ops; - /* AC queue corresponding to each AMPDU queue */ - s8 ampdu_ac_queue[IEEE80211_MAX_AMPDU_QUEUES]; - unsigned int amdpu_ac_stop_refcnt[IEEE80211_MAX_AMPDU_QUEUES]; - - unsigned long queue_stop_reasons[IEEE80211_MAX_QUEUES + - IEEE80211_MAX_AMPDU_QUEUES]; + unsigned long queue_stop_reasons[IEEE80211_MAX_QUEUES]; /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */ spinlock_t queue_stop_reason_lock; diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 756284e..a6f1d8a 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -774,11 +774,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, setup_timer(&local->dynamic_ps_timer, ieee80211_dynamic_ps_timer, (unsigned long) local); - for (i = 0; i < IEEE80211_MAX_AMPDU_QUEUES; i++) - local->ampdu_ac_queue[i] = -1; - /* using an s8 won't work with more than that */ - BUILD_BUG_ON(IEEE80211_MAX_AMPDU_QUEUES > 127); - sta_info_init(local); for (i = 0; i < IEEE80211_MAX_QUEUES; i++) @@ -874,10 +869,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) */ if (hw->queues 
> IEEE80211_MAX_QUEUES) hw->queues = IEEE80211_MAX_QUEUES; - if (hw->ampdu_queues > IEEE80211_MAX_AMPDU_QUEUES) - hw->ampdu_queues = IEEE80211_MAX_AMPDU_QUEUES; - if (hw->queues < 4) - hw->ampdu_queues = 0; mdev = alloc_netdev_mq(sizeof(struct ieee80211_master_priv), "wmaster%d", ieee80211_master_setup, diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index dd3593c..c5f14e6 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -203,17 +203,6 @@ void sta_info_destroy(struct sta_info *sta) if (tid_rx) tid_rx->shutdown = true; - /* - * The stop callback cannot find this station any more, but - * it didn't complete its work -- start the queue if necessary - */ - if (sta->ampdu_mlme.tid_state_tx[i] & HT_AGG_STATE_INITIATOR_MSK && - sta->ampdu_mlme.tid_state_tx[i] & HT_AGG_STATE_REQ_STOP_BA_MSK && - local->hw.ampdu_queues) - ieee80211_wake_queue_by_reason(&local->hw, - local->hw.queues + sta->tid_to_tx_q[i], - IEEE80211_QUEUE_STOP_REASON_AGGREGATION); - spin_unlock_bh(&sta->lock); /* @@ -292,7 +281,6 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, * enable session_timer's data differentiation. refer to * sta_rx_agg_session_timer_expired for useage */ sta->timer_to_tid[i] = i; - sta->tid_to_tx_q[i] = -1; /* rx */ sta->ampdu_mlme.tid_state_rx[i] = HT_AGG_STATE_IDLE; sta->ampdu_mlme.tid_rx[i] = NULL; diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 18fd5d1..5534d48 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -206,7 +206,6 @@ struct sta_ampdu_mlme { * @tid_seq: per-TID sequence numbers for sending to this STA * @ampdu_mlme: A-MPDU state machine state * @timer_to_tid: identity mapping to ID timers - * @tid_to_tx_q: map tid to tx queue (invalid == negative values) * @llid: Local link ID * @plid: Peer link ID * @reason: Cancel reason on PLINK_HOLDING state @@ -281,7 +280,6 @@ struct sta_info { */ struct sta_ampdu_mlme ampdu_mlme; u8 timer_to_tid[STA_TID_NUM]; - s8 tid_to_tx_q[STA_TID_NUM]; #ifdef CONFIG_MAC80211_MESH /* diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 906ab78..3fb04a8 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1145,25 +1145,6 @@ static int __ieee80211_tx(struct ieee80211_local *local, info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT | IEEE80211_TX_CTL_FIRST_FRAGMENT); - /* - * Internally, we need to have the queue mapping point to - * the real AC queue, not the virtual A-MPDU queue. This - * now finally sets the queue to what the driver wants. - * We will later move this down into the only driver that - * needs it, iwlwifi. 
-	 */
-		if (sta && local->hw.ampdu_queues &&
-		    info->flags & IEEE80211_TX_CTL_AMPDU) {
-			unsigned long flags;
-			u8 *qc = ieee80211_get_qos_ctl((void *) skb->data);
-			int tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
-
-			spin_lock_irqsave(&sta->lock, flags);
-			skb_set_queue_mapping(skb, local->hw.queues +
-					      sta->tid_to_tx_q[tid]);
-			spin_unlock_irqrestore(&sta->lock, flags);
-		}
-
 		next = skb->next;
 		len = skb->len;
 		ret = local->ops->tx(local_to_hw(local), skb);
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 0247d80..fdf432f 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -339,29 +339,8 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 
-	if (queue >= hw->queues) {
-		if (local->ampdu_ac_queue[queue - hw->queues] < 0)
-			return;
-
-		/*
-		 * for virtual aggregation queues, we need to refcount the
-		 * internal mac80211 disable (multiple times!), keep track of
-		 * driver disable _and_ make sure the regular queue is
-		 * actually enabled.
-		 */
-		if (reason == IEEE80211_QUEUE_STOP_REASON_AGGREGATION)
-			local->amdpu_ac_stop_refcnt[queue - hw->queues]--;
-		else
-			__clear_bit(reason, &local->queue_stop_reasons[queue]);
-
-		if (local->queue_stop_reasons[queue] ||
-		    local->amdpu_ac_stop_refcnt[queue - hw->queues])
-			return;
-
-		/* now go on to treat the corresponding regular queue */
-		queue = local->ampdu_ac_queue[queue - hw->queues];
-		reason = IEEE80211_QUEUE_STOP_REASON_AGGREGATION;
-	}
+	if (WARN_ON(queue >= hw->queues))
+		return;
 
 	__clear_bit(reason, &local->queue_stop_reasons[queue]);
 
@@ -400,25 +379,8 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 
-	if (queue >= hw->queues) {
-		if (local->ampdu_ac_queue[queue - hw->queues] < 0)
-			return;
-
-		/*
-		 * for virtual aggregation queues, we need to refcount the
-		 * internal mac80211 disable (multiple times!), keep track of
-		 * driver disable _and_ make sure the regular queue is
-		 * actually enabled.
-		 */
-		if (reason == IEEE80211_QUEUE_STOP_REASON_AGGREGATION)
-			local->amdpu_ac_stop_refcnt[queue - hw->queues]++;
-		else
-			__set_bit(reason, &local->queue_stop_reasons[queue]);
-
-		/* now go on to treat the corresponding regular queue */
-		queue = local->ampdu_ac_queue[queue - hw->queues];
-		reason = IEEE80211_QUEUE_STOP_REASON_AGGREGATION;
-	}
+	if (WARN_ON(queue >= hw->queues))
+		return;
 
 	/*
 	 * Only stop if it was previously running, this is necessary
@@ -474,15 +436,9 @@ EXPORT_SYMBOL(ieee80211_stop_queues);
 int ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
-	unsigned long flags;
 
-	if (queue >= hw->queues) {
-		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
-		queue = local->ampdu_ac_queue[queue - hw->queues];
-		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
-		if (queue < 0)
-			return true;
-	}
+	if (WARN_ON(queue >= hw->queues))
+		return true;
 
 	return __netif_subqueue_stopped(local->mdev, queue);
 }
@@ -497,7 +453,7 @@ void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
 
 	spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
 
-	for (i = 0; i < hw->queues + hw->ampdu_queues; i++)
+	for (i = 0; i < hw->queues; i++)
 		__ieee80211_wake_queue(hw, i, reason);
 
 	spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
-- cgit v0.10.2

From 8a5117d80fe93de5df5b56480054f7df1fd20755 Mon Sep 17 00:00:00 2001
From: "Luis R. Rodriguez"
Date: Tue, 24 Mar 2009 21:21:07 -0400
Subject: cfg80211: default CONFIG_WIRELESS_OLD_REGULATORY to n

And update description and feature-removal schedule according to the
new plan.

Signed-off-by: Luis R. Rodriguez
Signed-off-by: John W. Linville

diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 8365f52..02ea377 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -7,7 +7,7 @@ be removed from this file.
 
 ---------------------------
 
 What:	The ieee80211_regdom module parameter
-When:	March 2010
+When:	March 2010 / desktop catchup
 Why:	This was inherited by the CONFIG_WIRELESS_OLD_REGULATORY code,
 	and currently serves as an option for users to define an
@@ -30,18 +30,23 @@ Who: Luis R. Rodriguez
 
 ---------------------------
 
-What:	old static regulatory information
-When:	2.6.29
+What:	CONFIG_WIRELESS_OLD_REGULATORY - old static regulatory information
+When:	March 2010 / desktop catchup
+
 Why:	The old regulatory infrastructure has been replaced with a new one
 	which does not require statically defined regulatory domains. We do
 	not want to keep static regulatory domains in the kernel due to the
 	the dynamic nature of regulatory law and localization. We kept around
 	the old static definitions for the regulatory domains of:
+
 	* US
 	* JP
 	* EU
+
 	and used by default the US when CONFIG_WIRELESS_OLD_REGULATORY was
-	set.
+	set. We will remove this option once the standard Linux desktop catches
+	up with the new userspace APIs we have implemented.
+
 Who:	Luis R. Rodriguez
 
 ---------------------------
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index d1d18f3..3c3bc9e 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -12,36 +12,17 @@ config CFG80211_REG_DEBUG
 
 config WIRELESS_OLD_REGULATORY
 	bool "Old wireless static regulatory definitions"
-	default y
+	default n
 	---help---
 	  This option enables the old static regulatory information
-	  and uses it within the new framework. This is available
-	  temporarily as an option to help prevent immediate issues
-	  due to the switch to the new regulatory framework which
-	  does require a new userspace application which has the
-	  database of regulatory information (CRDA) and another for
-	  setting regulatory domains (iw).
-
-	  For more information see:
-
-	  http://wireless.kernel.org/en/developers/Regulatory/CRDA
-	  http://wireless.kernel.org/en/users/Documentation/iw
-
-	  It is important to note though that if you *do* have CRDA present
-	  and if this option is enabled CRDA *will* be called to update the
-	  regulatory domain (for US and JP only). Support for letting the user
-	  set the regulatory domain through iw is also supported. This option
-	  mainly exists to leave around for a kernel release some old static
-	  regulatory domains that were defined and to keep around the old
-	  ieee80211_regdom module parameter. This is being phased out and you
-	  should stop using them ASAP.
-
-	  Note: You will need CRDA if you want 802.11d support
-
-	  Say Y unless you have installed a new userspace application.
-	  Also say Y if have one currently depending on the ieee80211_regdom
-	  module parameter and cannot port it to use the new userspace
-	  interfaces.
+	  and uses it within the new framework. This option is available
+	  for historical reasons and it is advised to leave it off.
+
+	  For details see:
+
+	  http://wireless.kernel.org/en/developers/Regulatory
+
+	  Say N and if you say Y, please tell us why. The default is N.
 
 config WIRELESS_EXT
 	bool "Wireless extensions"
-- cgit v0.10.2

From 80e20f6f360078b4852eac6825883e5aa25564bb Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Fri, 27 Mar 2009 17:22:55 -0700
Subject: Revert "netrom: zero length frame filtering in NetRom"

This reverts commit a3ac80a130300573de351083cf4a5b46d233e8bf.

Alan Cox says that zero length writes do have special meaning and
are useful in this protocol.

Signed-off-by: David S. Miller

diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index d1c16bb..4e705f8 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1037,10 +1037,6 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
 	unsigned char *asmptr;
 	int size;
 
-	/* Netrom empty data frame has no meaning : don't send */
-	if (len == 0)
-		return 0;
-
 	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
 		return -EINVAL;
 
@@ -1175,11 +1171,6 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
 	skb_reset_transport_header(skb);
 	copied = skb->len;
 
-	/* NetRom empty data frame has no meaning : ignore it */
-	if (copied == 0) {
-		goto out;
-	}
-
 	if (copied > size) {
 		copied = size;
 		msg->msg_flags |= MSG_TRUNC;
@@ -1195,7 +1186,7 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
 
 	msg->msg_namelen = sizeof(*sax);
 
-out:	skb_free_datagram(sk, skb);
+	skb_free_datagram(sk, skb);
 
 	release_sock(sk);
 	return copied;
-- cgit v0.10.2

From c44a4366649aca4f5b4a51ff71d4c9cde3b7c9da Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Fri, 27 Mar 2009 17:23:42 -0700
Subject: Revert "ax25: zero length frame filtering in AX25"

This reverts commit f99bcff7a290768e035f3d4726e103c6ebe858bf.

Like netrom, Alan Cox says that zero lengths have real meaning and
are useful in this protocol.

Signed-off-by: David S. Miller

diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 7da5ebb..fd9d06f 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1435,11 +1435,6 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock,
 	size_t size;
 	int lv, err, addr_len = msg->msg_namelen;
 
-	/* AX.25 empty data frame has no meaning : don't send */
-	if (len == 0) {
-		return (0);
-	}
-
 	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
 		return -EINVAL;
 
@@ -1639,13 +1634,6 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock,
 	skb_reset_transport_header(skb);
 	copied = skb->len;
 
-	/* AX.25 empty data frame has no meaning : ignore it */
-	if (copied == 0) {
-		err = copied;
-		skb_free_datagram(sk, skb);
-		goto out;
-	}
-
 	if (copied > size) {
 		copied = size;
 		msg->msg_flags |= MSG_TRUNC;
-- cgit v0.10.2

From 6e8a4fa651975ff808dba130eae442f4cea1671c Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Fri, 27 Mar 2009 18:15:02 -0700
Subject: sparc64: We need to use compat_sys_ustat() as well.

Sparc was missed in commit 2b1c6bd77d4e6a727ffac8630cd154b2144b751a
("generic compat_sys_ustat"). We definitely need it, since our
__kernel_ino_t is "unsigned long".

Signed-off-by: David S. Miller

diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index f93c42a..a8000b1 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -51,7 +51,7 @@ sys_call_table32:
 /*150*/	.word sys_nis_syscall, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64
 	.word compat_sys_fcntl64, sys_inotify_rm_watch, compat_sys_statfs, compat_sys_fstatfs, sys_oldumount
 /*160*/	.word compat_sys_sched_setaffinity, compat_sys_sched_getaffinity, sys32_getdomainname, sys32_setdomainname, sys_nis_syscall
-	.word sys_quotactl, sys_set_tid_address, compat_sys_mount, sys_ustat, sys32_setxattr
+	.word sys_quotactl, sys_set_tid_address, compat_sys_mount, compat_sys_ustat, sys32_setxattr
 /*170*/	.word sys32_lsetxattr, sys32_fsetxattr, sys_getxattr, sys_lgetxattr, compat_sys_getdents
 	.word sys_setsid, sys_fchdir, sys32_fgetxattr, sys_listxattr, sys_llistxattr
 /*180*/	.word sys32_flistxattr, sys_removexattr, sys_lremovexattr, compat_sys_sigpending, sys_ni_syscall
-- cgit v0.10.2
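
The reasoning behind the sparc64 change above ("our __kernel_ino_t is 'unsigned long'") is easier to see with the structure layouts side by side. The following user-space C sketch is illustrative only: the field names, types and the compat_ino_t typedef are stand-ins chosen for the example, not copied from the sparc headers, and the program merely shows why a 64-bit inode type forces the 32-bit syscall table entry to point at a compat handler rather than the native sys_ustat.

/*
 * Illustrative sketch (assumed layouts, not the real sparc64 headers).
 * On sparc64, __kernel_ino_t is "unsigned long" (64 bits wide), so the
 * structure the native ustat() fills in is larger than what a 32-bit
 * compat task expects; a compat_sys_ustat()-style handler has to copy
 * out a 32-bit layout instead.
 */
#include <stdint.h>
#include <stdio.h>

typedef unsigned long native_ino_t;   /* stands in for sparc64 __kernel_ino_t */
typedef uint32_t      compat_ino_t;   /* stands in for the 32-bit userspace type */

struct native_ustat {                  /* roughly what the 64-bit kernel fills in */
	long         f_tfree;          /* free blocks */
	native_ino_t f_tinode;         /* free inodes (64-bit here) */
	char         f_fname[6];
	char         f_fpack[6];
};

struct compat_ustat {                  /* what a 32-bit caller's buffer looks like */
	int32_t      f_tfree;
	compat_ino_t f_tinode;         /* 32-bit inode count */
	char         f_fname[6];
	char         f_fpack[6];
};

int main(void)
{
	/* Sizes (and field offsets) differ, so copying the native struct
	 * into a 32-bit task's buffer would overrun/corrupt it. */
	printf("native ustat: %zu bytes, compat ustat: %zu bytes\n",
	       sizeof(struct native_ustat), sizeof(struct compat_ustat));
	return 0;
}

On a 64-bit build this prints two different sizes, which is the whole point of routing the 32-bit syscall table entry through a compat wrapper.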