Diffstat (limited to 'net/caif')
-rw-r--r--  net/caif/Kconfig          5
-rw-r--r--  net/caif/caif_socket.c   91
-rw-r--r--  net/caif/cfctrl.c        92
-rw-r--r--  net/caif/cfmuxl.c         3
-rw-r--r--  net/caif/cfpkt_skbuff.c  25
-rw-r--r--  net/caif/cfrfml.c         2
-rw-r--r--  net/caif/cfserl.c         7
-rw-r--r--  net/caif/cfsrvl.c         6
-rw-r--r--  net/caif/cfveil.c         2
9 files changed, 94 insertions, 139 deletions
diff --git a/net/caif/Kconfig b/net/caif/Kconfig
index cd1daf6..ed65178 100644
--- a/net/caif/Kconfig
+++ b/net/caif/Kconfig
@@ -2,10 +2,8 @@
# CAIF net configurations
#
-#menu "CAIF Support"
-comment "CAIF Support"
menuconfig CAIF
- tristate "Enable CAIF support"
+ tristate "CAIF support"
select CRC_CCITT
default n
---help---
@@ -45,4 +43,3 @@ config CAIF_NETDEV
If unsure say Y.
endif
-#endmenu
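For reference, the menuconfig stanza this hunk produces (reconstructed from the hunk lines above; the help text is elided):

    menuconfig CAIF
            tristate "CAIF support"
            select CRC_CCITT
            default n
            ---help---
            ...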
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index c3a70c5..3d0e095 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -60,7 +60,7 @@ struct debug_fs_counter {
atomic_t num_rx_flow_off;
atomic_t num_rx_flow_on;
};
-struct debug_fs_counter cnt;
+static struct debug_fs_counter cnt;
#define dbfs_atomic_inc(v) atomic_inc(v)
#define dbfs_atomic_dec(v) atomic_dec(v)
#else
@@ -128,17 +128,17 @@ static void caif_read_unlock(struct sock *sk)
mutex_unlock(&cf_sk->readlock);
}
-int sk_rcvbuf_lowwater(struct caifsock *cf_sk)
+static int sk_rcvbuf_lowwater(struct caifsock *cf_sk)
{
/* A quarter of the full buffer is used as the low water mark */
return cf_sk->sk.sk_rcvbuf / 4;
}
-void caif_flow_ctrl(struct sock *sk, int mode)
+static void caif_flow_ctrl(struct sock *sk, int mode)
{
struct caifsock *cf_sk;
cf_sk = container_of(sk, struct caifsock, sk);
- if (cf_sk->layer.dn)
+ if (cf_sk->layer.dn && cf_sk->layer.dn->modemcmd)
cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode);
}
@@ -146,7 +146,7 @@ void caif_flow_ctrl(struct sock *sk, int mode)
* Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are
* not dropped, but CAIF is sending flow off instead.
*/
-int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
int err;
int skb_len;
@@ -162,9 +162,8 @@ int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
atomic_read(&cf_sk->sk.sk_rmem_alloc),
sk_rcvbuf_lowwater(cf_sk));
set_rx_flow_off(cf_sk);
- if (cf_sk->layer.dn)
- cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
- CAIF_MODEMCMD_FLOW_OFF_REQ);
+ dbfs_atomic_inc(&cnt.num_rx_flow_off);
+ caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
}
err = sk_filter(sk, skb);
@@ -175,9 +174,8 @@ int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
trace_printk("CAIF: %s():"
" sending flow OFF due to rmem_schedule\n",
__func__);
- if (cf_sk->layer.dn)
- cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
- CAIF_MODEMCMD_FLOW_OFF_REQ);
+ dbfs_atomic_inc(&cnt.num_rx_flow_off);
+ caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
}
skb->dev = NULL;
skb_set_owner_r(skb, sk);
@@ -285,65 +283,51 @@ static void caif_check_flow_release(struct sock *sk)
{
struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
- if (cf_sk->layer.dn == NULL || cf_sk->layer.dn->modemcmd == NULL)
- return;
if (rx_flow_is_on(cf_sk))
return;
if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
dbfs_atomic_inc(&cnt.num_rx_flow_on);
set_rx_flow_on(cf_sk);
- cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
- CAIF_MODEMCMD_FLOW_ON_REQ);
+ caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
}
}
+
/*
- * Copied from sock.c:sock_queue_rcv_skb(), and added check that user buffer
- * has sufficient size.
+ * Copied from unix_dgram_recvmsg, but removed credit checks,
+ * changed locking, address handling and added MSG_TRUNC.
*/
-
static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t buf_len, int flags)
+ struct msghdr *m, size_t len, int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
- int ret = 0;
- int len;
+ int ret;
+ int copylen;
- if (unlikely(!buf_len))
- return -EINVAL;
+ ret = -EOPNOTSUPP;
+ if (m->msg_flags&MSG_OOB)
+ goto read_error;
skb = skb_recv_datagram(sk, flags, 0, &ret);
if (!skb)
goto read_error;
-
- len = skb->len;
-
- if (skb && skb->len > buf_len && !(flags & MSG_PEEK)) {
- len = buf_len;
- /*
- * Push skb back on receive queue if buffer too small.
- * This has a built-in race where multi-threaded receive
- * may get packet in wrong order, but multiple read does
- * not really guarantee ordered delivery anyway.
- * Let's optimize for speed without taking locks.
- */
-
- skb_queue_head(&sk->sk_receive_queue, skb);
- ret = -EMSGSIZE;
- goto read_error;
+ copylen = skb->len;
+ if (len < copylen) {
+ m->msg_flags |= MSG_TRUNC;
+ copylen = len;
}
- ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, len);
+ ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen);
if (ret)
- goto read_error;
+ goto out_free;
+ ret = (flags & MSG_TRUNC) ? skb->len : copylen;
+out_free:
skb_free_datagram(sk, skb);
-
caif_check_flow_release(sk);
-
- return len;
+ return ret;
read_error:
return ret;
@@ -920,17 +904,17 @@ wait_connect:
timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
release_sock(sk);
- err = wait_event_interruptible_timeout(*sk_sleep(sk),
+ err = -ERESTARTSYS;
+ timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
sk->sk_state != CAIF_CONNECTING,
timeo);
lock_sock(sk);
- if (err < 0)
+ if (timeo < 0)
goto out; /* -ERESTARTSYS */
- if (err == 0 && sk->sk_state != CAIF_CONNECTED) {
- err = -ETIMEDOUT;
- goto out;
- }
+ err = -ETIMEDOUT;
+ if (timeo == 0 && sk->sk_state != CAIF_CONNECTED)
+ goto out;
if (sk->sk_state != CAIF_CONNECTED) {
sock->state = SS_UNCONNECTED;
err = sock_error(sk);
@@ -945,7 +929,6 @@ out:
return err;
}
-
/*
* caif_release() - Disconnect a CAIF Socket
* Copied and modified af_irda.c:irda_release().
@@ -1019,10 +1002,6 @@ static unsigned int caif_poll(struct file *file,
(sk->sk_shutdown & RCV_SHUTDOWN))
mask |= POLLIN | POLLRDNORM;
- /* Connection-based need to check for termination and startup */
- if (sk->sk_state == CAIF_DISCONNECTED)
- mask |= POLLHUP;
-
/*
* we set writable also when the other side has shut down the
* connection. This prevents stuck sockets.
@@ -1194,7 +1173,7 @@ static struct net_proto_family caif_family_ops = {
.owner = THIS_MODULE,
};
-int af_caif_init(void)
+static int af_caif_init(void)
{
int err = sock_register(&caif_family_ops);
if (!err)
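As the new comment in the hunk notes, the rewritten caif_seqpkt_recvmsg() follows the unix_dgram_recvmsg() convention: copy at most the caller's buffer, set MSG_TRUNC when the datagram is cut short, and report the full datagram length when the caller passed MSG_TRUNC in the recv flags. A condensed sketch of that convention (kernel context assumed; demo_recv is an illustrative name, not part of the patch):

    static int demo_recv(struct sk_buff *skb, struct msghdr *m,
                         size_t len, int flags)
    {
            size_t copylen = skb->len;
            int ret;

            if (len < copylen) {
                    m->msg_flags |= MSG_TRUNC;      /* tell the caller data was cut */
                    copylen = len;
            }
            ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen);
            if (ret)
                    return ret;
            /* with MSG_TRUNC in the recv flags, return the true datagram size */
            return (flags & MSG_TRUNC) ? skb->len : copylen;
    }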
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 0ffe1e1..fcfda98 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -44,13 +44,14 @@ struct cflayer *cfctrl_create(void)
dev_info.id = 0xff;
memset(this, 0, sizeof(*this));
cfsrvl_init(&this->serv, 0, &dev_info);
- spin_lock_init(&this->info_list_lock);
atomic_set(&this->req_seq_no, 1);
atomic_set(&this->rsp_seq_no, 1);
this->serv.layer.receive = cfctrl_recv;
sprintf(this->serv.layer.name, "ctrl");
this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
spin_lock_init(&this->loop_linkid_lock);
+ spin_lock_init(&this->info_list_lock);
+ INIT_LIST_HEAD(&this->list);
this->loop_linkid = 1;
return &this->serv.layer;
}
@@ -112,20 +113,10 @@ bool cfctrl_req_eq(struct cfctrl_request_info *r1,
void cfctrl_insert_req(struct cfctrl *ctrl,
struct cfctrl_request_info *req)
{
- struct cfctrl_request_info *p;
spin_lock(&ctrl->info_list_lock);
- req->next = NULL;
atomic_inc(&ctrl->req_seq_no);
req->sequence_no = atomic_read(&ctrl->req_seq_no);
- if (ctrl->first_req == NULL) {
- ctrl->first_req = req;
- spin_unlock(&ctrl->info_list_lock);
- return;
- }
- p = ctrl->first_req;
- while (p->next != NULL)
- p = p->next;
- p->next = req;
+ list_add_tail(&req->list, &ctrl->list);
spin_unlock(&ctrl->info_list_lock);
}
@@ -133,46 +124,28 @@ void cfctrl_insert_req(struct cfctrl *ctrl,
struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
struct cfctrl_request_info *req)
{
- struct cfctrl_request_info *p;
- struct cfctrl_request_info *ret;
+ struct cfctrl_request_info *p, *tmp, *first;
spin_lock(&ctrl->info_list_lock);
- if (ctrl->first_req == NULL) {
- spin_unlock(&ctrl->info_list_lock);
- return NULL;
- }
-
- if (cfctrl_req_eq(req, ctrl->first_req)) {
- ret = ctrl->first_req;
- caif_assert(ctrl->first_req);
- atomic_set(&ctrl->rsp_seq_no,
- ctrl->first_req->sequence_no);
- ctrl->first_req = ctrl->first_req->next;
- spin_unlock(&ctrl->info_list_lock);
- return ret;
- }
+ first = list_first_entry(&ctrl->list, struct cfctrl_request_info, list);
- p = ctrl->first_req;
-
- while (p->next != NULL) {
- if (cfctrl_req_eq(req, p->next)) {
- pr_warning("CAIF: %s(): Requests are not "
+ list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
+ if (cfctrl_req_eq(req, p)) {
+ if (p != first)
+ pr_warning("CAIF: %s(): Requests are not "
"received in order\n",
__func__);
- ret = p->next;
+
atomic_set(&ctrl->rsp_seq_no,
- p->next->sequence_no);
- p->next = p->next->next;
- spin_unlock(&ctrl->info_list_lock);
- return ret;
+ p->sequence_no);
+ list_del(&p->list);
+ goto out;
}
- p = p->next;
}
+ p = NULL;
+out:
spin_unlock(&ctrl->info_list_lock);
-
- pr_warning("CAIF: %s(): Request does not match\n",
- __func__);
- return NULL;
+ return p;
}
struct cfctrl_rsp *cfctrl_get_respfuncs(struct cflayer *layer)
@@ -388,31 +361,18 @@ void cfctrl_getstartreason_req(struct cflayer *layer)
void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer)
{
- struct cfctrl_request_info *p, *req;
+ struct cfctrl_request_info *p, *tmp;
struct cfctrl *ctrl = container_obj(layr);
spin_lock(&ctrl->info_list_lock);
-
- if (ctrl->first_req == NULL) {
- spin_unlock(&ctrl->info_list_lock);
- return;
- }
-
- if (ctrl->first_req->client_layer == adap_layer) {
-
- req = ctrl->first_req;
- ctrl->first_req = ctrl->first_req->next;
- kfree(req);
- }
-
- p = ctrl->first_req;
- while (p != NULL && p->next != NULL) {
- if (p->next->client_layer == adap_layer) {
-
- req = p->next;
- p->next = p->next->next;
- kfree(p->next);
+ pr_warning("CAIF: %s(): enter\n", __func__);
+
+ list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
+ if (p->client_layer == adap_layer) {
+ pr_warning("CAIF: %s(): cancel req :%d\n", __func__,
+ p->sequence_no);
+ list_del(&p->list);
+ kfree(p);
}
- p = p->next;
}
spin_unlock(&ctrl->info_list_lock);
@@ -634,7 +594,7 @@ static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND:
case CAIF_CTRLCMD_FLOW_OFF_IND:
spin_lock(&this->info_list_lock);
- if (this->first_req != NULL) {
+ if (!list_empty(&this->list)) {
pr_debug("CAIF: %s(): Received flow off in "
"control layer", __func__);
}
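The main change in this file replaces the hand-rolled singly linked request queue (first_req plus next-pointer walks) with the kernel's struct list_head, making tail insertion O(1) and letting removal iterate with list_for_each_entry_safe(), which tolerates deleting the current node mid-walk. A minimal sketch of the pattern (demo_* names are illustrative; the list API is from <linux/list.h>):

    struct demo_req {
            struct list_head list;
            u32 sequence_no;
    };

    static LIST_HEAD(demo_queue);

    static void demo_insert(struct demo_req *req)
    {
            list_add_tail(&req->list, &demo_queue); /* no tail walk needed */
    }

    static struct demo_req *demo_remove(u32 seq)
    {
            struct demo_req *p, *tmp;

            list_for_each_entry_safe(p, tmp, &demo_queue, list) {
                    if (p->sequence_no == seq) {
                            list_del(&p->list);     /* safe: tmp caches the next node */
                            return p;
                    }
            }
            return NULL;
    }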
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index 7372f27..80c8d33 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -174,10 +174,11 @@ struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id)
spin_lock(&muxl->receive_lock);
up = get_up(muxl, id);
if (up == NULL)
- return NULL;
+ goto out;
memset(muxl->up_cache, 0, sizeof(muxl->up_cache));
list_del(&up->node);
cfsrvl_put(up);
+out:
spin_unlock(&muxl->receive_lock);
return up;
}
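The bug fixed here is a lock leak: the early return NULL left muxl->receive_lock held. The fix routes every exit through a single unlock site. In sketch form (demo_* names and the lookup helper are illustrative stand-ins):

    static struct cflayer *demo_remove(spinlock_t *lock, u8 id)
    {
            struct cflayer *up;

            spin_lock(lock);
            up = demo_lookup(id);           /* illustrative stand-in for get_up() */
            if (up == NULL)
                    goto out;               /* old code returned here, lock still held */
            /* ... unlink 'up' from the mux ... */
    out:
            spin_unlock(lock);
            return up;
    }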
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index 83fff2f..a6fdf89 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -238,6 +238,7 @@ int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len)
struct sk_buff *lastskb;
u8 *to;
const u8 *data = data2;
+ int ret;
if (unlikely(is_erronous(pkt)))
return -EPROTO;
if (unlikely(skb_headroom(skb) < len)) {
@@ -246,9 +247,10 @@ int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len)
}
/* Make sure data is writable */
- if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) {
+ ret = skb_cow_data(skb, 0, &lastskb);
+ if (unlikely(ret < 0)) {
PKT_ERROR(pkt, "cfpkt_add_head: cow failed\n");
- return -EPROTO;
+ return ret;
}
to = skb_push(skb, len);
@@ -316,6 +318,8 @@ EXPORT_SYMBOL(cfpkt_setlen);
struct cfpkt *cfpkt_create_uplink(const unsigned char *data, unsigned int len)
{
struct cfpkt *pkt = cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX);
+ if (!pkt)
+ return NULL;
if (unlikely(data != NULL))
cfpkt_add_body(pkt, data, len);
return pkt;
@@ -344,12 +348,13 @@ struct cfpkt *cfpkt_append(struct cfpkt *dstpkt,
if (dst->tail + neededtailspace > dst->end) {
/* Create a duplicate of 'dst' with more tail space */
+ struct cfpkt *tmppkt;
dstlen = skb_headlen(dst);
createlen = dstlen + neededtailspace;
- tmp = pkt_to_skb(
- cfpkt_create(createlen + PKT_PREFIX + PKT_POSTFIX));
- if (!tmp)
+ tmppkt = cfpkt_create(createlen + PKT_PREFIX + PKT_POSTFIX);
+ if (tmppkt == NULL)
return NULL;
+ tmp = pkt_to_skb(tmppkt);
skb_set_tail_pointer(tmp, dstlen);
tmp->len = dstlen;
memcpy(tmp->data, dst->data, dstlen);
@@ -368,6 +373,7 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos)
{
struct sk_buff *skb2;
struct sk_buff *skb = pkt_to_skb(pkt);
+ struct cfpkt *tmppkt;
u8 *split = skb->data + pos;
u16 len2nd = skb_tail_pointer(skb) - split;
@@ -381,9 +387,12 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos)
}
/* Create a new packet for the second part of the data */
- skb2 = pkt_to_skb(
- cfpkt_create_pfx(len2nd + PKT_PREFIX + PKT_POSTFIX,
- PKT_PREFIX));
+ tmppkt = cfpkt_create_pfx(len2nd + PKT_PREFIX + PKT_POSTFIX,
+ PKT_PREFIX);
+ if (tmppkt == NULL)
+ return NULL;
+ skb2 = pkt_to_skb(tmppkt);
+
if (skb2 == NULL)
return NULL;
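Two fixes recur throughout this file: allocation results are NULL-checked before being handed to pkt_to_skb(), and the real return code of skb_cow_data() is propagated instead of a blanket -EPROTO. The same NULL-check discipline carries into cfserl.c and cfsrvl.c below. A sketch of both (demo_* names are illustrative):

    static int demo_make_writable(struct sk_buff *skb)
    {
            struct sk_buff *lastskb;
            int ret;

            ret = skb_cow_data(skb, 0, &lastskb);
            if (unlikely(ret < 0))
                    return ret;             /* was: hard-coded -EPROTO */
            return 0;
    }

    static struct cfpkt *demo_create_checked(unsigned int len)
    {
            struct cfpkt *pkt = cfpkt_create(len);

            if (pkt == NULL)                /* was: passed unchecked to pkt_to_skb() */
                    return NULL;
            return pkt;
    }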
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index cd2830f..fd27b17 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -83,7 +83,7 @@ static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
if (!cfsrvl_ready(service, &ret))
return ret;
- if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+ if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
pr_err("CAIF: %s():Packet too large - size=%d\n",
__func__, cfpkt_getlen(pkt));
return -EOVERFLOW;
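The one-character fix above repairs an operator-precedence bug: in `!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE`, the `!` binds before `>`, so the left-hand side is always 0 or 1 and the oversize check can never trigger. The identical fix appears in cfveil.c at the end of this series. A standalone C demonstration (values are illustrative):

    #include <stdio.h>

    int main(void)
    {
            int len = 5000;
            const int max = 4096;

            printf("buggy  (!len > max): %d\n", !len > max); /* 0: never trips */
            printf("fixed  ( len > max): %d\n", len > max);  /* 1: oversize caught */
            return 0;
    }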
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index 06029ea..965c5ba 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -59,14 +59,18 @@ static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
u8 stx = CFSERL_STX;
int ret;
u16 expectlen = 0;
+
caif_assert(newpkt != NULL);
spin_lock(&layr->sync);
if (layr->incomplete_frm != NULL) {
-
layr->incomplete_frm =
cfpkt_append(layr->incomplete_frm, newpkt, expectlen);
pkt = layr->incomplete_frm;
+ if (pkt == NULL) {
+ spin_unlock(&layr->sync);
+ return -ENOMEM;
+ }
} else {
pkt = newpkt;
}
@@ -154,7 +158,6 @@ static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
if (layr->usestx) {
if (tail_pkt != NULL)
pkt = cfpkt_append(pkt, tail_pkt, 0);
-
/* Start search for next STX if frame failed */
continue;
} else {
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index aff31f3..6e5b7079 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -123,6 +123,12 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
struct caif_payload_info *info;
u8 flow_off = SRVL_FLOW_OFF;
pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
+ if (!pkt) {
+ pr_warning("CAIF: %s(): Out of memory\n",
+ __func__);
+ return -ENOMEM;
+ }
+
if (cfpkt_add_head(pkt, &flow_off, 1) < 0) {
pr_err("CAIF: %s(): Packet is erroneous!\n",
__func__);
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c
index 0fd827f..e04f7d9 100644
--- a/net/caif/cfveil.c
+++ b/net/caif/cfveil.c
@@ -84,7 +84,7 @@ static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt)
return ret;
caif_assert(layr->dn != NULL);
caif_assert(layr->dn->transmit != NULL);
- if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+ if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
pr_warning("CAIF: %s(): Packet too large - size=%d\n",
__func__, cfpkt_getlen(pkt));
return -EOVERFLOW;