Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/Kconfig   |  1
-rw-r--r--  net/ipv4/ah4.c     | 36
-rw-r--r--  net/ipv4/esp4.c    | 85
-rw-r--r--  net/ipv4/ipcomp.c  | 25
4 files changed, 88 insertions(+), 59 deletions(-)
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 8514106..3b5d504 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -386,6 +386,7 @@ config INET_ESP
select CRYPTO
select CRYPTO_HMAC
select CRYPTO_MD5
+ select CRYPTO_CBC
select CRYPTO_SHA1
select CRYPTO_DES
---help---
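
With the rewritten crypto layer the chaining mode is no longer a tfm flag but part of the algorithm name itself ("cbc(des)" instead of "des" plus CRYPTO_TFM_MODE_CBC, as the esp4.c hunks below show), and the CBC wrapper is a separate template module. INET_ESP therefore has to select CRYPTO_CBC, or the template lookup fails at runtime even with CRYPTO_DES built in. A minimal sketch of the naming convention (the algorithm string is illustrative, not taken from this patch):

#include <linux/crypto.h>
#include <linux/err.h>

/* Illustrative: ask the crypto layer to wrap the "des" cipher in the
 * CBC template.  The instance is built on demand from crypto/cbc.c,
 * hence the new "select CRYPTO_CBC" above.
 */
static int cbc_template_example(void)
{
	struct crypto_blkcipher *tfm;

	tfm = crypto_alloc_blkcipher("cbc(des)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	crypto_free_blkcipher(tfm);
	return 0;
}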
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 1366bc6..2b98943 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -1,3 +1,4 @@
+#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
@@ -97,7 +98,10 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
ah->spi = x->id.spi;
ah->seq_no = htonl(++x->replay.oseq);
xfrm_aevent_doreplay(x);
- ahp->icv(ahp, skb, ah->auth_data);
+ err = ah_mac_digest(ahp, skb, ah->auth_data);
+ if (err)
+ goto error;
+ memcpy(ah->auth_data, ahp->work_icv, ahp->icv_trunc_len);
top_iph->tos = iph->tos;
top_iph->ttl = iph->ttl;
@@ -119,6 +123,7 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
{
int ah_hlen;
int ihl;
+ int err = -EINVAL;
struct iphdr *iph;
struct ip_auth_hdr *ah;
struct ah_data *ahp;
@@ -166,8 +171,11 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
skb_push(skb, ihl);
- ahp->icv(ahp, skb, ah->auth_data);
- if (memcmp(ah->auth_data, auth_data, ahp->icv_trunc_len)) {
+ err = ah_mac_digest(ahp, skb, ah->auth_data);
+ if (err)
+ goto out;
+ err = -EINVAL;
+ if (memcmp(ahp->work_icv, auth_data, ahp->icv_trunc_len)) {
x->stats.integrity_failed++;
goto out;
}
@@ -179,7 +187,7 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
return 0;
out:
- return -EINVAL;
+ return err;
}
static void ah4_err(struct sk_buff *skb, u32 info)
@@ -204,6 +212,7 @@ static int ah_init_state(struct xfrm_state *x)
{
struct ah_data *ahp = NULL;
struct xfrm_algo_desc *aalg_desc;
+ struct crypto_hash *tfm;
if (!x->aalg)
goto error;
@@ -221,24 +230,27 @@ static int ah_init_state(struct xfrm_state *x)
ahp->key = x->aalg->alg_key;
ahp->key_len = (x->aalg->alg_key_len+7)/8;
- ahp->tfm = crypto_alloc_tfm(x->aalg->alg_name, 0);
- if (!ahp->tfm)
+ tfm = crypto_alloc_hash(x->aalg->alg_name, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm))
+ goto error;
+
+ ahp->tfm = tfm;
+ if (crypto_hash_setkey(tfm, ahp->key, ahp->key_len))
goto error;
- ahp->icv = ah_hmac_digest;
/*
* Lookup the algorithm description maintained by xfrm_algo,
* verify crypto transform properties, and store information
* we need for AH processing. This lookup cannot fail here
- * after a successful crypto_alloc_tfm().
+ * after a successful crypto_alloc_hash().
*/
aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
BUG_ON(!aalg_desc);
if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
- crypto_tfm_alg_digestsize(ahp->tfm)) {
+ crypto_hash_digestsize(tfm)) {
printk(KERN_INFO "AH: %s digestsize %u != %hu\n",
- x->aalg->alg_name, crypto_tfm_alg_digestsize(ahp->tfm),
+ x->aalg->alg_name, crypto_hash_digestsize(tfm),
aalg_desc->uinfo.auth.icv_fullbits/8);
goto error;
}
@@ -262,7 +274,7 @@ static int ah_init_state(struct xfrm_state *x)
error:
if (ahp) {
kfree(ahp->work_icv);
- crypto_free_tfm(ahp->tfm);
+ crypto_free_hash(ahp->tfm);
kfree(ahp);
}
return -EINVAL;
@@ -277,7 +289,7 @@ static void ah_destroy(struct xfrm_state *x)
kfree(ahp->work_icv);
ahp->work_icv = NULL;
- crypto_free_tfm(ahp->tfm);
+ crypto_free_hash(ahp->tfm);
ahp->tfm = NULL;
kfree(ahp);
}
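
The ahp->icv function pointer is gone; its replacement, ah_mac_digest(), lives in include/net/ah.h and is outside this per-directory diff. A plausible sketch of the helper, assuming it drives the new hash_desc interface through skb_icv_walk() and parks the MAC in ahp->work_icv (which is why ah_output() copies from work_icv and ah_input() compares against it):

#include <linux/crypto.h>
#include <net/xfrm.h>	/* skb_icv_walk() */

/* Sketch only.  auth_data (the ICV field inside the AH header) is
 * zeroed first because the field itself is covered by the MAC; the
 * computed digest ends up in ahp->work_icv.
 */
static inline int ah_mac_digest(struct ah_data *ahp, struct sk_buff *skb,
				u8 *auth_data)
{
	struct hash_desc desc = { .tfm = ahp->tfm, .flags = 0 };
	int err;

	memset(auth_data, 0, ahp->icv_trunc_len);
	err = crypto_hash_init(&desc);
	if (unlikely(err))
		return err;
	err = skb_icv_walk(skb, &desc, 0, skb->len, crypto_hash_update);
	if (unlikely(err))
		return err;
	return crypto_hash_final(&desc, ahp->work_icv);
}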
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index fc2f8ce..b428489 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -1,3 +1,4 @@
+#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
@@ -16,7 +17,8 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
int err;
struct iphdr *top_iph;
struct ip_esp_hdr *esph;
- struct crypto_tfm *tfm;
+ struct crypto_blkcipher *tfm;
+ struct blkcipher_desc desc;
struct esp_data *esp;
struct sk_buff *trailer;
int blksize;
@@ -36,7 +38,9 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
esp = x->data;
alen = esp->auth.icv_trunc_len;
tfm = esp->conf.tfm;
- blksize = ALIGN(crypto_tfm_alg_blocksize(tfm), 4);
+ desc.tfm = tfm;
+ desc.flags = 0;
+ blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
clen = ALIGN(clen + 2, blksize);
if (esp->conf.padlen)
clen = ALIGN(clen, esp->conf.padlen);
@@ -92,7 +96,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
xfrm_aevent_doreplay(x);
if (esp->conf.ivlen)
- crypto_cipher_set_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
+ crypto_blkcipher_set_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
do {
struct scatterlist *sg = &esp->sgbuf[0];
@@ -103,26 +107,27 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
goto error;
}
skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
- crypto_cipher_encrypt(tfm, sg, sg, clen);
+ err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
if (unlikely(sg != &esp->sgbuf[0]))
kfree(sg);
} while (0);
+ if (unlikely(err))
+ goto error;
+
if (esp->conf.ivlen) {
- memcpy(esph->enc_data, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
- crypto_cipher_get_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
+ memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen);
+ crypto_blkcipher_get_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
}
if (esp->auth.icv_full_len) {
- esp->auth.icv(esp, skb, (u8*)esph-skb->data,
- sizeof(struct ip_esp_hdr) + esp->conf.ivlen+clen, trailer->tail);
- pskb_put(skb, trailer, alen);
+ err = esp_mac_digest(esp, skb, (u8 *)esph - skb->data,
+ sizeof(*esph) + esp->conf.ivlen + clen);
+ memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen);
}
ip_send_check(top_iph);
- err = 0;
-
error:
return err;
}
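
esp_mac_digest() is likewise defined outside this directory, in include/net/esp.h. A sketch under the same assumptions as the AH helper above (hash_desc plus skb_icv_walk()), with the digest left in esp->auth.work_icv for the caller's memcpy() into the trailer:

/* Sketch only; mirrors the assumed ah_mac_digest() shape. */
static inline int esp_mac_digest(struct esp_data *esp, struct sk_buff *skb,
				 int offset, int len)
{
	struct hash_desc desc = { .tfm = esp->auth.tfm, .flags = 0 };
	int err;

	err = crypto_hash_init(&desc);
	if (unlikely(err))
		return err;
	err = skb_icv_walk(skb, &desc, offset, len, crypto_hash_update);
	if (unlikely(err))
		return err;
	return crypto_hash_final(&desc, esp->auth.work_icv);
}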
@@ -137,8 +142,10 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
struct iphdr *iph;
struct ip_esp_hdr *esph;
struct esp_data *esp = x->data;
+ struct crypto_blkcipher *tfm = esp->conf.tfm;
+ struct blkcipher_desc desc = { .tfm = tfm };
struct sk_buff *trailer;
- int blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4);
+ int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
int alen = esp->auth.icv_trunc_len;
int elen = skb->len - sizeof(struct ip_esp_hdr) - esp->conf.ivlen - alen;
int nfrags;
@@ -146,6 +153,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
u8 nexthdr[2];
struct scatterlist *sg;
int padlen;
+ int err;
if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr)))
goto out;
@@ -155,15 +163,16 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
/* If integrity check is required, do this. */
if (esp->auth.icv_full_len) {
- u8 sum[esp->auth.icv_full_len];
- u8 sum1[alen];
-
- esp->auth.icv(esp, skb, 0, skb->len-alen, sum);
+ u8 sum[alen];
- if (skb_copy_bits(skb, skb->len-alen, sum1, alen))
+ err = esp_mac_digest(esp, skb, 0, skb->len - alen);
+ if (err)
+ goto out;
+
+ if (skb_copy_bits(skb, skb->len - alen, sum, alen))
BUG();
- if (unlikely(memcmp(sum, sum1, alen))) {
+ if (unlikely(memcmp(esp->auth.work_icv, sum, alen))) {
x->stats.integrity_failed++;
goto out;
}
@@ -178,7 +187,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
/* Get ivec. This can be wrong, check against another impls. */
if (esp->conf.ivlen)
- crypto_cipher_set_iv(esp->conf.tfm, esph->enc_data, crypto_tfm_alg_ivsize(esp->conf.tfm));
+ crypto_blkcipher_set_iv(tfm, esph->enc_data, esp->conf.ivlen);
sg = &esp->sgbuf[0];
@@ -188,9 +197,11 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
goto out;
}
skb_to_sgvec(skb, sg, sizeof(struct ip_esp_hdr) + esp->conf.ivlen, elen);
- crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen);
+ err = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
if (unlikely(sg != &esp->sgbuf[0]))
kfree(sg);
+ if (unlikely(err))
+ return err;
if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
BUG();
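
Both directions now go through a struct blkcipher_desc rather than a bare tfm: the desc bundles per-call state (tfm pointer, request flags), and encrypt/decrypt return an error code instead of void, which is what the new err checks above propagate. A self-contained sketch of the calling convention (function and buffer names are illustrative):

#include <linux/crypto.h>
#include <linux/scatterlist.h>

/* Illustrative: encrypt a linear buffer in place the way esp_output()
 * drives the blkcipher interface.
 */
static int blkcipher_example(struct crypto_blkcipher *tfm,
			     u8 *iv, u8 *buf, unsigned int len)
{
	struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
	struct scatterlist sg;

	sg_init_one(&sg, buf, len);
	crypto_blkcipher_set_iv(tfm, iv, crypto_blkcipher_ivsize(tfm));
	return crypto_blkcipher_encrypt(&desc, &sg, &sg, len);
}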
@@ -254,7 +265,7 @@ out:
static u32 esp4_get_max_size(struct xfrm_state *x, int mtu)
{
struct esp_data *esp = x->data;
- u32 blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4);
+ u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
if (x->props.mode) {
mtu = ALIGN(mtu + 2, blksize);
@@ -293,11 +304,11 @@ static void esp_destroy(struct xfrm_state *x)
if (!esp)
return;
- crypto_free_tfm(esp->conf.tfm);
+ crypto_free_blkcipher(esp->conf.tfm);
esp->conf.tfm = NULL;
kfree(esp->conf.ivec);
esp->conf.ivec = NULL;
- crypto_free_tfm(esp->auth.tfm);
+ crypto_free_hash(esp->auth.tfm);
esp->auth.tfm = NULL;
kfree(esp->auth.work_icv);
esp->auth.work_icv = NULL;
@@ -307,6 +318,7 @@ static void esp_destroy(struct xfrm_state *x)
static int esp_init_state(struct xfrm_state *x)
{
struct esp_data *esp = NULL;
+ struct crypto_blkcipher *tfm;
/* null auth and encryption can have zero length keys */
if (x->aalg) {
@@ -322,22 +334,27 @@ static int esp_init_state(struct xfrm_state *x)
if (x->aalg) {
struct xfrm_algo_desc *aalg_desc;
+ struct crypto_hash *hash;
esp->auth.key = x->aalg->alg_key;
esp->auth.key_len = (x->aalg->alg_key_len+7)/8;
- esp->auth.tfm = crypto_alloc_tfm(x->aalg->alg_name, 0);
- if (esp->auth.tfm == NULL)
+ hash = crypto_alloc_hash(x->aalg->alg_name, 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(hash))
+ goto error;
+
+ esp->auth.tfm = hash;
+ if (crypto_hash_setkey(hash, esp->auth.key, esp->auth.key_len))
goto error;
- esp->auth.icv = esp_hmac_digest;
aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
BUG_ON(!aalg_desc);
if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
- crypto_tfm_alg_digestsize(esp->auth.tfm)) {
+ crypto_hash_digestsize(hash)) {
NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
x->aalg->alg_name,
- crypto_tfm_alg_digestsize(esp->auth.tfm),
+ crypto_hash_digestsize(hash),
aalg_desc->uinfo.auth.icv_fullbits/8);
goto error;
}
@@ -351,13 +368,11 @@ static int esp_init_state(struct xfrm_state *x)
}
esp->conf.key = x->ealg->alg_key;
esp->conf.key_len = (x->ealg->alg_key_len+7)/8;
- if (x->props.ealgo == SADB_EALG_NULL)
- esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_ECB);
- else
- esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_CBC);
- if (esp->conf.tfm == NULL)
+ tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm))
goto error;
- esp->conf.ivlen = crypto_tfm_alg_ivsize(esp->conf.tfm);
+ esp->conf.tfm = tfm;
+ esp->conf.ivlen = crypto_blkcipher_ivsize(tfm);
esp->conf.padlen = 0;
if (esp->conf.ivlen) {
esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
@@ -365,7 +380,7 @@ static int esp_init_state(struct xfrm_state *x)
goto error;
get_random_bytes(esp->conf.ivec, esp->conf.ivlen);
}
- if (crypto_cipher_setkey(esp->conf.tfm, esp->conf.key, esp->conf.key_len))
+ if (crypto_blkcipher_setkey(tfm, esp->conf.key, esp->conf.key_len))
goto error;
x->props.header_len = sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
if (x->props.mode)
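
Two conventions change in esp_init_state(): the new allocators report failure as ERR_PTR() values rather than NULL, and passing CRYPTO_ALG_ASYNC in the mask (with the bit clear in the type) restricts the lookup to synchronous implementations, the only kind this code path can drive. A sketch of the allocate-and-key pattern shared by the AH and ESP hunks (the helper name is hypothetical):

/* Hypothetical helper condensing the pattern used above. */
static struct crypto_hash *alloc_auth_example(const char *name,
					      const u8 *key,
					      unsigned int klen)
{
	struct crypto_hash *hash;

	hash = crypto_alloc_hash(name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash))
		return hash;

	if (crypto_hash_setkey(hash, key, klen)) {
		crypto_free_hash(hash);
		return ERR_PTR(-EINVAL);
	}
	return hash;
}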
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index a0c28b2..5bb9c9f 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -32,7 +32,7 @@
struct ipcomp_tfms {
struct list_head list;
- struct crypto_tfm **tfms;
+ struct crypto_comp **tfms;
int users;
};
@@ -46,7 +46,7 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
int err, plen, dlen;
struct ipcomp_data *ipcd = x->data;
u8 *start, *scratch;
- struct crypto_tfm *tfm;
+ struct crypto_comp *tfm;
int cpu;
plen = skb->len;
@@ -107,7 +107,7 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
struct iphdr *iph = skb->nh.iph;
struct ipcomp_data *ipcd = x->data;
u8 *start, *scratch;
- struct crypto_tfm *tfm;
+ struct crypto_comp *tfm;
int cpu;
ihlen = iph->ihl * 4;
@@ -302,7 +302,7 @@ static void **ipcomp_alloc_scratches(void)
return scratches;
}
-static void ipcomp_free_tfms(struct crypto_tfm **tfms)
+static void ipcomp_free_tfms(struct crypto_comp **tfms)
{
struct ipcomp_tfms *pos;
int cpu;
@@ -324,28 +324,28 @@ static void ipcomp_free_tfms(struct crypto_tfm **tfms)
return;
for_each_possible_cpu(cpu) {
- struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu);
- crypto_free_tfm(tfm);
+ struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu);
+ crypto_free_comp(tfm);
}
free_percpu(tfms);
}
-static struct crypto_tfm **ipcomp_alloc_tfms(const char *alg_name)
+static struct crypto_comp **ipcomp_alloc_tfms(const char *alg_name)
{
struct ipcomp_tfms *pos;
- struct crypto_tfm **tfms;
+ struct crypto_comp **tfms;
int cpu;
/* This can be any valid CPU ID so we don't need locking. */
cpu = raw_smp_processor_id();
list_for_each_entry(pos, &ipcomp_tfms_list, list) {
- struct crypto_tfm *tfm;
+ struct crypto_comp *tfm;
tfms = pos->tfms;
tfm = *per_cpu_ptr(tfms, cpu);
- if (!strcmp(crypto_tfm_alg_name(tfm), alg_name)) {
+ if (!strcmp(crypto_comp_name(tfm), alg_name)) {
pos->users++;
return tfms;
}
@@ -359,12 +359,13 @@ static struct crypto_tfm **ipcomp_alloc_tfms(const char *alg_name)
INIT_LIST_HEAD(&pos->list);
list_add(&pos->list, &ipcomp_tfms_list);
- pos->tfms = tfms = alloc_percpu(struct crypto_tfm *);
+ pos->tfms = tfms = alloc_percpu(struct crypto_comp *);
if (!tfms)
goto error;
for_each_possible_cpu(cpu) {
- struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0);
+ struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
+ CRYPTO_ALG_ASYNC);
if (!tfm)
goto error;
*per_cpu_ptr(tfms, cpu) = tfm;
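
ipcomp moves to the dedicated crypto_comp type with its own alloc/free and compress/decompress entry points. One caveat: in this generation of the API, crypto_alloc_comp() wraps crypto_alloc_base(), which signals failure with ERR_PTR() rather than NULL, so the "if (!tfm)" test above keeps the older convention; IS_ERR() would be the stricter check. A round-trip sketch (algorithm name and buffer handling are illustrative):

#include <linux/crypto.h>
#include <linux/err.h>

/* Illustrative: compress then decompress one buffer.  "deflate" is
 * the usual IPComp algorithm but is an assumption here, not taken
 * from this patch.
 */
static int comp_roundtrip_example(u8 *src, unsigned int slen,
				  u8 *tmp, u8 *out, unsigned int cap)
{
	struct crypto_comp *tfm;
	unsigned int dlen = cap, plen = cap;
	int err;

	tfm = crypto_alloc_comp("deflate", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_comp_compress(tfm, src, slen, tmp, &dlen);
	if (!err)
		err = crypto_comp_decompress(tfm, tmp, dlen, out, &plen);

	crypto_free_comp(tfm);
	return err;
}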