author     Linus Torvalds <torvalds@linux-foundation.org>  2015-12-04 00:02:46 (GMT)
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-12-04 00:02:46 (GMT)
commit     071f5d105a0ae93aeb02197c4ee3557e8cc57a21 (patch)
tree       3d1cee6ce0235a2d36fffbae2b4f4ecd8121107d
parent     2873d32ff493ecbfb7d2c7f56812ab941dda42f4 (diff)
parent     e3c9b1ef78eb111f925dd04992a3de1f383e8f49 (diff)
download   linux-071f5d105a0ae93aeb02197c4ee3557e8cc57a21.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
 "A lot of Thanksgiving turkey leftovers accumulated, here goes:

  1) Fix bluetooth l2cap_chan object leak, from Johan Hedberg.
  2) IDs for some new iwlwifi chips, from Oren Givon.
  3) Fix rtlwifi lockups on boot, from Larry Finger.
  4) Fix memory leak in fm10k, from Stephen Hemminger.
  5) We have a route leak in the ipv6 tunnel infrastructure, fix from Paolo Abeni.
  6) Fix buffer pointer handling in arm64 bpf JIT, from Zi Shen Lim.
  7) Wrong lockdep annotations in tcp md5 support, fix from Eric Dumazet.
  8) Work around some middle boxes which prevent proper handling of TCP Fast Open, from Yuchung Cheng.
  9) TCP repair can do huge kmalloc() requests, build paged SKBs instead. From Eric Dumazet.
 10) Fix msg_controllen overflow in scm_detach_fds, from Daniel Borkmann.
 11) Fix device leaks on ipmr table destruction in ipv4 and ipv6, from Nikolay Aleksandrov.
 12) Fix use after free in epoll with AF_UNIX sockets, from Rainer Weikusat.
 13) Fix double free in VRF code, from Nikolay Aleksandrov.
 14) Fix skb leaks on socket receive queue in tipc, from Ying Xue.
 15) Fix ifup/ifdown crash in xgene driver, from Iyappan Subramanian.
 16) Fix clearing of persistent array maps in bpf, from Daniel Borkmann.
 17) In TCP, for the cross-SYN case, we don't initialize tp->copied_seq early enough. From Eric Dumazet.
 18) Fix out of bounds accesses in bpf array implementation when updating elements, from Daniel Borkmann.
 19) Fill gaps in RCU protection of np->opt in ipv6 stack, from Eric Dumazet.
 20) When dumping proxy neigh entries, we have to accommodate NULL device pointers properly, from Konstantin Khlebnikov.
 21) SCTP doesn't release all ipv6 socket resources properly, fix from Eric Dumazet.
 22) Prevent underflows of sch->q.qlen for multiqueue packet schedulers, also from Eric Dumazet.
 23) Fix MAC and unicast list handling in bnxt_en driver, from Jeffrey Huang and Michael Chan.
 24) Don't actively scan radar channels, from Antonio Quartulli"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (110 commits)
  net: phy: reset only targeted phy
  bnxt_en: Setup uc_list mac filters after resetting the chip.
  bnxt_en: enforce proper storing of MAC address
  bnxt_en: Fixed incorrect implementation of ndo_set_mac_address
  net: lpc_eth: remove irq > NR_IRQS check from probe()
  net_sched: fix qdisc_tree_decrease_qlen() races
  openvswitch: fix hangup on vxlan/gre/geneve device deletion
  ipv4: igmp: Allow removing groups from a removed interface
  ipv6: sctp: implement sctp_v6_destroy_sock()
  arm64: bpf: add 'store immediate' instruction
  ipv6: kill sk_dst_lock
  ipv6: sctp: add rcu protection around np->opt
  net/neighbour: fix crash at dumping device-agnostic proxy entries
  sctp: use GFP_USER for user-controlled kmalloc
  sctp: convert sack_needed and sack_generation to bits
  ipv6: add complete rcu protection around np->opt
  bpf: fix allocation warnings in bpf maps and integer overflow
  mvebu: dts: enable IP checksum with jumbo frames for Armada 38x on Port0
  net: mvneta: enable setting custom TX IP checksum limit
  net: mvneta: fix error path for building skb
  ...
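Several of the entries above (items 19 and 21, plus the "rcu protection around np->opt" commits in the shortlog) convert a bare ipv6 socket-option pointer to RCU-managed access. Those hunks fall outside the excerpt below, so the sketch that follows only shows the generic kernel RCU read/publish pattern they build on; struct foo_opt and struct foo_cfg are hypothetical stand-ins, not the actual ipv6_txoptions code.

#include <linux/rcupdate.h>
#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical illustration of the RCU pattern, not the np->opt patches themselves. */
struct foo_opt {
	int len;
	struct rcu_head rcu;
};

struct foo_cfg {
	struct foo_opt __rcu *opt;
	spinlock_t lock;		/* serializes writers */
};

static int foo_opt_len(struct foo_cfg *cfg)
{
	struct foo_opt *opt;
	int len = 0;

	rcu_read_lock();
	opt = rcu_dereference(cfg->opt);	/* may be NULL */
	if (opt)
		len = opt->len;
	rcu_read_unlock();

	return len;
}

static void foo_opt_replace(struct foo_cfg *cfg, struct foo_opt *new_opt)
{
	struct foo_opt *old;

	spin_lock(&cfg->lock);
	old = rcu_dereference_protected(cfg->opt, lockdep_is_held(&cfg->lock));
	rcu_assign_pointer(cfg->opt, new_opt);
	spin_unlock(&cfg->lock);

	if (old)
		kfree_rcu(old, rcu);	/* freed only after a grace period */
}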
-rw-r--r--  Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt | 6
-rw-r--r--  MAINTAINERS | 6
-rw-r--r--  arch/arm/boot/dts/armada-38x.dtsi | 1
-rw-r--r--  arch/arm64/net/bpf_jit_comp.c | 47
-rw-r--r--  crypto/algif_aead.c | 4
-rw-r--r--  crypto/algif_skcipher.c | 6
-rw-r--r--  drivers/isdn/hisax/config.c | 2
-rw-r--r--  drivers/isdn/hisax/hfc_pci.c | 2
-rw-r--r--  drivers/isdn/hisax/hfc_sx.c | 2
-rw-r--r--  drivers/isdn/hisax/q931.c | 6
-rw-r--r--  drivers/net/can/bfin_can.c | 2
-rw-r--r--  drivers/net/can/c_can/c_can.c | 7
-rw-r--r--  drivers/net/can/cc770/cc770.c | 2
-rw-r--r--  drivers/net/can/flexcan.c | 4
-rw-r--r--  drivers/net/can/janz-ican3.c | 1
-rw-r--r--  drivers/net/can/m_can/m_can.c | 7
-rw-r--r--  drivers/net/can/pch_can.c | 3
-rw-r--r--  drivers/net/can/rcar_can.c | 11
-rw-r--r--  drivers/net/can/sja1000/sja1000.c | 4
-rw-r--r--  drivers/net/can/sun4i_can.c | 1
-rw-r--r--  drivers/net/can/ti_hecc.c | 7
-rw-r--r--  drivers/net/can/usb/ems_usb.c | 1
-rw-r--r--  drivers/net/can/usb/esd_usb2.c | 1
-rw-r--r--  drivers/net/can/usb/kvaser_usb.c | 5
-rw-r--r--  drivers/net/can/usb/usb_8dev.c | 4
-rw-r--r--  drivers/net/can/xilinx_can.c | 9
-rw-r--r--  drivers/net/ethernet/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/Makefile | 1
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 31
-rw-r--r--  drivers/net/ethernet/atheros/alx/main.c | 2
-rw-r--r--  drivers/net/ethernet/atheros/alx/reg.h | 1
-rw-r--r--  drivers/net/ethernet/aurora/Kconfig | 20
-rw-r--r--  drivers/net/ethernet/aurora/Makefile | 1
-rw-r--r--  drivers/net/ethernet/aurora/nb8800.c | 1552
-rw-r--r--  drivers/net/ethernet/aurora/nb8800.h | 316
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 44
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 7
-rw-r--r--  drivers/net/ethernet/cadence/macb.c | 4
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 5
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic.h | 5
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_main.c | 23
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | 16
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c | 4
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.h | 2
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 28
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.h | 2
-rw-r--r--  drivers/net/ethernet/dec/tulip/tulip_core.c | 9
-rw-r--r--  drivers/net/ethernet/dec/tulip/winbond-840.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/Kconfig | 3
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 6
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ptp.c | 2
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 33
-rw-r--r--  drivers/net/ethernet/nxp/lpc_eth.c | 2
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c | 13
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 9
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 28
-rw-r--r--  drivers/net/ethernet/ti/cpsw-common.c | 3
-rw-r--r--  drivers/net/macvtap.c | 4
-rw-r--r--  drivers/net/phy/broadcom.c | 2
-rw-r--r--  drivers/net/phy/phy.c | 3
-rw-r--r--  drivers/net/tun.c | 4
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 8
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 1
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 71
-rw-r--r--  drivers/net/vrf.c | 11
-rw-r--r--  drivers/net/wan/hdlc_fr.c | 10
-rw-r--r--  drivers/net/wan/x25_asy.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.c | 49
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath10k/hw.h | 17
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/pci.c | 53
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-7000.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-8000.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/d3.c | 8
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 11
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/sta.c | 88
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/sta.h | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/drv.c | 19
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c | 2
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c | 2
-rw-r--r--  fs/dlm/lowcomms.c | 4
-rw-r--r--  include/linux/bpf.h | 5
-rw-r--r--  include/linux/dns_resolver.h | 2
-rw-r--r--  include/linux/ipv6.h | 2
-rw-r--r--  include/linux/net.h | 13
-rw-r--r--  include/linux/netdevice.h | 3
-rw-r--r--  include/net/af_unix.h | 1
-rw-r--r--  include/net/ip6_route.h | 17
-rw-r--r--  include/net/ipv6.h | 22
-rw-r--r--  include/net/mac80211.h | 6
-rw-r--r--  include/net/ndisc.h | 3
-rw-r--r--  include/net/sch_generic.h | 3
-rw-r--r--  include/net/sctp/structs.h | 16
-rw-r--r--  include/net/sock.h | 32
-rw-r--r--  kernel/bpf/arraymap.c | 10
-rw-r--r--  kernel/bpf/hashtab.c | 34
-rw-r--r--  kernel/bpf/inode.c | 6
-rw-r--r--  kernel/bpf/syscall.c | 40
-rw-r--r--  kernel/bpf/verifier.c | 3
-rw-r--r--  net/bluetooth/af_bluetooth.c | 6
-rw-r--r--  net/bluetooth/smp.c | 7
-rw-r--r--  net/caif/caif_socket.c | 4
-rw-r--r--  net/core/datagram.c | 2
-rw-r--r--  net/core/neighbour.c | 4
-rw-r--r--  net/core/netclassid_cgroup.c | 26
-rw-r--r--  net/core/scm.c | 2
-rw-r--r--  net/core/sock.c | 12
-rw-r--r--  net/core/stream.c | 6
-rw-r--r--  net/dccp/ipv6.c | 37
-rw-r--r--  net/dccp/proto.c | 3
-rw-r--r--  net/decnet/af_decnet.c | 8
-rw-r--r--  net/dns_resolver/dns_query.c | 2
-rw-r--r--  net/hsr/hsr_device.c | 2
-rw-r--r--  net/ipv4/igmp.c | 5
-rw-r--r--  net/ipv4/ipmr.c | 23
-rw-r--r--  net/ipv4/tcp.c | 7
-rw-r--r--  net/ipv4/tcp_input.c | 23
-rw-r--r--  net/ipv4/tcp_ipv4.c | 3
-rw-r--r--  net/ipv4/tcp_timer.c | 14
-rw-r--r--  net/ipv4/udp.c | 1
-rw-r--r--  net/ipv6/addrconf.c | 2
-rw-r--r--  net/ipv6/af_inet6.c | 15
-rw-r--r--  net/ipv6/datagram.c | 4
-rw-r--r--  net/ipv6/exthdrs.c | 3
-rw-r--r--  net/ipv6/icmp.c | 14
-rw-r--r--  net/ipv6/inet6_connection_sock.c | 21
-rw-r--r--  net/ipv6/ip6_tunnel.c | 2
-rw-r--r--  net/ipv6/ip6mr.c | 19
-rw-r--r--  net/ipv6/ipv6_sockglue.c | 33
-rw-r--r--  net/ipv6/ndisc.c | 10
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_reasm.c | 5
-rw-r--r--  net/ipv6/raw.c | 8
-rw-r--r--  net/ipv6/reassembly.c | 10
-rw-r--r--  net/ipv6/route.c | 2
-rw-r--r--  net/ipv6/syncookies.c | 2
-rw-r--r--  net/ipv6/tcp_ipv6.c | 32
-rw-r--r--  net/ipv6/udp.c | 8
-rw-r--r--  net/iucv/af_iucv.c | 2
-rw-r--r--  net/l2tp/l2tp_ip6.c | 8
-rw-r--r--  net/mac80211/agg-tx.c | 3
-rw-r--r--  net/mac80211/cfg.c | 8
-rw-r--r--  net/mac80211/iface.c | 5
-rw-r--r--  net/mac80211/main.c | 3
-rw-r--r--  net/mac80211/mesh_pathtbl.c | 8
-rw-r--r--  net/mac80211/scan.c | 9
-rw-r--r--  net/nfc/llcp_sock.c | 2
-rw-r--r--  net/openvswitch/dp_notify.c | 2
-rw-r--r--  net/openvswitch/vport-geneve.c | 1
-rw-r--r--  net/openvswitch/vport-gre.c | 1
-rw-r--r--  net/openvswitch/vport-netdev.c | 8
-rw-r--r--  net/openvswitch/vport.c | 8
-rw-r--r--  net/openvswitch/vport.h | 8
-rw-r--r--  net/packet/af_packet.c | 4
-rw-r--r--  net/rds/connection.c | 6
-rw-r--r--  net/rds/send.c | 4
-rw-r--r--  net/rxrpc/ar-ack.c | 4
-rw-r--r--  net/rxrpc/ar-output.c | 2
-rw-r--r--  net/sched/sch_api.c | 27
-rw-r--r--  net/sched/sch_generic.c | 2
-rw-r--r--  net/sched/sch_mq.c | 4
-rw-r--r--  net/sched/sch_mqprio.c | 4
-rw-r--r--  net/sctp/ipv6.c | 13
-rw-r--r--  net/sctp/socket.c | 39
-rw-r--r--  net/socket.c | 21
-rw-r--r--  net/sunrpc/xprtsock.c | 14
-rw-r--r--  net/tipc/link.c | 2
-rw-r--r--  net/tipc/socket.c | 10
-rw-r--r--  net/tipc/udp_media.c | 7
-rw-r--r--  net/unix/af_unix.c | 268
174 files changed, 3192 insertions, 646 deletions
diff --git a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
index f5a8ca2..aeea50c 100644
--- a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
+++ b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
@@ -8,6 +8,11 @@ Required properties:
- phy-mode: See ethernet.txt file in the same directory
- clocks: a pointer to the reference clock for this device.
+Optional properties:
+- tx-csum-limit: maximum mtu supported by port that allow TX checksum.
+ Value is presented in bytes. If not used, by default 1600B is set for
+ "marvell,armada-370-neta" and 9800B for others.
+
Example:
ethernet@d0070000 {
@@ -15,6 +20,7 @@ ethernet@d0070000 {
reg = <0xd0070000 0x2500>;
interrupts = <8>;
clocks = <&gate_clk 4>;
+ tx-csum-limit = <9800>
status = "okay";
phy = <&phy0>;
phy-mode = "rgmii-id";
diff --git a/MAINTAINERS b/MAINTAINERS
index cba790b..5192700 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1847,7 +1847,7 @@ S: Supported
F: drivers/net/wireless/ath/ath6kl/
WILOCITY WIL6210 WIRELESS DRIVER
-M: Vladimir Kondratiev <qca_vkondrat@qca.qualcomm.com>
+M: Maya Erez <qca_merez@qca.qualcomm.com>
L: linux-wireless@vger.kernel.org
L: wil6210@qca.qualcomm.com
S: Supported
@@ -10903,9 +10903,9 @@ S: Maintained
F: drivers/media/tuners/tua9001*
TULIP NETWORK DRIVERS
-M: Grant Grundler <grundler@parisc-linux.org>
L: netdev@vger.kernel.org
-S: Maintained
+L: linux-parisc@vger.kernel.org
+S: Orphan
F: drivers/net/ethernet/dec/tulip/
TUN/TAP driver
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
index c6a0e9d..e8b7f67 100644
--- a/arch/arm/boot/dts/armada-38x.dtsi
+++ b/arch/arm/boot/dts/armada-38x.dtsi
@@ -498,6 +498,7 @@
reg = <0x70000 0x4000>;
interrupts-extended = <&mpic 8>;
clocks = <&gateclk 4>;
+ tx-csum-limit = <9800>;
status = "disabled";
};
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index d6a53ef..b162ad7 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -139,6 +139,12 @@ static inline int epilogue_offset(const struct jit_ctx *ctx)
/* Stack must be multiples of 16B */
#define STACK_ALIGN(sz) (((sz) + 15) & ~15)
+#define _STACK_SIZE \
+ (MAX_BPF_STACK \
+ + 4 /* extra for skb_copy_bits buffer */)
+
+#define STACK_SIZE STACK_ALIGN(_STACK_SIZE)
+
static void build_prologue(struct jit_ctx *ctx)
{
const u8 r6 = bpf2a64[BPF_REG_6];
@@ -150,10 +156,6 @@ static void build_prologue(struct jit_ctx *ctx)
const u8 rx = bpf2a64[BPF_REG_X];
const u8 tmp1 = bpf2a64[TMP_REG_1];
const u8 tmp2 = bpf2a64[TMP_REG_2];
- int stack_size = MAX_BPF_STACK;
-
- stack_size += 4; /* extra for skb_copy_bits buffer */
- stack_size = STACK_ALIGN(stack_size);
/*
* BPF prog stack layout
@@ -165,12 +167,13 @@ static void build_prologue(struct jit_ctx *ctx)
* | ... | callee saved registers
* +-----+
* | | x25/x26
- * BPF fp register => -80:+-----+
+ * BPF fp register => -80:+-----+ <= (BPF_FP)
* | |
* | ... | BPF prog stack
* | |
- * | |
- * current A64_SP => +-----+
+ * +-----+ <= (BPF_FP - MAX_BPF_STACK)
+ * |RSVD | JIT scratchpad
+ * current A64_SP => +-----+ <= (BPF_FP - STACK_SIZE)
* | |
* | ... | Function call stack
* | |
@@ -196,7 +199,7 @@ static void build_prologue(struct jit_ctx *ctx)
emit(A64_MOV(1, fp, A64_SP), ctx);
/* Set up function call stack */
- emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx);
+ emit(A64_SUB_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);
/* Clear registers A and X */
emit_a64_mov_i64(ra, 0, ctx);
@@ -213,13 +216,9 @@ static void build_epilogue(struct jit_ctx *ctx)
const u8 fp = bpf2a64[BPF_REG_FP];
const u8 tmp1 = bpf2a64[TMP_REG_1];
const u8 tmp2 = bpf2a64[TMP_REG_2];
- int stack_size = MAX_BPF_STACK;
-
- stack_size += 4; /* extra for skb_copy_bits buffer */
- stack_size = STACK_ALIGN(stack_size);
/* We're done with BPF stack */
- emit(A64_ADD_I(1, A64_SP, A64_SP, stack_size), ctx);
+ emit(A64_ADD_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);
/* Restore fs (x25) and x26 */
emit(A64_POP(fp, A64_R(26), A64_SP), ctx);
@@ -591,7 +590,25 @@ emit_cond_jmp:
case BPF_ST | BPF_MEM | BPF_H:
case BPF_ST | BPF_MEM | BPF_B:
case BPF_ST | BPF_MEM | BPF_DW:
- goto notyet;
+ /* Load imm to a register then store it */
+ ctx->tmp_used = 1;
+ emit_a64_mov_i(1, tmp2, off, ctx);
+ emit_a64_mov_i(1, tmp, imm, ctx);
+ switch (BPF_SIZE(code)) {
+ case BPF_W:
+ emit(A64_STR32(tmp, dst, tmp2), ctx);
+ break;
+ case BPF_H:
+ emit(A64_STRH(tmp, dst, tmp2), ctx);
+ break;
+ case BPF_B:
+ emit(A64_STRB(tmp, dst, tmp2), ctx);
+ break;
+ case BPF_DW:
+ emit(A64_STR64(tmp, dst, tmp2), ctx);
+ break;
+ }
+ break;
/* STX: *(size *)(dst + off) = src */
case BPF_STX | BPF_MEM | BPF_W:
@@ -658,7 +675,7 @@ emit_cond_jmp:
return -EINVAL;
}
emit_a64_mov_i64(r3, size, ctx);
- emit(A64_ADD_I(1, r4, fp, MAX_BPF_STACK), ctx);
+ emit(A64_SUB_I(1, r4, fp, STACK_SIZE), ctx);
emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx);
emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
emit(A64_MOV(1, A64_FP, A64_SP), ctx);
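For reference, the constant folded into the new STACK_SIZE macro above: MAX_BPF_STACK is 512 bytes in this tree, so _STACK_SIZE is 516 and STACK_ALIGN rounds it up to 528. The tiny user-space check below just replays the macro arithmetic; it is not kernel code, and MAX_BPF_STACK is hard-coded here on that assumption. That 528-byte frame is what the prologue now subtracts from A64_SP, and the skb_copy_bits scratch buffer passed in r4 sits at fp - STACK_SIZE, i.e. in the reserved JIT scratchpad below the BPF stack, instead of the old fp + MAX_BPF_STACK, which pointed above the frame.

#include <stdio.h>

#define MAX_BPF_STACK	512			/* assumed value from include/linux/bpf.h */
#define STACK_ALIGN(sz)	(((sz) + 15) & ~15)	/* stack must be a multiple of 16 bytes */
#define _STACK_SIZE	(MAX_BPF_STACK + 4)	/* + 4 bytes for the skb_copy_bits buffer */
#define STACK_SIZE	STACK_ALIGN(_STACK_SIZE)

int main(void)
{
	/* 512 + 4 = 516, rounded up to the next multiple of 16 = 528 */
	printf("_STACK_SIZE=%d STACK_SIZE=%d\n", _STACK_SIZE, STACK_SIZE);
	return 0;
}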
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 0aa6fdf..6d4d456 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -125,7 +125,7 @@ static int aead_wait_for_data(struct sock *sk, unsigned flags)
if (flags & MSG_DONTWAIT)
return -EAGAIN;
- set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
for (;;) {
if (signal_pending(current))
@@ -139,7 +139,7 @@ static int aead_wait_for_data(struct sock *sk, unsigned flags)
}
finish_wait(sk_sleep(sk), &wait);
- clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
return err;
}
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index af31a0e..ca9efe1 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -212,7 +212,7 @@ static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
if (flags & MSG_DONTWAIT)
return -EAGAIN;
- set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
for (;;) {
if (signal_pending(current))
@@ -258,7 +258,7 @@ static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
return -EAGAIN;
}
- set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
for (;;) {
if (signal_pending(current))
@@ -272,7 +272,7 @@ static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
}
finish_wait(sk_sleep(sk), &wait);
- clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
return err;
}
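The two algif files above (and several net/ and fs/ files later in the diffstat) are mechanical conversions from open-coded set_bit()/clear_bit() on sk->sk_socket->flags to the new sk_set_bit()/sk_clear_bit() helpers, along with the SOCK_ASYNC_* to SOCKWQ_ASYNC_* rename. The helpers are added in include/net/sock.h, which is not shown in this excerpt; roughly, they start out as thin wrappers like the sketch below, so that a follow-up patch in the same series can relocate the flag storage (toward the socket wait-queue structure) without touching every caller again.

/* Approximate shape of the new helpers in include/net/sock.h
 * (paraphrased, not copied verbatim from the merged tree).
 */
static inline void sk_set_bit(int nr, struct sock *sk)
{
	set_bit(nr, &sk->sk_socket->flags);
}

static inline void sk_clear_bit(int nr, struct sock *sk)
{
	clear_bit(nr, &sk->sk_socket->flags);
}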
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index b33f53b..bf04d2a 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -1896,7 +1896,7 @@ static void EChannel_proc_rcv(struct hisax_d_if *d_if)
ptr--;
*ptr++ = '\n';
*ptr = 0;
- HiSax_putstatus(cs, NULL, "%s", cs->dlog);
+ HiSax_putstatus(cs, NULL, cs->dlog);
} else
HiSax_putstatus(cs, "LogEcho: ",
"warning Frame too big (%d)",
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 4a48255..90449e1 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -901,7 +901,7 @@ Begin:
ptr--;
*ptr++ = '\n';
*ptr = 0;
- HiSax_putstatus(cs, NULL, "%s", cs->dlog);
+ HiSax_putstatus(cs, NULL, cs->dlog);
} else
HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", total - 3);
}
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
index b1fad81..13b2151 100644
--- a/drivers/isdn/hisax/hfc_sx.c
+++ b/drivers/isdn/hisax/hfc_sx.c
@@ -674,7 +674,7 @@ receive_emsg(struct IsdnCardState *cs)
ptr--;
*ptr++ = '\n';
*ptr = 0;
- HiSax_putstatus(cs, NULL, "%s", cs->dlog);
+ HiSax_putstatus(cs, NULL, cs->dlog);
} else
HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", skb->len);
}
diff --git a/drivers/isdn/hisax/q931.c b/drivers/isdn/hisax/q931.c
index b420f8b..ba4beb2 100644
--- a/drivers/isdn/hisax/q931.c
+++ b/drivers/isdn/hisax/q931.c
@@ -1179,7 +1179,7 @@ LogFrame(struct IsdnCardState *cs, u_char *buf, int size)
dp--;
*dp++ = '\n';
*dp = 0;
- HiSax_putstatus(cs, NULL, "%s", cs->dlog);
+ HiSax_putstatus(cs, NULL, cs->dlog);
} else
HiSax_putstatus(cs, "LogFrame: ", "warning Frame too big (%d)", size);
}
@@ -1246,7 +1246,7 @@ dlogframe(struct IsdnCardState *cs, struct sk_buff *skb, int dir)
}
if (finish) {
*dp = 0;
- HiSax_putstatus(cs, NULL, "%s", cs->dlog);
+ HiSax_putstatus(cs, NULL, cs->dlog);
return;
}
if ((0xfe & buf[0]) == PROTO_DIS_N0) { /* 1TR6 */
@@ -1509,5 +1509,5 @@ dlogframe(struct IsdnCardState *cs, struct sk_buff *skb, int dir)
dp += sprintf(dp, "Unknown protocol %x!", buf[0]);
}
*dp = 0;
- HiSax_putstatus(cs, NULL, "%s", cs->dlog);
+ HiSax_putstatus(cs, NULL, cs->dlog);
}
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 57dadd5..1deb8ff 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -501,8 +501,6 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
cf->data[2] |= CAN_ERR_PROT_FORM;
else if (status & SER)
cf->data[2] |= CAN_ERR_PROT_STUFF;
- else
- cf->data[2] |= CAN_ERR_PROT_UNSPEC;
}
priv->can.state = state;
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 5d214d1..f91b094 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -962,7 +962,6 @@ static int c_can_handle_bus_err(struct net_device *dev,
* type of the last error to occur on the CAN bus
*/
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
- cf->data[2] |= CAN_ERR_PROT_UNSPEC;
switch (lec_type) {
case LEC_STUFF_ERROR:
@@ -975,8 +974,7 @@ static int c_can_handle_bus_err(struct net_device *dev,
break;
case LEC_ACK_ERROR:
netdev_dbg(dev, "ack error\n");
- cf->data[3] |= (CAN_ERR_PROT_LOC_ACK |
- CAN_ERR_PROT_LOC_ACK_DEL);
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
break;
case LEC_BIT1_ERROR:
netdev_dbg(dev, "bit1 error\n");
@@ -988,8 +986,7 @@ static int c_can_handle_bus_err(struct net_device *dev,
break;
case LEC_CRC_ERROR:
netdev_dbg(dev, "CRC error\n");
- cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
- CAN_ERR_PROT_LOC_CRC_DEL);
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
break;
default:
break;
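The bfin_can and c_can hunks above set the pattern for the rest of the CAN driver fixes, through xilinx_can.c. Two things are being cleaned up: CAN_ERR_PROT_UNSPEC is defined as 0 in include/uapi/linux/can/error.h, so OR-ing it into cf->data[2] was always a no-op, and cf->data[3] carries a single error-location value rather than a bit mask, so OR-ing CRC_SEQ with CRC_DEL (or ACK with ACK_DEL) merely produced a different location code. A condensed before/after sketch of the CRC case, written against the uapi headers rather than any one driver:

#include <linux/can.h>
#include <linux/can/error.h>

/* Illustration only; "cf" is an error frame being filled in by a driver. */
void report_crc_error(struct can_frame *cf)
{
	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

	/* Old style (removed by these patches):
	 *
	 *	cf->data[2] |= CAN_ERR_PROT_UNSPEC;             UNSPEC == 0, no effect
	 *	cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |
	 *		       CAN_ERR_PROT_LOC_CRC_DEL;        locations are values,
	 *	                                                OR-ing mangles them
	 */

	/* New style: data[3] holds exactly one location code. */
	cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
}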
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index 70a8cbb..1e37313 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -578,7 +578,7 @@ static int cc770_err(struct net_device *dev, u8 status)
cf->data[2] |= CAN_ERR_PROT_BIT0;
break;
case STAT_LEC_CRC:
- cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
break;
}
}
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 868fe94..41c0fc9 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -535,13 +535,13 @@ static void do_bus_err(struct net_device *dev,
if (reg_esr & FLEXCAN_ESR_ACK_ERR) {
netdev_dbg(dev, "ACK_ERR irq\n");
cf->can_id |= CAN_ERR_ACK;
- cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
tx_errors = 1;
}
if (reg_esr & FLEXCAN_ESR_CRC_ERR) {
netdev_dbg(dev, "CRC_ERR irq\n");
cf->data[2] |= CAN_ERR_PROT_BIT;
- cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
rx_errors = 1;
}
if (reg_esr & FLEXCAN_ESR_FRM_ERR) {
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index c1e8536..5d04f54 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1096,7 +1096,6 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
cf->data[2] |= CAN_ERR_PROT_STUFF;
break;
default:
- cf->data[2] |= CAN_ERR_PROT_UNSPEC;
cf->data[3] = ecc & ECC_SEG;
break;
}
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index ef65517..39cf911 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -487,7 +487,6 @@ static int m_can_handle_lec_err(struct net_device *dev,
* type of the last error to occur on the CAN bus
*/
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
- cf->data[2] |= CAN_ERR_PROT_UNSPEC;
switch (lec_type) {
case LEC_STUFF_ERROR:
@@ -500,8 +499,7 @@ static int m_can_handle_lec_err(struct net_device *dev,
break;
case LEC_ACK_ERROR:
netdev_dbg(dev, "ack error\n");
- cf->data[3] |= (CAN_ERR_PROT_LOC_ACK |
- CAN_ERR_PROT_LOC_ACK_DEL);
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
break;
case LEC_BIT1_ERROR:
netdev_dbg(dev, "bit1 error\n");
@@ -513,8 +511,7 @@ static int m_can_handle_lec_err(struct net_device *dev,
break;
case LEC_CRC_ERROR:
netdev_dbg(dev, "CRC error\n");
- cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
- CAN_ERR_PROT_LOC_CRC_DEL);
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
break;
default:
break;
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index e187ca7..c131788 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -559,8 +559,7 @@ static void pch_can_error(struct net_device *ndev, u32 status)
stats->rx_errors++;
break;
case PCH_CRC_ERR:
- cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |
- CAN_ERR_PROT_LOC_CRC_DEL;
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
priv->can.can_stats.bus_error++;
stats->rx_errors++;
break;
diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c
index 7bd5419..bc46be3 100644
--- a/drivers/net/can/rcar_can.c
+++ b/drivers/net/can/rcar_can.c
@@ -241,17 +241,16 @@ static void rcar_can_error(struct net_device *ndev)
u8 ecsr;
netdev_dbg(priv->ndev, "Bus error interrupt:\n");
- if (skb) {
+ if (skb)
cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
- cf->data[2] = CAN_ERR_PROT_UNSPEC;
- }
+
ecsr = readb(&priv->regs->ecsr);
if (ecsr & RCAR_CAN_ECSR_ADEF) {
netdev_dbg(priv->ndev, "ACK Delimiter Error\n");
tx_errors++;
writeb(~RCAR_CAN_ECSR_ADEF, &priv->regs->ecsr);
if (skb)
- cf->data[3] |= CAN_ERR_PROT_LOC_ACK_DEL;
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK_DEL;
}
if (ecsr & RCAR_CAN_ECSR_BE0F) {
netdev_dbg(priv->ndev, "Bit Error (dominant)\n");
@@ -272,7 +271,7 @@ static void rcar_can_error(struct net_device *ndev)
rx_errors++;
writeb(~RCAR_CAN_ECSR_CEF, &priv->regs->ecsr);
if (skb)
- cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
}
if (ecsr & RCAR_CAN_ECSR_AEF) {
netdev_dbg(priv->ndev, "ACK Error\n");
@@ -280,7 +279,7 @@ static void rcar_can_error(struct net_device *ndev)
writeb(~RCAR_CAN_ECSR_AEF, &priv->regs->ecsr);
if (skb) {
cf->can_id |= CAN_ERR_ACK;
- cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
}
}
if (ecsr & RCAR_CAN_ECSR_FEF) {
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 7b92e91..8dda3b7 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -218,6 +218,9 @@ static void sja1000_start(struct net_device *dev)
priv->write_reg(priv, SJA1000_RXERR, 0x0);
priv->read_reg(priv, SJA1000_ECC);
+ /* clear interrupt flags */
+ priv->read_reg(priv, SJA1000_IR);
+
/* leave reset mode */
set_normal_mode(dev);
}
@@ -446,7 +449,6 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
cf->data[2] |= CAN_ERR_PROT_STUFF;
break;
default:
- cf->data[2] |= CAN_ERR_PROT_UNSPEC;
cf->data[3] = ecc & ECC_SEG;
break;
}
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index d9a42c6..68ef0a4 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -575,7 +575,6 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
cf->data[2] |= CAN_ERR_PROT_STUFF;
break;
default:
- cf->data[2] |= CAN_ERR_PROT_UNSPEC;
cf->data[3] = (ecc & SUN4I_STA_ERR_SEG_CODE)
>> 16;
break;
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index cf345cb..680d1ff 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -722,7 +722,6 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
if (err_status & HECC_BUS_ERROR) {
++priv->can.can_stats.bus_error;
cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
- cf->data[2] |= CAN_ERR_PROT_UNSPEC;
if (err_status & HECC_CANES_FE) {
hecc_set_bit(priv, HECC_CANES, HECC_CANES_FE);
cf->data[2] |= CAN_ERR_PROT_FORM;
@@ -737,13 +736,11 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
}
if (err_status & HECC_CANES_CRCE) {
hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE);
- cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |
- CAN_ERR_PROT_LOC_CRC_DEL;
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
}
if (err_status & HECC_CANES_ACKE) {
hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE);
- cf->data[3] |= CAN_ERR_PROT_LOC_ACK |
- CAN_ERR_PROT_LOC_ACK_DEL;
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
}
}
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 2d39038..fc5b756 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -377,7 +377,6 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
cf->data[2] |= CAN_ERR_PROT_STUFF;
break;
default:
- cf->data[2] |= CAN_ERR_PROT_UNSPEC;
cf->data[3] = ecc & SJA1000_ECC_SEG;
break;
}
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 0e5a449..113e64f 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -282,7 +282,6 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
cf->data[2] |= CAN_ERR_PROT_STUFF;
break;
default:
- cf->data[2] |= CAN_ERR_PROT_UNSPEC;
cf->data[3] = ecc & SJA1000_ECC_SEG;
break;
}
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 8b17a90..022bfa1 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -944,10 +944,9 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
if (es->leaf.error_factor & M16C_EF_ACKE)
- cf->data[3] |= (CAN_ERR_PROT_LOC_ACK);
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
if (es->leaf.error_factor & M16C_EF_CRCE)
- cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
- CAN_ERR_PROT_LOC_CRC_DEL);
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
if (es->leaf.error_factor & M16C_EF_FORME)
cf->data[2] |= CAN_ERR_PROT_FORM;
if (es->leaf.error_factor & M16C_EF_STFE)
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index de95b1c..a731720 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -401,9 +401,7 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv,
tx_errors = 1;
break;
case USB_8DEV_STATUSMSG_CRC:
- cf->data[2] |= CAN_ERR_PROT_UNSPEC;
- cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |
- CAN_ERR_PROT_LOC_CRC_DEL;
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
rx_errors = 1;
break;
case USB_8DEV_STATUSMSG_BIT0:
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index fc55e8e..51670b3 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -608,17 +608,15 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
/* Check for error interrupt */
if (isr & XCAN_IXR_ERROR_MASK) {
- if (skb) {
+ if (skb)
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
- cf->data[2] |= CAN_ERR_PROT_UNSPEC;
- }
/* Check for Ack error interrupt */
if (err_status & XCAN_ESR_ACKER_MASK) {
stats->tx_errors++;
if (skb) {
cf->can_id |= CAN_ERR_ACK;
- cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
}
}
@@ -654,8 +652,7 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
stats->rx_errors++;
if (skb) {
cf->can_id |= CAN_ERR_PROT;
- cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ |
- CAN_ERR_PROT_LOC_CRC_DEL;
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
}
}
priv->can.can_stats.bus_error++;
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 955d06b..31c5e47 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -29,6 +29,7 @@ source "drivers/net/ethernet/apm/Kconfig"
source "drivers/net/ethernet/apple/Kconfig"
source "drivers/net/ethernet/arc/Kconfig"
source "drivers/net/ethernet/atheros/Kconfig"
+source "drivers/net/ethernet/aurora/Kconfig"
source "drivers/net/ethernet/cadence/Kconfig"
source "drivers/net/ethernet/adi/Kconfig"
source "drivers/net/ethernet/broadcom/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 4a2ee98..071f84e 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_NET_XGENE) += apm/
obj-$(CONFIG_NET_VENDOR_APPLE) += apple/
obj-$(CONFIG_NET_VENDOR_ARC) += arc/
obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/
+obj-$(CONFIG_NET_VENDOR_AURORA) += aurora/
obj-$(CONFIG_NET_CADENCE) += cadence/
obj-$(CONFIG_NET_BFIN) += adi/
obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 991412c..9147a01 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -450,12 +450,12 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- pdata->ring_ops->wr_cmd(tx_ring, count);
skb_tx_timestamp(skb);
pdata->stats.tx_packets++;
pdata->stats.tx_bytes += skb->len;
+ pdata->ring_ops->wr_cmd(tx_ring, count);
return NETDEV_TX_OK;
}
@@ -688,10 +688,10 @@ static int xgene_enet_open(struct net_device *ndev)
mac_ops->tx_enable(pdata);
mac_ops->rx_enable(pdata);
+ xgene_enet_napi_enable(pdata);
ret = xgene_enet_register_irq(ndev);
if (ret)
return ret;
- xgene_enet_napi_enable(pdata);
if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
phy_start(pdata->phy_dev);
@@ -715,13 +715,13 @@ static int xgene_enet_close(struct net_device *ndev)
else
cancel_delayed_work_sync(&pdata->link_work);
- xgene_enet_napi_disable(pdata);
- xgene_enet_free_irq(ndev);
- xgene_enet_process_ring(pdata->rx_ring, -1);
-
mac_ops->tx_disable(pdata);
mac_ops->rx_disable(pdata);
+ xgene_enet_free_irq(ndev);
+ xgene_enet_napi_disable(pdata);
+ xgene_enet_process_ring(pdata->rx_ring, -1);
+
return 0;
}
@@ -1474,15 +1474,15 @@ static int xgene_enet_probe(struct platform_device *pdev)
}
ndev->hw_features = ndev->features;
- ret = register_netdev(ndev);
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret) {
- netdev_err(ndev, "Failed to register netdev\n");
+ netdev_err(ndev, "No usable DMA configuration\n");
goto err;
}
- ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ ret = register_netdev(ndev);
if (ret) {
- netdev_err(ndev, "No usable DMA configuration\n");
+ netdev_err(ndev, "Failed to register netdev\n");
goto err;
}
@@ -1490,14 +1490,17 @@ static int xgene_enet_probe(struct platform_device *pdev)
if (ret)
goto err;
- xgene_enet_napi_add(pdata);
mac_ops = pdata->mac_ops;
- if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+ if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
ret = xgene_enet_mdio_config(pdata);
- else
+ if (ret)
+ goto err;
+ } else {
INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state);
+ }
- return ret;
+ xgene_enet_napi_add(pdata);
+ return 0;
err:
unregister_netdev(ndev);
free_netdev(ndev);
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index c8af3ce..bd377a6 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1534,6 +1534,8 @@ static const struct pci_device_id alx_pci_tbl[] = {
.driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200),
.driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
+ { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2400),
+ .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162),
.driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) },
diff --git a/drivers/net/ethernet/atheros/alx/reg.h b/drivers/net/ethernet/atheros/alx/reg.h
index af006b4..0959e68 100644
--- a/drivers/net/ethernet/atheros/alx/reg.h
+++ b/drivers/net/ethernet/atheros/alx/reg.h
@@ -37,6 +37,7 @@
#define ALX_DEV_ID_AR8161 0x1091
#define ALX_DEV_ID_E2200 0xe091
+#define ALX_DEV_ID_E2400 0xe0a1
#define ALX_DEV_ID_AR8162 0x1090
#define ALX_DEV_ID_AR8171 0x10A1
#define ALX_DEV_ID_AR8172 0x10A0
diff --git a/drivers/net/ethernet/aurora/Kconfig b/drivers/net/ethernet/aurora/Kconfig
new file mode 100644
index 0000000..a3c7106
--- /dev/null
+++ b/drivers/net/ethernet/aurora/Kconfig
@@ -0,0 +1,20 @@
+config NET_VENDOR_AURORA
+ bool "Aurora VLSI devices"
+ help
+ If you have a network (Ethernet) device belonging to this class,
+ say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ questions about Aurora devices. If you say Y, you will be asked
+ for your specific device in the following questions.
+
+if NET_VENDOR_AURORA
+
+config AURORA_NB8800
+ tristate "Aurora AU-NB8800 support"
+ select PHYLIB
+ help
+ Support for the AU-NB8800 gigabit Ethernet controller.
+
+endif
diff --git a/drivers/net/ethernet/aurora/Makefile b/drivers/net/ethernet/aurora/Makefile
new file mode 100644
index 0000000..6cb528a
--- /dev/null
+++ b/drivers/net/ethernet/aurora/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_AURORA_NB8800) += nb8800.o
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
new file mode 100644
index 0000000..ecc4a33
--- /dev/null
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -0,0 +1,1552 @@
+/*
+ * Copyright (C) 2015 Mans Rullgard <mans@mansr.com>
+ *
+ * Mostly rewritten, based on driver from Sigma Designs. Original
+ * copyright notice below.
+ *
+ *
+ * Driver for tangox SMP864x/SMP865x/SMP867x/SMP868x builtin Ethernet Mac.
+ *
+ * Copyright (C) 2005 Maxime Bizon <mbizon@freebox.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/dma-mapping.h>
+#include <linux/phy.h>
+#include <linux/cache.h>
+#include <linux/jiffies.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <asm/barrier.h>
+
+#include "nb8800.h"
+
+static void nb8800_tx_done(struct net_device *dev);
+static int nb8800_dma_stop(struct net_device *dev);
+
+static inline u8 nb8800_readb(struct nb8800_priv *priv, int reg)
+{
+ return readb_relaxed(priv->base + reg);
+}
+
+static inline u32 nb8800_readl(struct nb8800_priv *priv, int reg)
+{
+ return readl_relaxed(priv->base + reg);
+}
+
+static inline void nb8800_writeb(struct nb8800_priv *priv, int reg, u8 val)
+{
+ writeb_relaxed(val, priv->base + reg);
+}
+
+static inline void nb8800_writew(struct nb8800_priv *priv, int reg, u16 val)
+{
+ writew_relaxed(val, priv->base + reg);
+}
+
+static inline void nb8800_writel(struct nb8800_priv *priv, int reg, u32 val)
+{
+ writel_relaxed(val, priv->base + reg);
+}
+
+static inline void nb8800_maskb(struct nb8800_priv *priv, int reg,
+ u32 mask, u32 val)
+{
+ u32 old = nb8800_readb(priv, reg);
+ u32 new = (old & ~mask) | (val & mask);
+
+ if (new != old)
+ nb8800_writeb(priv, reg, new);
+}
+
+static inline void nb8800_maskl(struct nb8800_priv *priv, int reg,
+ u32 mask, u32 val)
+{
+ u32 old = nb8800_readl(priv, reg);
+ u32 new = (old & ~mask) | (val & mask);
+
+ if (new != old)
+ nb8800_writel(priv, reg, new);
+}
+
+static inline void nb8800_modb(struct nb8800_priv *priv, int reg, u8 bits,
+ bool set)
+{
+ nb8800_maskb(priv, reg, bits, set ? bits : 0);
+}
+
+static inline void nb8800_setb(struct nb8800_priv *priv, int reg, u8 bits)
+{
+ nb8800_maskb(priv, reg, bits, bits);
+}
+
+static inline void nb8800_clearb(struct nb8800_priv *priv, int reg, u8 bits)
+{
+ nb8800_maskb(priv, reg, bits, 0);
+}
+
+static inline void nb8800_modl(struct nb8800_priv *priv, int reg, u32 bits,
+ bool set)
+{
+ nb8800_maskl(priv, reg, bits, set ? bits : 0);
+}
+
+static inline void nb8800_setl(struct nb8800_priv *priv, int reg, u32 bits)
+{
+ nb8800_maskl(priv, reg, bits, bits);
+}
+
+static inline void nb8800_clearl(struct nb8800_priv *priv, int reg, u32 bits)
+{
+ nb8800_maskl(priv, reg, bits, 0);
+}
+
+static int nb8800_mdio_wait(struct mii_bus *bus)
+{
+ struct nb8800_priv *priv = bus->priv;
+ u32 val;
+
+ return readl_poll_timeout_atomic(priv->base + NB8800_MDIO_CMD,
+ val, !(val & MDIO_CMD_GO), 1, 1000);
+}
+
+static int nb8800_mdio_cmd(struct mii_bus *bus, u32 cmd)
+{
+ struct nb8800_priv *priv = bus->priv;
+ int err;
+
+ err = nb8800_mdio_wait(bus);
+ if (err)
+ return err;
+
+ nb8800_writel(priv, NB8800_MDIO_CMD, cmd);
+ udelay(10);
+ nb8800_writel(priv, NB8800_MDIO_CMD, cmd | MDIO_CMD_GO);
+
+ return nb8800_mdio_wait(bus);
+}
+
+static int nb8800_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+ struct nb8800_priv *priv = bus->priv;
+ u32 val;
+ int err;
+
+ err = nb8800_mdio_cmd(bus, MDIO_CMD_ADDR(phy_id) | MDIO_CMD_REG(reg));
+ if (err)
+ return err;
+
+ val = nb8800_readl(priv, NB8800_MDIO_STS);
+ if (val & MDIO_STS_ERR)
+ return 0xffff;
+
+ return val & 0xffff;
+}
+
+static int nb8800_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
+{
+ u32 cmd = MDIO_CMD_ADDR(phy_id) | MDIO_CMD_REG(reg) |
+ MDIO_CMD_DATA(val) | MDIO_CMD_WR;
+
+ return nb8800_mdio_cmd(bus, cmd);
+}
+
+static void nb8800_mac_tx(struct net_device *dev, bool enable)
+{
+ struct nb8800_priv *priv = netdev_priv(dev);
+
+ while (nb8800_readl(priv, NB8800_TXC_CR) & TCR_EN)
+ cpu_relax();
+
+ nb8800_modb(priv, NB8800_TX_CTL1, TX_EN, enable);
+}
+
+static void nb8800_mac_rx(struct net_device *dev, bool enable)
+{
+ nb8800_modb(netdev_priv(dev), NB8800_RX_CTL, RX_EN, enable);
+}
+
+static void nb8800_mac_af(struct net_device *dev, bool enable)
+{
+ nb8800_modb(netdev_priv(dev), NB8800_RX_CTL, RX_AF_EN, enable);
+}
+
+static void nb8800_start_rx(struct net_device *dev)
+{
+ nb8800_setl(netdev_priv(dev), NB8800_RXC_CR, RCR_EN);
+}
+
+static int nb8800_alloc_rx(struct net_device *dev, unsigned int i, bool napi)
+{
+ struct nb8800_priv *priv = netdev_priv(dev);
+ struct nb8800_rx_desc *rxd = &priv->rx_descs[i];
+ struct nb8800_rx_buf *rxb = &priv->rx_bufs[i];
+ int size = L1_CACHE_ALIGN(RX_BUF_SIZE);
+ dma_addr_t dma_addr;
+ struct page *page;
+ unsigned long offset;
+ void *data;
+
+ data = napi ? napi_alloc_frag(size) : netdev_alloc_frag(size);
+ if (!data)
+ return -ENOMEM;
+
+ page = virt_to_head_page(data);
+ offset = data - page_address(page);
+
+ dma_addr = dma_map_page(&dev->dev, page, offset, RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(&dev->dev, dma_addr)) {
+ skb_free_frag(data);
+ return -ENOMEM;
+ }
+
+ rxb->page = page;
+ rxb->offset = offset;
+ rxd->desc.s_addr = dma_addr;
+
+ return 0;
+}
+
+static void nb8800_receive(struct net_device *dev, unsigned int i,
+ unsigned int len)
+{
+ struct nb8800_priv *priv = netdev_priv(dev);
+ struct nb8800_rx_desc *rxd = &priv->rx_descs[i];
+ struct page *page = priv->rx_bufs[i].page;
+ int offset = priv->rx_bufs[i].offset;
+ void *data = page_address(page) + offset;
+ dma_addr_t dma = rxd->desc.s_addr;
+ struct sk_buff *skb;
+ unsigned int size;
+ int err;
+
+ size = len <= RX_COPYBREAK ? len : RX_COPYHDR;
+
+ skb = napi_alloc_skb(&priv->napi, size);
+ if (!skb) {
+ netdev_err(dev, "rx skb allocation failed\n");
+ dev->stats.rx_dropped++;
+ return;
+ }
+
+ if (len <= RX_COPYBREAK) {
+ dma_sync_single_for_cpu(&dev->dev, dma, len, DMA_FROM_DEVICE);
+ memcpy(skb_put(skb, len), data, len);
+ dma_sync_single_for_device(&dev->dev, dma, len,
+ DMA_FROM_DEVICE);
+ } else {
+ err = nb8800_alloc_rx(dev, i, true);
+ if (err) {
+ netdev_err(dev, "rx buffer allocation failed\n");
+ dev->stats.rx_dropped++;
+ return;
+ }
+
+ dma_unmap_page(&dev->dev, dma, RX_BUF_SIZE, DMA_FROM_DEVICE);
+ memcpy(skb_put(skb, RX_COPYHDR), data, RX_COPYHDR);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ offset + RX_COPYHDR, len - RX_COPYHDR,
+ RX_BUF_SIZE);
+ }
+
+ skb->protocol = eth_type_trans(skb, dev);
+ napi_gro_receive(&priv->napi, skb);
+}
+
+static void nb8800_rx_error(struct net_device *dev, u32 report)
+{
+ if (report & RX_LENGTH_ERR)
+ dev->stats.rx_length_errors++;
+
+ if (report & RX_FCS_ERR)
+ dev->stats.rx_crc_errors++;
+
+ if (report & RX_FIFO_OVERRUN)
+ dev->stats.rx_fifo_errors++;
+
+ if (report & RX_ALIGNMENT_ERROR)
+ dev->stats.rx_frame_errors++;
+
+ dev->stats.rx_errors++;
+}
+
+static int nb8800_poll(struct napi_struct *napi, int budget)
+{
+ struct net_device *dev = napi->dev;
+ struct nb8800_priv *priv = netdev_priv(dev);
+ struct nb8800_rx_desc *rxd;
+ unsigned int last = priv->rx_eoc;
+ unsigned int next;
+ int work = 0;
+
+ nb8800_tx_done(dev);
+
+again:
+ while (work < budget) {
+ struct nb8800_rx_buf *rxb;
+ unsigned int len;
+
+ next = (last + 1) % RX_DESC_COUNT;
+
+ rxb = &priv->rx_bufs[next];
+ rxd = &priv->rx_descs[next];
+
+ if (!rxd->report)
+ break;
+
+ len = RX_BYTES_TRANSFERRED(rxd->report);
+
+ if (IS_RX_ERROR(rxd->report))
+ nb8800_rx_error(dev, rxd->report);
+ else
+ nb8800_receive(dev, next, len);
+
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+
+ if (rxd->report & RX_MULTICAST_PKT)
+ dev->stats.multicast++;
+
+ rxd->report = 0;
+ last = next;
+ work++;
+ }
+
+ if (work) {
+ priv->rx_descs[last].desc.config |= DESC_EOC;
+ wmb(); /* ensure new EOC is written before clearing old */
+ priv->rx_descs[priv->rx_eoc].desc.config &= ~DESC_EOC;
+ priv->rx_eoc = last;
+ nb8800_start_rx(dev);
+ }
+
+ if (work < budget) {
+ nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_irq);
+
+ /* If a packet arrived after we last checked but
+ * before writing RX_ITR, the interrupt will be
+ * delayed, so we retrieve it now.
+ */
+ if (priv->rx_descs[next].report)
+ goto again;
+
+ napi_complete_done(napi, work);
+ }
+
+ return work;
+}
+
+static void __nb8800_tx_dma_start(struct net_device *dev)
+{
+ struct nb8800_priv *priv = netdev_priv(dev);
+ struct nb8800_tx_buf *txb;
+ u32 txc_cr;
+
+ txb = &priv->tx_bufs[priv->tx_queue];
+ if (!txb->ready)
+ return;
+
+ txc_cr = nb8800_readl(priv, NB8800_TXC_CR);
+ if (txc_cr & TCR_EN)
+ return;
+
+ nb8800_writel(priv, NB8800_TX_DESC_ADDR, txb->dma_desc);
+ wmb(); /* ensure desc addr is written before starting DMA */
+ nb8800_writel(priv, NB8800_TXC_CR, txc_cr | TCR_EN);
+
+ priv->tx_queue = (priv->tx_queue + txb->chain_len) % TX_DESC_COUNT;
+}
+
+static void nb8800_tx_dma_start(struct net_device *dev)
+{
+ struct nb8800_priv *priv = netdev_priv(dev);
+
+ spin_lock_irq(&priv->tx_lock);
+ __nb8800_tx_dma_start(dev);
+ spin_unlock_irq(&priv->tx_lock);
+}
+
+static void nb8800_tx_dma_start_irq(struct net_device *dev)
+{
+ struct nb8800_priv *priv = netdev_priv(dev);
+
+ spin_lock(&priv->tx_lock);
+ __nb8800_tx_dma_start(dev);
+ spin_unlock(&priv->tx_lock);
+}
+
+static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct nb8800_priv *priv = netdev_priv(dev);
+ struct nb8800_tx_desc *txd;
+ struct nb8800_tx_buf *txb;
+ struct nb8800_dma_desc *desc;
+ dma_addr_t dma_addr;
+ unsigned int dma_len;
+ unsigned int align;
+ unsigned int next;
+
+ if (atomic_read(&priv->tx_free) <= NB8800_DESC_LOW) {
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ align = (8 - (uintptr_t)skb->data) & 7;
+
+ dma_len = skb->len - align;
+ dma_addr = dma_map_single(&dev->dev, skb->data + align,
+ dma_len, DMA_TO_DEVICE);
+
+ if (dma_mapping_error(&dev->dev, dma_addr)) {
+ netdev_err(dev, "tx dma mapping error\n");
+ kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ if (atomic_dec_return(&priv->tx_free) <= NB8800_DESC_LOW) {
+ netif_stop_queue(dev);
+ skb->xmit_more = 0;
+ }
+
+ next = priv->tx_next;
+ txb = &priv->tx_bufs[next];
+ txd = &priv->tx_descs[next];
+ desc = &txd->desc[0];
+
+ next = (next + 1) % TX_DESC_COUNT;
+
+ if (align) {
+ memcpy(txd->buf, skb->data, align);
+
+ desc->s_addr =
+ txb->dma_desc + offsetof(struct nb8800_tx_desc, buf);
+ desc->n_addr = txb->dma_desc + sizeof(txd->desc[0]);
+ desc->config = DESC_BTS(2) | DESC_DS | align;
+
+ desc++;
+ }
+
+ desc->s_addr = dma_addr;
+ desc->n_addr = priv->tx_bufs[next].dma_desc;
+ desc->config = DESC_BTS(2) | DESC_DS | DESC_EOF | dma_len;
+
+ if (!skb->xmit_more)
+ desc->config |= DESC_EOC;
+
+ txb->skb = skb;
+ txb->dma_addr = dma_addr;
+ txb->dma_len = dma_len;
+
+ if (!priv->tx_chain) {
+ txb->chain_len = 1;
+ priv->tx_chain = txb;
+ } else {
+ priv->tx_chain->chain_len++;
+ }
+
+ netdev_sent_queue(dev, skb->len);
+
+ priv->tx_next = next;
+
+ if (!skb->xmit_more) {
+ smp_wmb();
+ priv->tx_chain->ready = true;
+ priv->tx_chain = NULL;
+ nb8800_tx_dma_start(dev);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static void nb8800_tx_error(struct net_device *dev, u32 report)
+{
+ if (report & TX_LATE_COLLISION)
+ dev->stats.collisions++;
+
+ if (report & TX_PACKET_DROPPED)
+ dev->stats.tx_dropped++;
+
+ if (report & TX_FIFO_UNDERRUN)
+ dev->stats.tx_fifo_errors++;
+
+ dev->stats.tx_errors++;
+}
+
+static void nb8800_tx_done(struct net_device *dev)
+{
+ struct nb8800_priv *priv = netdev_priv(dev);
+ unsigned int limit = priv->tx_next;
+ unsigned int done = priv->tx_done;
+ unsigned int packets = 0;
+ unsigned int len = 0;
+
+ while (done != limit) {
+ struct nb8800_tx_desc *txd = &priv->tx_descs[done];
+ struct nb8800_tx_buf *txb = &priv->tx_bufs[done];
+ struct sk_buff *skb;
+
+ if (!txd->report)
+ break;
+
+ skb = txb->skb;
+ len += skb->len;
+
+ dma_unmap_single(&dev->dev, txb->dma_addr, txb->dma_len,
+ DMA_TO_DEVICE);
+
+ if (IS_TX_ERROR(txd->report)) {
+ nb8800_tx_error(dev, txd->report);
+ kfree_skb(skb);
+ } else {
+ consume_skb(skb);
+ }
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += TX_BYTES_TRANSFERRED(txd->report);
+ dev->stats.collisions += TX_EARLY_COLLISIONS(txd->report);
+
+ txb->skb = NULL;
+ txb->ready = false;
+ txd->report = 0;
+
+ done = (done + 1) % TX_DESC_COUNT;
+ packets++;
+ }
+
+ if (packets) {
+ smp_mb__before_atomic();
+ atomic_add(packets, &priv->tx_free);
+ netdev_completed_queue(dev, packets, len);
+ netif_wake_queue(dev);
+ priv->tx_done = done;
+ }
+}
+
+static irqreturn_t nb8800_irq(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct nb8800_priv *priv = netdev_priv(dev);
+ irqreturn_t ret = IRQ_NONE;
+ u32 val;
+
+ /* tx interrupt */
+ val = nb8800_readl(priv, NB8800_TXC_SR);
+ if (val) {
+ nb8800_writel(priv, NB8800_TXC_SR, val);
+
+ if (val & TSR_DI)
+ nb8800_tx_dma_start_irq(dev);
+
+ if (val & TSR_TI)
+ napi_schedule_irqoff(&priv->napi);
+
+ if (unlikely(val & TSR_DE))
+ netdev_err(dev, "TX DMA error\n");
+
+ /* should never happen with automatic status retrieval */
+ if (unlikely(val & TSR_TO))
+ netdev_err(dev, "TX Status FIFO overflow\n");
+
+ ret = IRQ_HANDLED;
+ }
+
+ /* rx interrupt */
+ val = nb8800_readl(priv, NB8800_RXC_SR);
+ if (val) {
+ nb8800_writel(priv, NB8800_RXC_SR, val);
+
+ if (likely(val & (RSR_RI | RSR_DI))) {
+ nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_poll);
+ napi_schedule_irqoff(&priv->napi);
+ }
+
+ if (unlikely(val & RSR_DE))
+ netdev_err(dev, "RX DMA error\n");
+
+ /* should never happen with automatic status retrieval */
+ if (unlikely(val & RSR_RO))
+ netdev_err(dev, "RX Status FIFO overflow\n");
+
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static void nb8800_mac_config(struct net_device *dev)
+{
+ struct nb8800_priv *priv = netdev_priv(dev);
+ bool gigabit = priv->speed == SPEED_1000;
+ u32 mac_mode_mask = RGMII_MODE | HALF_DUPLEX | GMAC_MODE;
+ u32 mac_mode = 0;
+ u32 slot_time;
+ u32 phy_clk;
+ u32 ict;
+
+ if (!priv->duplex)
+ mac_mode |= HALF_DUPLEX;
+
+ if (gigabit) {
+ if (priv->phy_mode == PHY_INTERFACE_MODE_RGMII)
+ mac_mode |= RGMII_MODE;
+
+ mac_mode |= GMAC_MODE;
+ phy_clk = 125000000;
+
+ /* Should be 512 but register is only 8 bits */
+ slot_time = 255;
+ } else {
+ phy_clk = 25000000;
+ slot_time = 128;
+ }
+
+ ict = DIV_ROUND_UP(phy_clk, clk_get_rate(priv->clk));
+
+ nb8800_writeb(priv, NB8800_IC_THRESHOLD, ict);
+ nb8800_writeb(priv, NB8800_SLOT_TIME, slot_time);
+ nb8800_maskb(priv, NB8800_MAC_MODE, mac_mode_mask, mac_mode);
+}
+
+static void nb8800_pause_config(struct net_device *dev)
+{
+ struct nb8800_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+ u32 rxcr;
+
+ if (priv->pause_aneg) {
+ if (!phydev || !phydev->link)
+ return;
+
+ priv->pause_rx = phydev->pause;
+ priv->pause_tx = phydev->pause ^ phydev->asym_pause;
+ }
+
+ nb8800_modb(priv, NB8800_RX_CTL, RX_PAUSE_EN, priv->pause_rx);
+
+ rxcr = nb8800_readl(priv, NB8800_RXC_CR);
+ if (!!(rxcr & RCR_FL) == priv->pause_tx)
+ return;
+
+ if (netif_running(dev)) {
+ napi_disable(&priv->napi);
+ netif_tx_lock_bh(dev);
+ nb8800_dma_stop(dev);
+ nb8800_modl(priv, NB8800_RXC_CR, RCR_FL, priv->pause_tx);
+ nb8800_start_rx(dev);
+ netif_tx_unlock_bh(dev);
+ napi_enable(&priv->napi);
+ } else {
+ nb8800_modl(priv, NB8800_RXC_CR, RCR_FL, priv->pause_tx);
+ }
+}
+
+static void nb8800_link_reconfigure(struct net_device *dev)
+{
+ struct nb8800_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+ int change = 0;
+
+ if (phydev->link) {
+ if (phydev->speed != priv->speed) {
+ priv->speed = phydev->speed;
+ change = 1;
+ }
+
+ if (phydev->duplex != priv->duplex) {
+ priv->duplex = phydev->duplex;
+ change = 1;
+ }
+
+ if (change)
+ nb8800_mac_config(dev);
+
+ nb8800_pause_config(dev);
+ }
+
+ if (phydev->link != priv->link) {
+ priv->link = phydev->link;
+ change = 1;
+ }
+
+ if (change)
+ phy_print_status(priv->phydev);
+}
+
+static void nb8800_update_mac_addr(struct net_device *dev)
+{
+ struct nb8800_priv *priv = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ nb8800_writeb(priv, NB8800_SRC_ADDR(i), dev->dev_addr[i]);
+
+ for (i = 0; i < ETH_ALEN; i++)
+ nb8800_writeb(priv, NB8800_UC_ADDR(i), dev->dev_addr[i]);
+}
+
+static int nb8800_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *sock = addr;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ ether_addr_copy(dev->dev_addr, sock->sa_data);
+ nb8800_update_mac_addr(dev);
+
+ return 0;
+}
+
+static void nb8800_mc_init(struct net_device *dev, int val)
+{
+ struct nb8800_priv *priv = netdev_priv(dev);
+
+ nb8800_writeb(priv, NB8800_MC_INIT, val);
+ readb_poll_timeout_atomic(priv->base + NB8800_MC_INIT, val, !val,
+ 1, 1000);
+}
+
+static void nb8800_set_rx_mode(struct net_device *dev)
+{
+ struct nb8800_priv *priv = netdev_priv(dev);
+ struct netdev_hw_addr *ha;
+ int i;
+
+ if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
+ nb8800_mac_af(dev, false);
+ return;
+ }
+
+ nb8800_mac_af(dev, true);
+ nb8800_mc_init(dev, 0);
+
+ netdev_for_each_mc_addr(ha, dev) {
+ for (i = 0; i < ETH_ALEN; i++)
+ nb8800_writeb(priv, NB8800_MC_ADDR(i), ha->addr[i]);
+
+ nb8800_mc_init(dev, 0xff);
+ }
+}
+
+#define RX_DESC_SIZE (RX_DESC_COUNT * sizeof(struct nb8800_rx_desc))
+#define TX_DESC_SIZE (TX_DESC_COUNT * sizeof(struct nb8800_tx_desc))
+
+static void nb8800_dma_free(struct net_device *dev)
+{
+ struct nb8800_priv *priv = netdev_priv(dev);
+ unsigned int i;
+
+ if (priv->rx_bufs) {
+ for (i = 0; i < RX_DESC_COUNT; i++)
+ if (priv->rx_bufs[i].page)
+ put_page(priv->rx_bufs[i].page);
+
+ kfree(priv->rx_bufs);
+ priv->rx_bufs = NULL;
+ }
+
+ if (priv->tx_bufs) {
+ for (i = 0; i < TX_DESC_COUNT; i++)
+ kfree_skb(priv->tx_bufs[i].skb);
+
+ kfree(priv->tx_bufs);
+ priv->tx_bufs = NULL;
+ }
+
+ if (priv->rx_descs) {
+ dma_free_coherent(dev->dev.parent, RX_DESC_SIZE, priv->rx_descs,
+ priv->rx_desc_dma);
+ priv->rx_descs = NULL;
+ }
+
+ if (priv->tx_descs) {
+ dma_free_coherent(dev->dev.parent, TX_DESC_SIZE, priv->tx_descs,
+ priv->tx_desc_dma);
+ priv->tx_descs = NULL;
+ }
+}
+
+static void nb8800_dma_reset(struct net_device *dev)
+{
+ struct nb8800_priv *priv = netdev_priv(dev);
+ struct nb8800_rx_desc *rxd;
+ struct nb8800_tx_desc *txd;
+ unsigned int i;
+
+ for (i = 0; i < RX_DESC_COUNT; i++) {
+ dma_addr_t rx_dma = priv->rx_desc_dma + i * sizeof(*rxd);
+
+ rxd = &priv->rx_descs[i];
+ rxd->desc.n_addr = rx_dma + sizeof(*rxd);
+ rxd->desc.r_addr =
+ rx_dma + offsetof(struct nb8800_rx_desc, report);
+ rxd->desc.config = priv->rx_dma_config;
+ rxd->report = 0;
+ }
+
+ rxd->desc.n_addr = priv->rx_desc_dma;
+ rxd->desc.config |= DESC_EOC;
+
+ priv->rx_eoc = RX_DESC_COUNT - 1;
+
+ for (i = 0; i < TX_DESC_COUNT; i++) {
+ struct nb8800_tx_buf *txb = &priv->tx_bufs[i];
+ dma_addr_t r_dma = txb->dma_desc +
+ offsetof(struct nb8800_tx_desc, report);
+
+ txd = &priv->tx_descs[i];
+ txd->desc[0].r_addr = r_dma;
+ txd->desc[1].r_addr = r_dma;
+ txd->report = 0;
+ }
+
+ priv->tx_next = 0;
+ priv->tx_queue = 0;
+ priv->tx_done = 0;
+ atomic_set(&priv->tx_free, TX_DESC_COUNT);
+
+ nb8800_writel(priv, NB8800_RX_DESC_ADDR, priv->rx_desc_dma);
+
+ wmb(); /* ensure all setup is written before starting */
+}
+
+static int nb8800_dma_init(struct net_device *dev)
+{
+ struct nb8800_priv *priv = netdev_priv(dev);
+ unsigned int n_rx = RX_DESC_COUNT;
+ unsigned int n_tx = TX_DESC_COUNT;
+ unsigned int i;
+ int err;
+
+ priv->rx_descs = dma_alloc_coherent(dev->dev.parent, RX_DESC_SIZE,
+ &priv->rx_desc_dma, GFP_KERNEL);
+ if (!priv->rx_descs)
+ goto err_out;
+
+ priv->rx_bufs = kcalloc(n_rx, sizeof(*priv->rx_bufs), GFP_KERNEL);
+ if (!priv->rx_bufs)
+ goto err_out;
+
+ for (i = 0; i < n_rx; i++) {
+ err = nb8800_alloc_rx(dev, i, false);
+ if (err)
+ goto err_out;
+ }
+
+ priv->tx_descs = dma_alloc_coherent(dev->dev.parent, TX_DESC_SIZE,
+ &priv->tx_desc_dma, GFP_KERNEL);
+ if (!priv->tx_descs)
+ goto err_out;
+
+ priv->tx_bufs = kcalloc(n_tx, sizeof(*priv->tx_bufs), GFP_KERNEL);
+ if (!priv->tx_bufs)
+ goto err_out;
+
+ for (i = 0; i < n_tx; i++)
+ priv->tx_bufs[i].dma_desc =
+ priv->tx_desc_dma + i * sizeof(struct nb8800_tx_desc);
+
+ nb8800_dma_reset(dev);
+
+ return 0;
+
+err_out:
+ nb8800_dma_free(dev);
+
+ return -ENOMEM;
+}
+
+static int nb8800_dma_stop(struct net_device *dev)
+{
+ struct nb8800_priv *priv = netdev_priv(dev);
+ struct nb8800_tx_buf *txb = &priv->tx_bufs[0];
+ struct nb8800_tx_desc *txd = &priv->tx_descs[0];
+ int retry = 5;
+ u32 txcr;
+ u32 rxcr;
+ int err;
+ unsigned int i;
+
+ /* wait for tx to finish */
+ err = readl_poll_timeout_atomic(priv->base + NB8800_TXC_CR, txcr,
+ !(txcr & TCR_EN) &&
+ priv->tx_done == priv->tx_next,
+ 1000, 1000000);
+ if (err)
+ return err;
+
+ /* The rx DMA only stops if it reaches the end of chain.
+ * To make this happen, we set the EOC flag on all rx
+ * descriptors, put the device in loopback mode, and send
+ * a few dummy frames. The interrupt handler will ignore
+ * these since NAPI is disabled and no real frames are in
+ * the tx queue.
+ */
+
+ for (i = 0; i < RX_DESC_COUNT; i++)
+ priv->rx_descs[i].desc.config |= DESC_EOC;
+
+ txd->desc[0].s_addr =
+ txb->dma_desc + offsetof(struct nb8800_tx_desc, buf);
+ txd->desc[0].config = DESC_BTS(2) | DESC_DS | DESC_EOF | DESC_EOC | 8;
+ memset(txd->buf, 0, sizeof(txd->buf));
+
+ nb8800_mac_af(dev, false);
+ nb8800_setb(priv, NB8800_MAC_MODE, LOOPBACK_EN);
+
+ do {
+ nb8800_writel(priv, NB8800_TX_DESC_ADDR, txb->dma_desc);
+ wmb();
+ nb8800_writel(priv, NB8800_TXC_CR, txcr | TCR_EN);
+
+ err = readl_poll_timeout_atomic(priv->base + NB8800_RXC_CR,
+ rxcr, !(rxcr & RCR_EN),
+ 1000, 100000);
+ } while (err && --retry);
+
+ nb8800_mac_af(dev, true);
+ nb8800_clearb(priv, NB8800_MAC_MODE, LOOPBACK_EN);
+ nb8800_dma_reset(dev);
+
+ return retry ? 0 : -ETIMEDOUT;
+}
+
+static void nb8800_pause_adv(struct net_device *dev)
+{
+ struct nb8800_priv *priv = netdev_priv(dev);
+ u32 adv = 0;
+
+ if (!priv->phydev)
+ return;
+
+ if (priv->pause_rx)
+ adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+ if (priv->pause_tx)
+ adv ^= ADVERTISED_Asym_Pause;
+
+ priv->phydev->supported |= adv;
+ priv->phydev->advertising |= adv;
+}
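For clarity, the rx/tx pause flags map onto the ethtool advertisement bits as shown below; this helper is a standalone illustration only (nb8800_pause_bits is not part of the driver), assuming nothing beyond the standard ADVERTISED_Pause/ADVERTISED_Asym_Pause definitions:

#include <linux/types.h>
#include <linux/ethtool.h>

/*
 * Same mapping nb8800_pause_adv() computes:
 *
 *   pause_rx  pause_tx   advertised bits
 *      0         0       none
 *      0         1       Asym_Pause
 *      1         0       Pause | Asym_Pause
 *      1         1       Pause
 */
static u32 nb8800_pause_bits(bool rx, bool tx)
{
	u32 adv = 0;

	if (rx)
		adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	if (tx)
		adv ^= ADVERTISED_Asym_Pause;

	return adv;
}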
+
+static int nb8800_open(struct net_device *dev)
+{
+ struct nb8800_priv *priv = netdev_priv(dev);
+ int err;
+
+ /* clear any pending interrupts */
+ nb8800_writel(priv, NB8800_RXC_SR, 0xf);
+ nb8800_writel(priv, NB8800_TXC_SR, 0xf);
+
+ err = nb8800_dma_init(dev);
+ if (err)
+ return err;
+
+ err = request_irq(dev->irq, nb8800_irq, 0, dev_name(&dev->dev), dev);
+ if (err)
+ goto err_free_dma;
+
+ nb8800_mac_rx(dev, true);
+ nb8800_mac_tx(dev, true);
+
+ priv->phydev = of_phy_connect(dev, priv->phy_node,
+ nb8800_link_reconfigure, 0,
+ priv->phy_mode);
+ if (!priv->phydev)
+ goto err_free_irq;
+
+ nb8800_pause_adv(dev);
+
+ netdev_reset_queue(dev);
+ napi_enable(&priv->napi);
+ netif_start_queue(dev);
+
+ nb8800_start_rx(dev);
+ phy_start(priv->phydev);
+
+ return 0;
+
+err_free_irq:
+ free_irq(dev->irq, dev);
+err_free_dma:
+ nb8800_dma_free(dev);
+
+ return err;
+}
+
+static int nb8800_stop(struct net_device *dev)
+{
+ struct nb8800_priv *priv = netdev_priv(dev);
+
+ phy_stop(priv->phydev);
+
+ netif_stop_queue(dev);
+ napi_disable(&priv->napi);
+
+ nb8800_dma_stop(dev);
+ nb8800_mac_rx(dev, false);
+ nb8800_mac_tx(dev, false);
+
+ phy_disconnect(priv->phydev);
+ priv->phydev = NULL;
+
+ free_irq(dev->irq, dev);
+
+ nb8800_dma_free(dev);
+
+ return 0;
+}
+
+static int nb8800_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct nb8800_priv *priv = netdev_priv(dev);
+
+ return phy_mii_ioctl(priv->phydev, rq, cmd);
+}
+
+static const struct net_device_ops nb8800_netdev_ops = {
+ .ndo_open = nb8800_open,
+ .ndo_stop = nb8800_stop,
+ .ndo_start_xmit = nb8800_xmit,
+ .ndo_set_mac_address = nb8800_set_mac_address,
+ .ndo_set_rx_mode = nb8800_set_rx_mode,
+ .ndo_do_ioctl = nb8800_ioctl,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int nb8800_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct nb8800_priv *priv = netdev_priv(dev);
+
+ if (!priv->phydev)
+ return -ENODEV;
+
+ return phy_ethtool_gset(priv->phydev, cmd);
+}
+
+static int nb8800_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct nb8800_priv *priv = netdev_priv(dev);
+
+ if (!priv->phydev)
+ return -ENODEV;
+
+ return phy_ethtool_sset(priv->phydev, cmd);
+}
+
+static int nb8800_nway_reset(struct net_device *dev)
+{
+ struct nb8800_priv *priv = netdev_priv(dev);
+
+ if (!priv->phydev)
+ return -ENODEV;
+
+ return genphy_restart_aneg(priv->phydev);
+}
+
+static void nb8800_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pp)
+{
+ struct nb8800_priv *priv = netdev_priv(dev);
+
+ pp->autoneg = priv->pause_aneg;
+ pp->rx_pause = priv->pause_rx;
+ pp->tx_pause = priv->pause_tx;
+}
+
+static int nb8800_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pp)
+{
+ struct nb8800_priv *priv = netdev_priv(dev);
+
+ priv->pause_aneg = pp->autoneg;
+ priv->pause_rx = pp->rx_pause;
+ priv->pause_tx = pp->tx_pause;
+
+ nb8800_pause_adv(dev);
+
+ if (!priv->pause_aneg)
+ nb8800_pause_config(dev);
+ else if (priv->phydev)
+ phy_start_aneg(priv->phydev);
+
+ return 0;
+}
+
+static const char nb8800_stats_names[][ETH_GSTRING_LEN] = {
+ "rx_bytes_ok",
+ "rx_frames_ok",
+ "rx_undersize_frames",
+ "rx_fragment_frames",
+ "rx_64_byte_frames",
+ "rx_127_byte_frames",
+ "rx_255_byte_frames",
+ "rx_511_byte_frames",
+ "rx_1023_byte_frames",
+ "rx_max_size_frames",
+ "rx_oversize_frames",
+ "rx_bad_fcs_frames",
+ "rx_broadcast_frames",
+ "rx_multicast_frames",
+ "rx_control_frames",
+ "rx_pause_frames",
+ "rx_unsup_control_frames",
+ "rx_align_error_frames",
+ "rx_overrun_frames",
+ "rx_jabber_frames",
+ "rx_bytes",
+ "rx_frames",
+
+ "tx_bytes_ok",
+ "tx_frames_ok",
+ "tx_64_byte_frames",
+ "tx_127_byte_frames",
+ "tx_255_byte_frames",
+ "tx_511_byte_frames",
+ "tx_1023_byte_frames",
+ "tx_max_size_frames",
+ "tx_oversize_frames",
+ "tx_broadcast_frames",
+ "tx_multicast_frames",
+ "tx_control_frames",
+ "tx_pause_frames",
+ "tx_underrun_frames",
+ "tx_single_collision_frames",
+ "tx_multi_collision_frames",
+ "tx_deferred_collision_frames",
+ "tx_late_collision_frames",
+ "tx_excessive_collision_frames",
+ "tx_bytes",
+ "tx_frames",
+ "tx_collisions",
+};
+
+#define NB8800_NUM_STATS ARRAY_SIZE(nb8800_stats_names)
+
+static int nb8800_get_sset_count(struct net_device *dev, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return NB8800_NUM_STATS;
+
+ return -EOPNOTSUPP;
+}
+
+static void nb8800_get_strings(struct net_device *dev, u32 sset, u8 *buf)
+{
+ if (sset == ETH_SS_STATS)
+ memcpy(buf, &nb8800_stats_names, sizeof(nb8800_stats_names));
+}
+
+static u32 nb8800_read_stat(struct net_device *dev, int index)
+{
+ struct nb8800_priv *priv = netdev_priv(dev);
+
+ nb8800_writeb(priv, NB8800_STAT_INDEX, index);
+
+ return nb8800_readl(priv, NB8800_STAT_DATA);
+}
+
+static void nb8800_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *estats, u64 *st)
+{
+ unsigned int i;
+ u32 rx, tx;
+
+ for (i = 0; i < NB8800_NUM_STATS / 2; i++) {
+ rx = nb8800_read_stat(dev, i);
+ tx = nb8800_read_stat(dev, i | 0x80);
+ st[i] = rx;
+ st[i + NB8800_NUM_STATS / 2] = tx;
+ }
+}
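As a worked example of the indexing above: the names table is split into equal rx and tx halves (22 entries each, so NB8800_NUM_STATS is 44), and the tx counters appear to live at the same hardware indices as their rx counterparts with bit 7 set. st[0] ("rx_bytes_ok") is therefore read from index 0, while st[22] ("tx_bytes_ok") is read from index 0 | 0x80 = 0x80.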
+
+static const struct ethtool_ops nb8800_ethtool_ops = {
+ .get_settings = nb8800_get_settings,
+ .set_settings = nb8800_set_settings,
+ .nway_reset = nb8800_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_pauseparam = nb8800_get_pauseparam,
+ .set_pauseparam = nb8800_set_pauseparam,
+ .get_sset_count = nb8800_get_sset_count,
+ .get_strings = nb8800_get_strings,
+ .get_ethtool_stats = nb8800_get_ethtool_stats,
+};
+
+static int nb8800_hw_init(struct net_device *dev)
+{
+ struct nb8800_priv *priv = netdev_priv(dev);
+ u32 val;
+
+ val = TX_RETRY_EN | TX_PAD_EN | TX_APPEND_FCS;
+ nb8800_writeb(priv, NB8800_TX_CTL1, val);
+
+ /* Collision retry count */
+ nb8800_writeb(priv, NB8800_TX_CTL2, 5);
+
+ val = RX_PAD_STRIP | RX_AF_EN;
+ nb8800_writeb(priv, NB8800_RX_CTL, val);
+
+ /* Chosen by fair dice roll */
+ nb8800_writeb(priv, NB8800_RANDOM_SEED, 4);
+
+ /* TX cycles per deferral period */
+ nb8800_writeb(priv, NB8800_TX_SDP, 12);
+
+ /* The following three threshold values have been
+ * experimentally determined for good results.
+ */
+
+ /* RX/TX FIFO threshold for partial empty (64-bit entries) */
+ nb8800_writeb(priv, NB8800_PE_THRESHOLD, 0);
+
+ /* RX/TX FIFO threshold for partial full (64-bit entries) */
+ nb8800_writeb(priv, NB8800_PF_THRESHOLD, 255);
+
+ /* Buffer size for transmit (64-bit entries) */
+ nb8800_writeb(priv, NB8800_TX_BUFSIZE, 64);
+
+ /* Configure tx DMA */
+
+ val = nb8800_readl(priv, NB8800_TXC_CR);
+ val &= TCR_LE; /* keep endian setting */
+ val |= TCR_DM; /* DMA descriptor mode */
+ val |= TCR_RS; /* automatically store tx status */
+ val |= TCR_DIE; /* interrupt on DMA chain completion */
+ val |= TCR_TFI(7); /* interrupt after 7 frames transmitted */
+ val |= TCR_BTS(2); /* 32-byte bus transaction size */
+ nb8800_writel(priv, NB8800_TXC_CR, val);
+
+ /* TX complete interrupt after 10 ms or 7 frames (see above) */
+ val = clk_get_rate(priv->clk) / 100;
+ nb8800_writel(priv, NB8800_TX_ITR, val);
+
+ /* Configure rx DMA */
+
+ val = nb8800_readl(priv, NB8800_RXC_CR);
+ val &= RCR_LE; /* keep endian setting */
+ val |= RCR_DM; /* DMA descriptor mode */
+ val |= RCR_RS; /* automatically store rx status */
+ val |= RCR_DIE; /* interrupt at end of DMA chain */
+ val |= RCR_RFI(7); /* interrupt after 7 frames received */
+ val |= RCR_BTS(2); /* 32-byte bus transaction size */
+ nb8800_writel(priv, NB8800_RXC_CR, val);
+
+ /* The rx interrupt can fire before the DMA has completed
+ * unless a small delay is added. 50 us is hopefully enough.
+ */
+ priv->rx_itr_irq = clk_get_rate(priv->clk) / 20000;
+
+ /* In NAPI poll mode we want to disable interrupts, but the
+ * hardware does not permit this. Delay 10 ms instead.
+ */
+ priv->rx_itr_poll = clk_get_rate(priv->clk) / 100;
+
+ nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_irq);
+
+ priv->rx_dma_config = RX_BUF_SIZE | DESC_BTS(2) | DESC_DS | DESC_EOF;
+
+ /* Flow control settings */
+
+ /* Pause time of 0.1 ms */
+ val = 100000 / 512;
+ nb8800_writeb(priv, NB8800_PQ1, val >> 8);
+ nb8800_writeb(priv, NB8800_PQ2, val & 0xff);
+
+ /* Auto-negotiate by default */
+ priv->pause_aneg = true;
+ priv->pause_rx = true;
+ priv->pause_tx = true;
+
+ nb8800_mc_init(dev, 0);
+
+ return 0;
+}
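To make the interrupt-timer values above concrete: both dividers suggest the TX_ITR/RX_ITR registers count clock ticks, so with a hypothetical 250 MHz reference clock, clk_get_rate()/100 gives 2,500,000 ticks, roughly 10 ms, and clk_get_rate()/20000 gives 12,500 ticks, roughly 50 us, matching the comments in nb8800_hw_init().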
+
+static int nb8800_tangox_init(struct net_device *dev)
+{
+ struct nb8800_priv *priv = netdev_priv(dev);
+ u32 pad_mode = PAD_MODE_MII;
+
+ switch (priv->phy_mode) {
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_GMII:
+ pad_mode = PAD_MODE_MII;
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ pad_mode = PAD_MODE_RGMII;
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ pad_mode = PAD_MODE_RGMII | PAD_MODE_GTX_CLK_DELAY;
+ break;
+
+ default:
+ dev_err(dev->dev.parent, "unsupported phy mode %s\n",
+ phy_modes(priv->phy_mode));
+ return -EINVAL;
+ }
+
+ nb8800_writeb(priv, NB8800_TANGOX_PAD_MODE, pad_mode);
+
+ return 0;
+}
+
+static int nb8800_tangox_reset(struct net_device *dev)
+{
+ struct nb8800_priv *priv = netdev_priv(dev);
+ int clk_div;
+
+ nb8800_writeb(priv, NB8800_TANGOX_RESET, 0);
+ usleep_range(1000, 10000);
+ nb8800_writeb(priv, NB8800_TANGOX_RESET, 1);
+
+ wmb(); /* ensure reset is cleared before proceeding */
+
+ clk_div = DIV_ROUND_UP(clk_get_rate(priv->clk), 2 * MAX_MDC_CLOCK);
+ nb8800_writew(priv, NB8800_TANGOX_MDIO_CLKDIV, clk_div);
+
+ return 0;
+}
+
+static const struct nb8800_ops nb8800_tangox_ops = {
+ .init = nb8800_tangox_init,
+ .reset = nb8800_tangox_reset,
+};
+
+static int nb8800_tango4_init(struct net_device *dev)
+{
+ struct nb8800_priv *priv = netdev_priv(dev);
+ int err;
+
+ err = nb8800_tangox_init(dev);
+ if (err)
+ return err;
+
+ /* On tango4, interrupting on DMA completion per frame works and
+ * gives better performance despite generating more rx interrupts.
+ */

+
+ /* Disable unnecessary interrupt on rx completion */
+ nb8800_clearl(priv, NB8800_RXC_CR, RCR_RFI(7));
+
+ /* Request interrupt on descriptor DMA completion */
+ priv->rx_dma_config |= DESC_ID;
+
+ return 0;
+}
+
+static const struct nb8800_ops nb8800_tango4_ops = {
+ .init = nb8800_tango4_init,
+ .reset = nb8800_tangox_reset,
+};
+
+static const struct of_device_id nb8800_dt_ids[] = {
+ {
+ .compatible = "aurora,nb8800",
+ },
+ {
+ .compatible = "sigma,smp8642-ethernet",
+ .data = &nb8800_tangox_ops,
+ },
+ {
+ .compatible = "sigma,smp8734-ethernet",
+ .data = &nb8800_tango4_ops,
+ },
+ { }
+};
+
+static int nb8800_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ const struct nb8800_ops *ops = NULL;
+ struct nb8800_priv *priv;
+ struct resource *res;
+ struct net_device *dev;
+ struct mii_bus *bus;
+ const unsigned char *mac;
+ void __iomem *base;
+ int irq;
+ int ret;
+
+ match = of_match_device(nb8800_dt_ids, &pdev->dev);
+ if (match)
+ ops = match->data;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(&pdev->dev, "No IRQ\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ dev_dbg(&pdev->dev, "AU-NB8800 Ethernet at %pa\n", &res->start);
+
+ dev = alloc_etherdev(sizeof(*priv));
+ if (!dev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ priv = netdev_priv(dev);
+ priv->base = base;
+
+ priv->phy_mode = of_get_phy_mode(pdev->dev.of_node);
+ if (priv->phy_mode < 0)
+ priv->phy_mode = PHY_INTERFACE_MODE_RGMII;
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ ret = PTR_ERR(priv->clk);
+ goto err_free_dev;
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ goto err_free_dev;
+
+ spin_lock_init(&priv->tx_lock);
+
+ if (ops && ops->reset) {
+ ret = ops->reset(dev);
+ if (ret)
+ goto err_free_dev;
+ }
+
+ bus = devm_mdiobus_alloc(&pdev->dev);
+ if (!bus) {
+ ret = -ENOMEM;
+ goto err_disable_clk;
+ }
+
+ bus->name = "nb8800-mii";
+ bus->read = nb8800_mdio_read;
+ bus->write = nb8800_mdio_write;
+ bus->parent = &pdev->dev;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%lx.nb8800-mii",
+ (unsigned long)res->start);
+ bus->priv = priv;
+
+ ret = of_mdiobus_register(bus, pdev->dev.of_node);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register MII bus\n");
+ goto err_disable_clk;
+ }
+
+ priv->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ if (!priv->phy_node) {
+ dev_err(&pdev->dev, "no PHY specified\n");
+ ret = -ENODEV;
+ goto err_free_bus;
+ }
+
+ priv->mii_bus = bus;
+
+ ret = nb8800_hw_init(dev);
+ if (ret)
+ goto err_free_bus;
+
+ if (ops && ops->init) {
+ ret = ops->init(dev);
+ if (ret)
+ goto err_free_bus;
+ }
+
+ dev->netdev_ops = &nb8800_netdev_ops;
+ dev->ethtool_ops = &nb8800_ethtool_ops;
+ dev->flags |= IFF_MULTICAST;
+ dev->irq = irq;
+
+ mac = of_get_mac_address(pdev->dev.of_node);
+ if (mac)
+ ether_addr_copy(dev->dev_addr, mac);
+
+ if (!is_valid_ether_addr(dev->dev_addr))
+ eth_hw_addr_random(dev);
+
+ nb8800_update_mac_addr(dev);
+
+ netif_carrier_off(dev);
+
+ ret = register_netdev(dev);
+ if (ret) {
+ netdev_err(dev, "failed to register netdev\n");
+ goto err_free_dma;
+ }
+
+ netif_napi_add(dev, &priv->napi, nb8800_poll, NAPI_POLL_WEIGHT);
+
+ netdev_info(dev, "MAC address %pM\n", dev->dev_addr);
+
+ return 0;
+
+err_free_dma:
+ nb8800_dma_free(dev);
+err_free_bus:
+ mdiobus_unregister(bus);
+err_disable_clk:
+ clk_disable_unprepare(priv->clk);
+err_free_dev:
+ free_netdev(dev);
+
+ return ret;
+}
+
+static int nb8800_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct nb8800_priv *priv = netdev_priv(ndev);
+
+ unregister_netdev(ndev);
+
+ mdiobus_unregister(priv->mii_bus);
+
+ clk_disable_unprepare(priv->clk);
+
+ nb8800_dma_free(ndev);
+ free_netdev(ndev);
+
+ return 0;
+}
+
+static struct platform_driver nb8800_driver = {
+ .driver = {
+ .name = "nb8800",
+ .of_match_table = nb8800_dt_ids,
+ },
+ .probe = nb8800_probe,
+ .remove = nb8800_remove,
+};
+
+module_platform_driver(nb8800_driver);
+
+MODULE_DESCRIPTION("Aurora AU-NB8800 Ethernet driver");
+MODULE_AUTHOR("Mans Rullgard <mans@mansr.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/aurora/nb8800.h b/drivers/net/ethernet/aurora/nb8800.h
new file mode 100644
index 0000000..e5adbc2
--- /dev/null
+++ b/drivers/net/ethernet/aurora/nb8800.h
@@ -0,0 +1,316 @@
+#ifndef _NB8800_H_
+#define _NB8800_H_
+
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/phy.h>
+#include <linux/clk.h>
+#include <linux/bitops.h>
+
+#define RX_DESC_COUNT 256
+#define TX_DESC_COUNT 256
+
+#define NB8800_DESC_LOW 4
+
+#define RX_BUF_SIZE 1552
+
+#define RX_COPYBREAK 256
+#define RX_COPYHDR 128
+
+#define MAX_MDC_CLOCK 2500000
+
+/* Stargate Solutions SSN8800 core registers */
+#define NB8800_TX_CTL1 0x000
+#define TX_TPD BIT(5)
+#define TX_APPEND_FCS BIT(4)
+#define TX_PAD_EN BIT(3)
+#define TX_RETRY_EN BIT(2)
+#define TX_EN BIT(0)
+
+#define NB8800_TX_CTL2 0x001
+
+#define NB8800_RX_CTL 0x004
+#define RX_BC_DISABLE BIT(7)
+#define RX_RUNT BIT(6)
+#define RX_AF_EN BIT(5)
+#define RX_PAUSE_EN BIT(3)
+#define RX_SEND_CRC BIT(2)
+#define RX_PAD_STRIP BIT(1)
+#define RX_EN BIT(0)
+
+#define NB8800_RANDOM_SEED 0x008
+#define NB8800_TX_SDP 0x14
+#define NB8800_TX_TPDP1 0x18
+#define NB8800_TX_TPDP2 0x19
+#define NB8800_SLOT_TIME 0x1c
+
+#define NB8800_MDIO_CMD 0x020
+#define MDIO_CMD_GO BIT(31)
+#define MDIO_CMD_WR BIT(26)
+#define MDIO_CMD_ADDR(x) ((x) << 21)
+#define MDIO_CMD_REG(x) ((x) << 16)
+#define MDIO_CMD_DATA(x) ((x) << 0)
+
+#define NB8800_MDIO_STS 0x024
+#define MDIO_STS_ERR BIT(31)
+
+#define NB8800_MC_ADDR(i) (0x028 + (i))
+#define NB8800_MC_INIT 0x02e
+#define NB8800_UC_ADDR(i) (0x03c + (i))
+
+#define NB8800_MAC_MODE 0x044
+#define RGMII_MODE BIT(7)
+#define HALF_DUPLEX BIT(4)
+#define BURST_EN BIT(3)
+#define LOOPBACK_EN BIT(2)
+#define GMAC_MODE BIT(0)
+
+#define NB8800_IC_THRESHOLD 0x050
+#define NB8800_PE_THRESHOLD 0x051
+#define NB8800_PF_THRESHOLD 0x052
+#define NB8800_TX_BUFSIZE 0x054
+#define NB8800_FIFO_CTL 0x056
+#define NB8800_PQ1 0x060
+#define NB8800_PQ2 0x061
+#define NB8800_SRC_ADDR(i) (0x06a + (i))
+#define NB8800_STAT_DATA 0x078
+#define NB8800_STAT_INDEX 0x07c
+#define NB8800_STAT_CLEAR 0x07d
+
+#define NB8800_SLEEP_MODE 0x07e
+#define SLEEP_MODE BIT(0)
+
+#define NB8800_WAKEUP 0x07f
+#define WAKEUP BIT(0)
+
+/* Aurora NB8800 host interface registers */
+#define NB8800_TXC_CR 0x100
+#define TCR_LK BIT(12)
+#define TCR_DS BIT(11)
+#define TCR_BTS(x) (((x) & 0x7) << 8)
+#define TCR_DIE BIT(7)
+#define TCR_TFI(x) (((x) & 0x7) << 4)
+#define TCR_LE BIT(3)
+#define TCR_RS BIT(2)
+#define TCR_DM BIT(1)
+#define TCR_EN BIT(0)
+
+#define NB8800_TXC_SR 0x104
+#define TSR_DE BIT(3)
+#define TSR_DI BIT(2)
+#define TSR_TO BIT(1)
+#define TSR_TI BIT(0)
+
+#define NB8800_TX_SAR 0x108
+#define NB8800_TX_DESC_ADDR 0x10c
+
+#define NB8800_TX_REPORT_ADDR 0x110
+#define TX_BYTES_TRANSFERRED(x) (((x) >> 16) & 0xffff)
+#define TX_FIRST_DEFERRAL BIT(7)
+#define TX_EARLY_COLLISIONS(x) (((x) >> 3) & 0xf)
+#define TX_LATE_COLLISION BIT(2)
+#define TX_PACKET_DROPPED BIT(1)
+#define TX_FIFO_UNDERRUN BIT(0)
+#define IS_TX_ERROR(r) ((r) & 0x07)
+
+#define NB8800_TX_FIFO_SR 0x114
+#define NB8800_TX_ITR 0x118
+
+#define NB8800_RXC_CR 0x200
+#define RCR_FL BIT(13)
+#define RCR_LK BIT(12)
+#define RCR_DS BIT(11)
+#define RCR_BTS(x) (((x) & 7) << 8)
+#define RCR_DIE BIT(7)
+#define RCR_RFI(x) (((x) & 7) << 4)
+#define RCR_LE BIT(3)
+#define RCR_RS BIT(2)
+#define RCR_DM BIT(1)
+#define RCR_EN BIT(0)
+
+#define NB8800_RXC_SR 0x204
+#define RSR_DE BIT(3)
+#define RSR_DI BIT(2)
+#define RSR_RO BIT(1)
+#define RSR_RI BIT(0)
+
+#define NB8800_RX_SAR 0x208
+#define NB8800_RX_DESC_ADDR 0x20c
+
+#define NB8800_RX_REPORT_ADDR 0x210
+#define RX_BYTES_TRANSFERRED(x) (((x) >> 16) & 0xFFFF)
+#define RX_MULTICAST_PKT BIT(9)
+#define RX_BROADCAST_PKT BIT(8)
+#define RX_LENGTH_ERR BIT(7)
+#define RX_FCS_ERR BIT(6)
+#define RX_RUNT_PKT BIT(5)
+#define RX_FIFO_OVERRUN BIT(4)
+#define RX_LATE_COLLISION BIT(3)
+#define RX_ALIGNMENT_ERROR BIT(2)
+#define RX_ERROR_MASK 0xfc
+#define IS_RX_ERROR(r) ((r) & RX_ERROR_MASK)
+
+#define NB8800_RX_FIFO_SR 0x214
+#define NB8800_RX_ITR 0x218
+
+/* Sigma Designs SMP86xx additional registers */
+#define NB8800_TANGOX_PAD_MODE 0x400
+#define PAD_MODE_MASK 0x7
+#define PAD_MODE_MII 0x0
+#define PAD_MODE_RGMII 0x1
+#define PAD_MODE_GTX_CLK_INV BIT(3)
+#define PAD_MODE_GTX_CLK_DELAY BIT(4)
+
+#define NB8800_TANGOX_MDIO_CLKDIV 0x420
+#define NB8800_TANGOX_RESET 0x424
+
+/* Hardware DMA descriptor */
+struct nb8800_dma_desc {
+ u32 s_addr; /* start address */
+ u32 n_addr; /* next descriptor address */
+ u32 r_addr; /* report address */
+ u32 config;
+} __aligned(8);
+
+#define DESC_ID BIT(23)
+#define DESC_EOC BIT(22)
+#define DESC_EOF BIT(21)
+#define DESC_LK BIT(20)
+#define DESC_DS BIT(19)
+#define DESC_BTS(x) (((x) & 0x7) << 16)
+
+/* DMA descriptor and associated data for rx.
+ * Allocated from coherent memory.
+ */
+struct nb8800_rx_desc {
+ /* DMA descriptor */
+ struct nb8800_dma_desc desc;
+
+ /* Status report filled in by hardware */
+ u32 report;
+};
+
+/* Address of buffer on rx ring */
+struct nb8800_rx_buf {
+ struct page *page;
+ unsigned long offset;
+};
+
+/* DMA descriptors and associated data for tx.
+ * Allocated from coherent memory.
+ */
+struct nb8800_tx_desc {
+ /* DMA descriptor. The second descriptor is used if packet
+ * data is unaligned.
+ */
+ struct nb8800_dma_desc desc[2];
+
+ /* Status report filled in by hardware */
+ u32 report;
+
+ /* Bounce buffer for initial unaligned part of packet */
+ u8 buf[8] __aligned(8);
+};
+
+/* Packet in tx queue */
+struct nb8800_tx_buf {
+ /* Currently queued skb */
+ struct sk_buff *skb;
+
+ /* DMA address of the first descriptor */
+ dma_addr_t dma_desc;
+
+ /* DMA address of packet data */
+ dma_addr_t dma_addr;
+
+ /* Length of DMA mapping, less than skb->len if alignment
+ * buffer is used.
+ */
+ unsigned int dma_len;
+
+ /* Number of packets in chain starting here */
+ unsigned int chain_len;
+
+ /* Packet chain ready to be submitted to hardware */
+ bool ready;
+};
+
+struct nb8800_priv {
+ struct napi_struct napi;
+
+ void __iomem *base;
+
+ /* RX DMA descriptors */
+ struct nb8800_rx_desc *rx_descs;
+
+ /* RX buffers referenced by DMA descriptors */
+ struct nb8800_rx_buf *rx_bufs;
+
+ /* Current end of chain */
+ u32 rx_eoc;
+
+ /* Value for rx interrupt time register in NAPI interrupt mode */
+ u32 rx_itr_irq;
+
+ /* Value for rx interrupt time register in NAPI poll mode */
+ u32 rx_itr_poll;
+
+ /* Value for config field of rx DMA descriptors */
+ u32 rx_dma_config;
+
+ /* TX DMA descriptors */
+ struct nb8800_tx_desc *tx_descs;
+
+ /* TX packet queue */
+ struct nb8800_tx_buf *tx_bufs;
+
+ /* Number of free tx queue entries */
+ atomic_t tx_free;
+
+ /* First free tx queue entry */
+ u32 tx_next;
+
+ /* Next buffer to transmit */
+ u32 tx_queue;
+
+ /* Start of current packet chain */
+ struct nb8800_tx_buf *tx_chain;
+
+ /* Next buffer to reclaim */
+ u32 tx_done;
+
+ /* Lock for DMA activation */
+ spinlock_t tx_lock;
+
+ struct mii_bus *mii_bus;
+ struct device_node *phy_node;
+ struct phy_device *phydev;
+
+ /* PHY connection type from DT */
+ int phy_mode;
+
+ /* Current link status */
+ int speed;
+ int duplex;
+ int link;
+
+ /* Pause settings */
+ bool pause_aneg;
+ bool pause_rx;
+ bool pause_tx;
+
+ /* DMA base address of rx descriptors, see rx_descs above */
+ dma_addr_t rx_desc_dma;
+
+ /* DMA base address of tx descriptors, see tx_descs above */
+ dma_addr_t tx_desc_dma;
+
+ struct clk *clk;
+};
+
+struct nb8800_ops {
+ int (*init)(struct net_device *dev);
+ int (*reset)(struct net_device *dev);
+};
+
+#endif /* _NB8800_H_ */
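As a rough sketch of how the MDIO_CMD_* field macros above fit together (the driver's real nb8800_mdio_read()/nb8800_mdio_write() live earlier in nb8800.c and are not shown in this hunk, so the helper name and the exact command sequence here are assumptions):

#include "nb8800.h"

/* Hypothetical helper: compose a PHY-register write command word from
 * the field macros defined above.  The driver would still have to set
 * MDIO_CMD_GO to issue the command and check MDIO_STS_ERR afterwards.
 */
static u32 nb8800_mdio_write_cmd(int phy, int reg, u16 val)
{
	return MDIO_CMD_WR | MDIO_CMD_ADDR(phy) |
	       MDIO_CMD_REG(reg) | MDIO_CMD_DATA(val);
}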
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index c9b0367..2e611dc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -10139,8 +10139,8 @@ static void __bnx2x_del_vxlan_port(struct bnx2x *bp, u16 port)
DP(BNX2X_MSG_SP, "Invalid vxlan port\n");
return;
}
- bp->vxlan_dst_port--;
- if (bp->vxlan_dst_port)
+ bp->vxlan_dst_port_count--;
+ if (bp->vxlan_dst_port_count)
return;
if (netif_running(bp->dev)) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index db15c5e..bdf094f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3625,6 +3625,7 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
pf->fw_fid = le16_to_cpu(resp->fid);
pf->port_id = le16_to_cpu(resp->port_id);
memcpy(pf->mac_addr, resp->perm_mac_address, ETH_ALEN);
+ memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
@@ -3648,8 +3649,11 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
vf->fw_fid = le16_to_cpu(resp->fid);
memcpy(vf->mac_addr, resp->perm_mac_address, ETH_ALEN);
- if (!is_valid_ether_addr(vf->mac_addr))
- random_ether_addr(vf->mac_addr);
+ if (is_valid_ether_addr(vf->mac_addr))
+ /* overwrite netdev dev_addr with admin VF MAC */
+ memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
+ else
+ random_ether_addr(bp->dev->dev_addr);
vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
@@ -3880,6 +3884,8 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
#endif
}
+static int bnxt_cfg_rx_mode(struct bnxt *);
+
static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
{
int rc = 0;
@@ -3946,11 +3952,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
bp->vnic_info[0].rx_mask |=
CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
- rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
- if (rc) {
- netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", rc);
+ rc = bnxt_cfg_rx_mode(bp);
+ if (rc)
goto err_out;
- }
rc = bnxt_hwrm_set_coal(bp);
if (rc)
@@ -4865,7 +4869,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
}
}
-static void bnxt_cfg_rx_mode(struct bnxt *bp)
+static int bnxt_cfg_rx_mode(struct bnxt *bp)
{
struct net_device *dev = bp->dev;
struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
@@ -4914,6 +4918,7 @@ static void bnxt_cfg_rx_mode(struct bnxt *bp)
netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
rc);
vnic->uc_filter_count = i;
+ return rc;
}
}
@@ -4922,6 +4927,8 @@ skip_uc:
if (rc)
netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
rc);
+
+ return rc;
}
static netdev_features_t bnxt_fix_features(struct net_device *dev,
@@ -5212,13 +5219,27 @@ init_err:
static int bnxt_change_mac_addr(struct net_device *dev, void *p)
{
struct sockaddr *addr = p;
+ struct bnxt *bp = netdev_priv(dev);
+ int rc = 0;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
+#ifdef CONFIG_BNXT_SRIOV
+ if (BNXT_VF(bp) && is_valid_ether_addr(bp->vf.mac_addr))
+ return -EADDRNOTAVAIL;
+#endif
+
+ if (ether_addr_equal(addr->sa_data, dev->dev_addr))
+ return 0;
+
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ if (netif_running(dev)) {
+ bnxt_close_nic(bp, false, false);
+ rc = bnxt_open_nic(bp, false, false);
+ }
- return 0;
+ return rc;
}
/* rtnl_lock held */
@@ -5686,15 +5707,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_set_tpa_flags(bp);
bnxt_set_ring_params(bp);
dflt_rings = netif_get_num_default_rss_queues();
- if (BNXT_PF(bp)) {
- memcpy(dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
+ if (BNXT_PF(bp))
bp->pf.max_irqs = max_irqs;
- } else {
#if defined(CONFIG_BNXT_SRIOV)
- memcpy(dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
+ else
bp->vf.max_irqs = max_irqs;
#endif
- }
bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index f4cf688..7a9af28 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -804,10 +804,9 @@ void bnxt_update_vf_mac(struct bnxt *bp)
if (!is_valid_ether_addr(resp->perm_mac_address))
goto update_vf_mac_exit;
- if (ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr))
- goto update_vf_mac_exit;
-
- memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN);
+ if (!ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr))
+ memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN);
+ /* overwrite netdev dev_addr with admin VF MAC */
memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
update_vf_mac_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 88c1e1a..169059c 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -1682,6 +1682,8 @@ static void macb_init_hw(struct macb *bp)
macb_set_hwaddr(bp);
config = macb_mdc_clk_div(bp);
+ if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
+ config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */
config |= MACB_BIT(PAE); /* PAuse Enable */
config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
@@ -2416,6 +2418,8 @@ static int macb_init(struct platform_device *pdev)
/* Set MII management clock divider */
val = macb_mdc_clk_div(bp);
val |= macb_dbw(bp);
+ if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
+ val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
macb_writel(bp, NCFGR, val);
return 0;
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 6e1faea..d83b0db 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -215,12 +215,17 @@
/* GEM specific NCFGR bitfields. */
#define GEM_GBE_OFFSET 10 /* Gigabit mode enable */
#define GEM_GBE_SIZE 1
+#define GEM_PCSSEL_OFFSET 11
+#define GEM_PCSSEL_SIZE 1
#define GEM_CLK_OFFSET 18 /* MDC clock division */
#define GEM_CLK_SIZE 3
#define GEM_DBW_OFFSET 21 /* Data bus width */
#define GEM_DBW_SIZE 2
#define GEM_RXCOEN_OFFSET 24
#define GEM_RXCOEN_SIZE 1
+#define GEM_SGMIIEN_OFFSET 27
+#define GEM_SGMIIEN_SIZE 1
+
/* Constants for data bus width. */
#define GEM_DBW32 0 /* 32 bit AMBA AHB data bus width */
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index d3950b2..39ca674 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -120,10 +120,9 @@
* Calculated for SCLK of 700Mhz
* value written should be a 1/16th of what is expected
*
- * 1 tick per 0.05usec = value of 2.2
- * This 10% would be covered in CQ timer thresh value
+ * 1 tick per 0.025usec
*/
-#define NICPF_CLK_PER_INT_TICK 2
+#define NICPF_CLK_PER_INT_TICK 1
/* Time to wait before we decide that a SQ is stuck.
*
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index c561fdc..4b7fd63 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -37,6 +37,7 @@ struct nicpf {
#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF)
#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF)
u8 vf_lmac_map[MAX_LMAC];
+ u8 lmac_cnt;
struct delayed_work dwork;
struct workqueue_struct *check_link;
u8 link[MAX_LMAC];
@@ -279,6 +280,7 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic)
u64 lmac_credit;
nic->num_vf_en = 0;
+ nic->lmac_cnt = 0;
for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) {
if (!(bgx_map & (1 << bgx)))
@@ -288,6 +290,7 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic)
nic->vf_lmac_map[next_bgx_lmac++] =
NIC_SET_VF_LMAC_MAP(bgx, lmac);
nic->num_vf_en += lmac_cnt;
+ nic->lmac_cnt += lmac_cnt;
/* Program LMAC credits */
lmac_credit = (1ull << 1); /* channel credit enable */
@@ -715,6 +718,13 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
case NIC_MBOX_MSG_CFG_DONE:
/* Last message of VF config msg sequence */
nic->vf_enabled[vf] = true;
+ if (vf >= nic->lmac_cnt)
+ goto unlock;
+
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+ bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, true);
goto unlock;
case NIC_MBOX_MSG_SHUTDOWN:
/* First msg in VF teardown sequence */
@@ -722,6 +732,14 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
if (vf >= nic->num_vf_en)
nic->sqs_used[vf - nic->num_vf_en] = false;
nic->pqs_vf[vf] = 0;
+
+ if (vf >= nic->lmac_cnt)
+ break;
+
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+ bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, false);
break;
case NIC_MBOX_MSG_ALLOC_SQS:
nic_alloc_sqs(nic, &mbx.sqs_alloc);
@@ -940,7 +958,7 @@ static void nic_poll_for_link(struct work_struct *work)
mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
- for (vf = 0; vf < nic->num_vf_en; vf++) {
+ for (vf = 0; vf < nic->lmac_cnt; vf++) {
/* Poll only if VF is UP */
if (!nic->vf_enabled[vf])
continue;
@@ -1074,8 +1092,7 @@ static void nic_remove(struct pci_dev *pdev)
if (nic->check_link) {
/* Destroy work Queue */
- cancel_delayed_work(&nic->dwork);
- flush_workqueue(nic->check_link);
+ cancel_delayed_work_sync(&nic->dwork);
destroy_workqueue(nic->check_link);
}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index af54c10..a12b2e3 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -112,6 +112,13 @@ static int nicvf_get_settings(struct net_device *netdev,
cmd->supported = 0;
cmd->transceiver = XCVR_EXTERNAL;
+
+ if (!nic->link_up) {
+ cmd->duplex = DUPLEX_UNKNOWN;
+ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ return 0;
+ }
+
if (nic->speed <= 1000) {
cmd->port = PORT_MII;
cmd->autoneg = AUTONEG_ENABLE;
@@ -125,6 +132,13 @@ static int nicvf_get_settings(struct net_device *netdev,
return 0;
}
+static u32 nicvf_get_link(struct net_device *netdev)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ return nic->link_up;
+}
+
static void nicvf_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
@@ -660,7 +674,7 @@ static int nicvf_set_channels(struct net_device *dev,
static const struct ethtool_ops nicvf_ethtool_ops = {
.get_settings = nicvf_get_settings,
- .get_link = ethtool_op_get_link,
+ .get_link = nicvf_get_link,
.get_drvinfo = nicvf_get_drvinfo,
.get_msglevel = nicvf_get_msglevel,
.set_msglevel = nicvf_set_msglevel,
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 7f709cb..dde8dc7 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1057,6 +1057,7 @@ int nicvf_stop(struct net_device *netdev)
netif_carrier_off(netdev);
netif_tx_stop_all_queues(nic->netdev);
+ nic->link_up = false;
/* Teardown secondary qsets first */
if (!nic->sqs_mode) {
@@ -1211,9 +1212,6 @@ int nicvf_open(struct net_device *netdev)
nic->drv_stats.txq_stop = 0;
nic->drv_stats.txq_wake = 0;
- netif_carrier_on(netdev);
- netif_tx_start_all_queues(netdev);
-
return 0;
cleanup:
nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index e404ea8..206b6a7 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -592,7 +592,7 @@ void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
/* Set threshold value for interrupt generation */
nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
- qidx, nic->cq_coalesce_usecs);
+ qidx, CMP_QUEUE_TIMER_THRESH);
}
/* Configures transmit queue */
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index fb4957d..033e830 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -76,7 +76,7 @@
#define CMP_QSIZE CMP_QUEUE_SIZE2
#define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10))
#define CMP_QUEUE_CQE_THRESH 0
-#define CMP_QUEUE_TIMER_THRESH 220 /* 10usec */
+#define CMP_QUEUE_TIMER_THRESH 80 /* ~2usec */
#define RBDR_SIZE RBDR_SIZE0
#define RCV_BUF_COUNT (1ULL << (RBDR_SIZE + 13))
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 180aa9f..9df26c2 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -186,6 +186,23 @@ void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
}
EXPORT_SYMBOL(bgx_set_lmac_mac);
+void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
+{
+ struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ u64 cfg;
+
+ if (!bgx)
+ return;
+
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ if (enable)
+ cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN;
+ else
+ cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+}
+EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
+
static void bgx_sgmii_change_link_state(struct lmac *lmac)
{
struct bgx *bgx = lmac->bgx;
@@ -612,6 +629,8 @@ static void bgx_poll_for_link(struct work_struct *work)
lmac->last_duplex = 1;
} else {
lmac->link_up = 0;
+ lmac->last_speed = SPEED_UNKNOWN;
+ lmac->last_duplex = DUPLEX_UNKNOWN;
}
if (lmac->last_link != lmac->link_up) {
@@ -654,8 +673,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
}
/* Enable lmac */
- bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG,
- CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN);
+ bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
/* Restore default cfg, in case low level firmware changed it */
bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);
@@ -695,8 +713,7 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
lmac = &bgx->lmac[lmacid];
if (lmac->check_link) {
/* Destroy work queue */
- cancel_delayed_work(&lmac->dwork);
- flush_workqueue(lmac->check_link);
+ cancel_delayed_work_sync(&lmac->dwork);
destroy_workqueue(lmac->check_link);
}
@@ -1009,6 +1026,9 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct bgx *bgx = NULL;
u8 lmac;
+ /* Load octeon mdio driver */
+ octeon_mdiobus_force_mod_depencency();
+
bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
if (!bgx)
return -ENOMEM;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index 07b7ec66..149e179 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -182,6 +182,8 @@ enum MCAST_MODE {
#define BCAST_ACCEPT 1
#define CAM_ACCEPT 1
+void octeon_mdiobus_force_mod_depencency(void);
+void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable);
void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac);
unsigned bgx_get_map(int node);
int bgx_get_lmac_count(int node, int bgx);
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index ed41559..b553409 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -98,8 +98,7 @@ static int csr0 = 0x01A00000 | 0x4800;
#elif defined(__mips__)
static int csr0 = 0x00200000 | 0x4000;
#else
-#warning Processor architecture undefined!
-static int csr0 = 0x00A00000 | 0x4800;
+static int csr0;
#endif
/* Operational parameters that usually are not changed. */
@@ -1982,6 +1981,12 @@ static int __init tulip_init (void)
pr_info("%s", version);
#endif
+ if (!csr0) {
+ pr_warn("tulip: unknown CPU architecture, using default csr0\n");
+ /* default to 8 longword cache line alignment */
+ csr0 = 0x00A00000 | 0x4800;
+ }
+
/* copy module parms into globals */
tulip_rx_copybreak = rx_copybreak;
tulip_max_interrupt_work = max_interrupt_work;
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 9beb3d3..3c0e4d5 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -907,7 +907,7 @@ static void init_registers(struct net_device *dev)
#elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC) || defined(CONFIG_ARM)
i |= 0x4800;
#else
-#warning Processor architecture undefined
+ dev_warn(&dev->dev, "unknown CPU architecture, using default csr0 setting\n");
i |= 0x4800;
#endif
iowrite32(i, ioaddr + PCIBusCfg);
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index ff76d4e..bee32a9 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -7,7 +7,8 @@ config NET_VENDOR_FREESCALE
default y
depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \
M523x || M527x || M5272 || M528x || M520x || M532x || \
- ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM)
+ ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) || \
+ ARCH_LAYERSCAPE
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 3e6b9b4..7cf8984 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -647,9 +647,9 @@ static int gfar_parse_group(struct device_node *np,
if (model && strcasecmp(model, "FEC")) {
gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
- if (gfar_irq(grp, TX)->irq == NO_IRQ ||
- gfar_irq(grp, RX)->irq == NO_IRQ ||
- gfar_irq(grp, ER)->irq == NO_IRQ)
+ if (!gfar_irq(grp, TX)->irq ||
+ !gfar_irq(grp, RX)->irq ||
+ !gfar_irq(grp, ER)->irq)
return -EINVAL;
}
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 664d0c2..b40fba9 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -467,7 +467,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
etsects->irq = platform_get_irq(dev, 0);
- if (etsects->irq == NO_IRQ) {
+ if (etsects->irq < 0) {
pr_err("irq not in device tree\n");
goto no_node;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 639263d..7781e80 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -627,8 +627,10 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
/* verify the skb head is not shared */
err = skb_cow_head(skb, 0);
- if (err)
+ if (err) {
+ dev_kfree_skb(skb);
return NETDEV_TX_OK;
+ }
/* locate vlan header */
vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index e84c7f2..ed622fa 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -36,7 +36,7 @@
/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
-#define MVNETA_RXQ_HW_BUF_ALLOC BIT(1)
+#define MVNETA_RXQ_HW_BUF_ALLOC BIT(0)
#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8)
#define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2))
@@ -62,6 +62,7 @@
#define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2))
#define MVNETA_BASE_ADDR_ENABLE 0x2290
+#define MVNETA_ACCESS_PROTECT_ENABLE 0x2294
#define MVNETA_PORT_CONFIG 0x2400
#define MVNETA_UNI_PROMISC_MODE BIT(0)
#define MVNETA_DEF_RXQ(q) ((q) << 1)
@@ -159,7 +160,7 @@
#define MVNETA_INTR_ENABLE 0x25b8
#define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00
-#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000 // note: neta says it's 0x000000FF
+#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0x000000ff
#define MVNETA_RXQ_CMD 0x2680
#define MVNETA_RXQ_DISABLE_SHIFT 8
@@ -242,6 +243,7 @@
#define MVNETA_VLAN_TAG_LEN 4
#define MVNETA_CPU_D_CACHE_LINE_SIZE 32
+#define MVNETA_TX_CSUM_DEF_SIZE 1600
#define MVNETA_TX_CSUM_MAX_SIZE 9800
#define MVNETA_ACC_MODE_EXT 1
@@ -1579,12 +1581,16 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
}
skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
- if (!skb)
- goto err_drop_frame;
+ /* After refill, the old buffer has to be unmapped regardless
+ * of whether the skb was successfully built or not.
+ */
dma_unmap_single(dev->dev.parent, phys_addr,
MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
+ if (!skb)
+ goto err_drop_frame;
+
rcvd_pkts++;
rcvd_bytes += rx_bytes;
@@ -3191,6 +3197,7 @@ static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
}
mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
+ mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
}
/* Power up the port */
@@ -3250,6 +3257,7 @@ static int mvneta_probe(struct platform_device *pdev)
char hw_mac_addr[ETH_ALEN];
const char *mac_from;
const char *managed;
+ int tx_csum_limit;
int phy_mode;
int err;
int cpu;
@@ -3350,8 +3358,21 @@ static int mvneta_probe(struct platform_device *pdev)
}
}
- if (of_device_is_compatible(dn, "marvell,armada-370-neta"))
- pp->tx_csum_limit = 1600;
+ if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
+ if (tx_csum_limit < 0 ||
+ tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
+ tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
+ dev_info(&pdev->dev,
+ "Wrong TX csum limit in DT, set to %dB\n",
+ MVNETA_TX_CSUM_DEF_SIZE);
+ }
+ } else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
+ tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
+ } else {
+ tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
+ }
+
+ pp->tx_csum_limit = tx_csum_limit;
pp->tx_ring_size = MVNETA_MAX_TXD;
pp->rx_ring_size = MVNETA_MAX_RXD;
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index b159ef8..0576651 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1326,7 +1326,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
/* Get platform resources */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
- if ((!res) || (irq < 0) || (irq >= NR_IRQS)) {
+ if (!res || irq < 0) {
dev_err(&pdev->dev, "error getting resources.\n");
ret = -ENXIO;
goto err_exit;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index ee8d1ec..ed5da4d 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1225,7 +1225,7 @@ static int ravb_open(struct net_device *ndev)
/* Device init */
error = ravb_dmac_init(ndev);
if (error)
- goto out_free_irq;
+ goto out_free_irq2;
ravb_emac_init(ndev);
/* Initialise PTP Clock driver */
@@ -1243,9 +1243,11 @@ static int ravb_open(struct net_device *ndev)
out_ptp_stop:
/* Stop PTP Clock driver */
ravb_ptp_stop(ndev);
+out_free_irq2:
+ if (priv->chip_id == RCAR_GEN3)
+ free_irq(priv->emac_irq, ndev);
out_free_irq:
free_irq(ndev->irq, ndev);
- free_irq(priv->emac_irq, ndev);
out_napi_off:
napi_disable(&priv->napi[RAVB_NC]);
napi_disable(&priv->napi[RAVB_BE]);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index 7f6f4a4..58c05ac 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -299,16 +299,17 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) {
const char *rs;
+ dwmac->tx_retime_src = TX_RETIME_SRC_CLKGEN;
+
err = of_property_read_string(np, "st,tx-retime-src", &rs);
if (err < 0) {
dev_warn(dev, "Use internal clock source\n");
- dwmac->tx_retime_src = TX_RETIME_SRC_CLKGEN;
- } else if (!strcasecmp(rs, "clk_125")) {
- dwmac->tx_retime_src = TX_RETIME_SRC_CLK_125;
- } else if (!strcasecmp(rs, "txclk")) {
- dwmac->tx_retime_src = TX_RETIME_SRC_TXCLK;
+ } else {
+ if (!strcasecmp(rs, "clk_125"))
+ dwmac->tx_retime_src = TX_RETIME_SRC_CLK_125;
+ else if (!strcasecmp(rs, "txclk"))
+ dwmac->tx_retime_src = TX_RETIME_SRC_TXCLK;
}
-
dwmac->speed = SPEED_1000;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 64d8aa4..3c6549a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -185,7 +185,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
priv->clk_csr = STMMAC_CSR_100_150M;
else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
priv->clk_csr = STMMAC_CSR_150_250M;
- else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
+ else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
priv->clk_csr = STMMAC_CSR_250_300M;
}
}
@@ -2232,6 +2232,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
+ /* check if frame_len fits the preallocated memory */
+ if (frame_len > priv->dma_buf_sz) {
+ priv->dev->stats.rx_length_errors++;
+ break;
+ }
+
/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
* Type frames (LLC/LLC-SNAP)
*/
@@ -3102,6 +3108,7 @@ int stmmac_resume(struct net_device *ndev)
init_dma_desc_rings(ndev, GFP_ATOMIC);
stmmac_hw_setup(ndev, false);
stmmac_init_tx_coalesce(priv);
+ stmmac_set_rx_mode(ndev);
napi_enable(&priv->napi);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index ebf6abc..bba670c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -138,7 +138,6 @@ int stmmac_mdio_reset(struct mii_bus *bus)
#ifdef CONFIG_OF
if (priv->device->of_node) {
- int reset_gpio, active_low;
if (data->reset_gpio < 0) {
struct device_node *np = priv->device->of_node;
@@ -154,24 +153,23 @@ int stmmac_mdio_reset(struct mii_bus *bus)
"snps,reset-active-low");
of_property_read_u32_array(np,
"snps,reset-delays-us", data->delays, 3);
- }
- reset_gpio = data->reset_gpio;
- active_low = data->active_low;
+ if (gpio_request(data->reset_gpio, "mdio-reset"))
+ return 0;
+ }
- if (!gpio_request(reset_gpio, "mdio-reset")) {
- gpio_direction_output(reset_gpio, active_low ? 1 : 0);
- if (data->delays[0])
- msleep(DIV_ROUND_UP(data->delays[0], 1000));
+ gpio_direction_output(data->reset_gpio,
+ data->active_low ? 1 : 0);
+ if (data->delays[0])
+ msleep(DIV_ROUND_UP(data->delays[0], 1000));
- gpio_set_value(reset_gpio, active_low ? 0 : 1);
- if (data->delays[1])
- msleep(DIV_ROUND_UP(data->delays[1], 1000));
+ gpio_set_value(data->reset_gpio, data->active_low ? 0 : 1);
+ if (data->delays[1])
+ msleep(DIV_ROUND_UP(data->delays[1], 1000));
- gpio_set_value(reset_gpio, active_low ? 1 : 0);
- if (data->delays[2])
- msleep(DIV_ROUND_UP(data->delays[2], 1000));
- }
+ gpio_set_value(data->reset_gpio, data->active_low ? 1 : 0);
+ if (data->delays[2])
+ msleep(DIV_ROUND_UP(data->delays[2], 1000));
}
#endif
diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c
index c08be62..1562ab4 100644
--- a/drivers/net/ethernet/ti/cpsw-common.c
+++ b/drivers/net/ethernet/ti/cpsw-common.c
@@ -78,6 +78,9 @@ static int cpsw_am33xx_cm_get_macid(struct device *dev, u16 offset, int slave,
int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr)
{
+ if (of_machine_is_compatible("ti,dm8148"))
+ return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr);
+
if (of_machine_is_compatible("ti,am33xx"))
return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr);
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 54036ae..0fc5219 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -498,7 +498,7 @@ static void macvtap_sock_write_space(struct sock *sk)
wait_queue_head_t *wqueue;
if (!sock_writeable(sk) ||
- !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
+ !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
return;
wqueue = sk_sleep(sk);
@@ -585,7 +585,7 @@ static unsigned int macvtap_poll(struct file *file, poll_table * wait)
mask |= POLLIN | POLLRDNORM;
if (sock_writeable(&q->sk) ||
- (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock.flags) &&
+ (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) &&
sock_writeable(&q->sk)))
mask |= POLLOUT | POLLWRNORM;
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 07a6119..3ce5d95 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -614,7 +614,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
{ PHY_ID_BCM5461, 0xfffffff0 },
{ PHY_ID_BCM54616S, 0xfffffff0 },
{ PHY_ID_BCM5464, 0xfffffff0 },
- { PHY_ID_BCM5482, 0xfffffff0 },
+ { PHY_ID_BCM5481, 0xfffffff0 },
{ PHY_ID_BCM5482, 0xfffffff0 },
{ PHY_ID_BCM50610, 0xfffffff0 },
{ PHY_ID_BCM50610M, 0xfffffff0 },
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 48ce6ef..47cd306d 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -448,7 +448,8 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
mdiobus_write(phydev->bus, mii_data->phy_id,
mii_data->reg_num, val);
- if (mii_data->reg_num == MII_BMCR &&
+ if (mii_data->phy_id == phydev->addr &&
+ mii_data->reg_num == MII_BMCR &&
val & BMCR_RESET)
return phy_init_hw(phydev);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index b1878fa..f0db770 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1040,7 +1040,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
mask |= POLLIN | POLLRDNORM;
if (sock_writeable(sk) ||
- (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
+ (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
sock_writeable(sk)))
mask |= POLLOUT | POLLWRNORM;
@@ -1488,7 +1488,7 @@ static void tun_sock_write_space(struct sock *sk)
if (!sock_writeable(sk))
return;
- if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
+ if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
return;
wqueue = sk_sleep(sk);
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index a187f08..3b1ba82 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -691,7 +691,6 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags)
{
- const struct usb_cdc_union_desc *union_desc = NULL;
struct cdc_ncm_ctx *ctx;
struct usb_driver *driver;
u8 *buf;
@@ -725,15 +724,16 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
/* parse through descriptors associated with control interface */
cdc_parse_cdc_header(&hdr, intf, buf, len);
- ctx->data = usb_ifnum_to_if(dev->udev,
- hdr.usb_cdc_union_desc->bSlaveInterface0);
+ if (hdr.usb_cdc_union_desc)
+ ctx->data = usb_ifnum_to_if(dev->udev,
+ hdr.usb_cdc_union_desc->bSlaveInterface0);
ctx->ether_desc = hdr.usb_cdc_ether_desc;
ctx->func_desc = hdr.usb_cdc_ncm_desc;
ctx->mbim_desc = hdr.usb_cdc_mbim_desc;
ctx->mbim_extended_desc = hdr.usb_cdc_mbim_extended_desc;
/* some buggy devices have an IAD but no CDC Union */
- if (!union_desc && intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2) {
+ if (!hdr.usb_cdc_union_desc && intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2) {
ctx->data = usb_ifnum_to_if(dev->udev, intf->cur_altsetting->desc.bInterfaceNumber + 1);
dev_dbg(&intf->dev, "CDC Union missing - got slave from IAD\n");
}
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 34799ea..9a5be8b 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -725,6 +725,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
{QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
+ {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */
{QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */
{QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */
{QMI_FIXED_INTF(0x0b3c, 0xc002, 4)}, /* Olivetti Olicard 140 */
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 899ea42..4179037 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -587,6 +587,12 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
&adapter->pdev->dev,
rbi->skb->data, rbi->len,
PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ rbi->dma_addr)) {
+ dev_kfree_skb_any(rbi->skb);
+ rq->stats.rx_buf_alloc_failure++;
+ break;
+ }
} else {
/* rx buffer skipped by the device */
}
@@ -605,13 +611,18 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
&adapter->pdev->dev,
rbi->page, 0, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ rbi->dma_addr)) {
+ put_page(rbi->page);
+ rq->stats.rx_buf_alloc_failure++;
+ break;
+ }
} else {
/* rx buffers skipped by the device */
}
val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
}
- BUG_ON(rbi->dma_addr == 0);
gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
| val | rbi->len);
@@ -655,7 +666,7 @@ vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
}
-static void
+static int
vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
struct vmxnet3_adapter *adapter)
@@ -715,6 +726,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
skb->data + buf_offset, buf_size,
PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
+ return -EFAULT;
tbi->len = buf_size;
@@ -755,6 +768,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
buf_offset, buf_size,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
+ return -EFAULT;
tbi->len = buf_size;
@@ -782,6 +797,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
/* set the last buf_info for the pkt */
tbi->skb = skb;
tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
+
+ return 0;
}
@@ -1020,7 +1037,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
}
/* fill tx descs related to addr & len */
- vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);
+ if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
+ goto unlock_drop_pkt;
/* setup the EOP desc */
ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
@@ -1231,6 +1249,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
struct vmxnet3_rx_buf_info *rbi;
struct sk_buff *skb, *new_skb = NULL;
struct page *new_page = NULL;
+ dma_addr_t new_dma_addr;
int num_to_alloc;
struct Vmxnet3_RxDesc *rxd;
u32 idx, ring_idx;
@@ -1287,6 +1306,21 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
skip_page_frags = true;
goto rcd_done;
}
+ new_dma_addr = dma_map_single(&adapter->pdev->dev,
+ new_skb->data, rbi->len,
+ PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ new_dma_addr)) {
+ dev_kfree_skb(new_skb);
+ /* Mapping of the new skb failed: do not hand it over to the
+ * stack. Reuse the existing rx buffer and drop the current pkt.
+ */
+ rq->stats.rx_buf_alloc_failure++;
+ ctx->skb = NULL;
+ rq->stats.drop_total++;
+ skip_page_frags = true;
+ goto rcd_done;
+ }
dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr,
rbi->len,
@@ -1303,9 +1337,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
/* Immediate refill */
rbi->skb = new_skb;
- rbi->dma_addr = dma_map_single(&adapter->pdev->dev,
- rbi->skb->data, rbi->len,
- PCI_DMA_FROMDEVICE);
+ rbi->dma_addr = new_dma_addr;
rxd->addr = cpu_to_le64(rbi->dma_addr);
rxd->len = rbi->len;
if (adapter->version == 2 &&
@@ -1348,6 +1380,19 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
skip_page_frags = true;
goto rcd_done;
}
+ new_dma_addr = dma_map_page(&adapter->pdev->dev
+ , rbi->page,
+ 0, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ new_dma_addr)) {
+ put_page(new_page);
+ rq->stats.rx_buf_alloc_failure++;
+ dev_kfree_skb(ctx->skb);
+ ctx->skb = NULL;
+ skip_page_frags = true;
+ goto rcd_done;
+ }
dma_unmap_page(&adapter->pdev->dev,
rbi->dma_addr, rbi->len,
@@ -1357,10 +1402,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
/* Immediate refill */
rbi->page = new_page;
- rbi->dma_addr = dma_map_page(&adapter->pdev->dev
- , rbi->page,
- 0, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ rbi->dma_addr = new_dma_addr;
rxd->addr = cpu_to_le64(rbi->dma_addr);
rxd->len = rbi->len;
}
@@ -2167,7 +2209,8 @@ vmxnet3_set_mc(struct net_device *netdev)
PCI_DMA_TODEVICE);
}
- if (new_table_pa) {
+ if (!dma_mapping_error(&adapter->pdev->dev,
+ new_table_pa)) {
new_mode |= VMXNET3_RXM_MCAST;
rxConf->mfTablePA = cpu_to_le64(new_table_pa);
} else {
@@ -3075,6 +3118,11 @@ vmxnet3_probe_device(struct pci_dev *pdev,
adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
sizeof(struct vmxnet3_adapter),
PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
+ dev_err(&pdev->dev, "Failed to map dma\n");
+ err = -EFAULT;
+ goto err_dma_map;
+ }
adapter->shared = dma_alloc_coherent(
&adapter->pdev->dev,
sizeof(struct Vmxnet3_DriverShared),
@@ -3233,6 +3281,7 @@ err_alloc_queue_desc:
err_alloc_shared:
dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
+err_dma_map:
free_netdev(netdev);
return err;
}
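The vmxnet3 hunks above all apply the same pattern: every dma_map_single()/dma_map_page() is now followed by a dma_mapping_error() check before the address is written into a descriptor, and the old BUG_ON(rbi->dma_addr == 0) goes away. A minimal sketch of that pattern outside the driver; the function and parameter names are hypothetical, only the DMA API calls are real:

    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>

    /* Map an rx skb and only report success if the mapping is usable. */
    static int example_map_rx_skb(struct device *dev, struct sk_buff *skb,
                                  unsigned int len, dma_addr_t *dma_addr)
    {
            *dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, *dma_addr)) {
                    dev_kfree_skb_any(skb);  /* undo the allocation */
                    return -EFAULT;          /* caller bumps its failure stat */
            }
            return 0;                        /* address is safe to hand to hw */
    }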
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 92fa3e1..4f97484 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -907,7 +907,6 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct net_vrf *vrf = netdev_priv(dev);
- int err;
if (!data || !data[IFLA_VRF_TABLE])
return -EINVAL;
@@ -916,15 +915,7 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
dev->priv_flags |= IFF_L3MDEV_MASTER;
- err = register_netdevice(dev);
- if (err < 0)
- goto out_fail;
-
- return 0;
-
-out_fail:
- free_netdev(dev);
- return err;
+ return register_netdevice(dev);
}
static size_t vrf_nl_getsize(const struct net_device *dev)
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index e92aaf6..89541cc 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -1075,11 +1075,10 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
used = pvc_is_used(pvc);
- if (type == ARPHRD_ETHER) {
+ if (type == ARPHRD_ETHER)
dev = alloc_netdev(0, "pvceth%d", NET_NAME_UNKNOWN,
ether_setup);
- dev->priv_flags &= ~IFF_TX_SKB_SHARING;
- } else
+ else
dev = alloc_netdev(0, "pvc%d", NET_NAME_UNKNOWN, pvc_setup);
if (!dev) {
@@ -1088,9 +1087,10 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
return -ENOBUFS;
}
- if (type == ARPHRD_ETHER)
+ if (type == ARPHRD_ETHER) {
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
eth_hw_addr_random(dev);
- else {
+ } else {
*(__be16*)dev->dev_addr = htons(dlci);
dlci_to_q922(dev->broadcast, dlci);
}
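The hdlc_fr change is an ordering fix: dev->priv_flags was cleared between alloc_netdev() and the NULL check, so an allocation failure dereferenced a NULL pointer. Reduced to its essentials (this is the shape of the fixed code above, not new logic):

    dev = alloc_netdev(0, "pvceth%d", NET_NAME_UNKNOWN, ether_setup);
    if (!dev)
            return -ENOBUFS;
    /* only touch the device after the allocation has been checked */
    dev->priv_flags &= ~IFF_TX_SKB_SHARING;
    eth_hw_addr_random(dev);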
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 5c47b01..cd39025 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -549,16 +549,12 @@ static void x25_asy_receive_buf(struct tty_struct *tty,
static int x25_asy_open_tty(struct tty_struct *tty)
{
- struct x25_asy *sl = tty->disc_data;
+ struct x25_asy *sl;
int err;
if (tty->ops->write == NULL)
return -EOPNOTSUPP;
- /* First make sure we're not already connected. */
- if (sl && sl->magic == X25_ASY_MAGIC)
- return -EEXIST;
-
/* OK. Find a free X.25 channel to use. */
sl = x25_asy_alloc();
if (sl == NULL)
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index aa9bd92..0947cc2 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -51,6 +51,7 @@ MODULE_PARM_DESC(rawmode, "Use raw 802.11 frame datapath");
static const struct ath10k_hw_params ath10k_hw_params_list[] = {
{
.id = QCA988X_HW_2_0_VERSION,
+ .dev_id = QCA988X_2_0_DEVICE_ID,
.name = "qca988x hw2.0",
.patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
@@ -69,6 +70,25 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
},
{
.id = QCA6174_HW_2_1_VERSION,
+ .dev_id = QCA6164_2_1_DEVICE_ID,
+ .name = "qca6164 hw2.1",
+ .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
+ .uart_pin = 6,
+ .otp_exe_param = 0,
+ .channel_counters_freq_hz = 88000,
+ .max_probe_resp_desc_thres = 0,
+ .fw = {
+ .dir = QCA6174_HW_2_1_FW_DIR,
+ .fw = QCA6174_HW_2_1_FW_FILE,
+ .otp = QCA6174_HW_2_1_OTP_FILE,
+ .board = QCA6174_HW_2_1_BOARD_DATA_FILE,
+ .board_size = QCA6174_BOARD_DATA_SZ,
+ .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
+ },
+ },
+ {
+ .id = QCA6174_HW_2_1_VERSION,
+ .dev_id = QCA6174_2_1_DEVICE_ID,
.name = "qca6174 hw2.1",
.patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
.uart_pin = 6,
@@ -86,6 +106,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
},
{
.id = QCA6174_HW_3_0_VERSION,
+ .dev_id = QCA6174_2_1_DEVICE_ID,
.name = "qca6174 hw3.0",
.patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
.uart_pin = 6,
@@ -103,6 +124,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
},
{
.id = QCA6174_HW_3_2_VERSION,
+ .dev_id = QCA6174_2_1_DEVICE_ID,
.name = "qca6174 hw3.2",
.patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
.uart_pin = 6,
@@ -121,6 +143,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
},
{
.id = QCA99X0_HW_2_0_DEV_VERSION,
+ .dev_id = QCA99X0_2_0_DEVICE_ID,
.name = "qca99x0 hw2.0",
.patch_load_addr = QCA99X0_HW_2_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
@@ -139,10 +162,31 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
},
{
.id = QCA9377_HW_1_0_DEV_VERSION,
+ .dev_id = QCA9377_1_0_DEVICE_ID,
.name = "qca9377 hw1.0",
.patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR,
- .uart_pin = 7,
+ .uart_pin = 6,
.otp_exe_param = 0,
+ .channel_counters_freq_hz = 88000,
+ .max_probe_resp_desc_thres = 0,
+ .fw = {
+ .dir = QCA9377_HW_1_0_FW_DIR,
+ .fw = QCA9377_HW_1_0_FW_FILE,
+ .otp = QCA9377_HW_1_0_OTP_FILE,
+ .board = QCA9377_HW_1_0_BOARD_DATA_FILE,
+ .board_size = QCA9377_BOARD_DATA_SZ,
+ .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
+ },
+ },
+ {
+ .id = QCA9377_HW_1_1_DEV_VERSION,
+ .dev_id = QCA9377_1_0_DEVICE_ID,
+ .name = "qca9377 hw1.1",
+ .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR,
+ .uart_pin = 6,
+ .otp_exe_param = 0,
+ .channel_counters_freq_hz = 88000,
+ .max_probe_resp_desc_thres = 0,
.fw = {
.dir = QCA9377_HW_1_0_FW_DIR,
.fw = QCA9377_HW_1_0_FW_FILE,
@@ -1263,7 +1307,8 @@ static int ath10k_init_hw_params(struct ath10k *ar)
for (i = 0; i < ARRAY_SIZE(ath10k_hw_params_list); i++) {
hw_params = &ath10k_hw_params_list[i];
- if (hw_params->id == ar->target_version)
+ if (hw_params->id == ar->target_version &&
+ hw_params->dev_id == ar->dev_id)
break;
}
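The lookup change in core.c is needed because the BMI target version alone no longer identifies the hardware: per the new defines in hw.h further down, QCA6164 hw2.1 (PCI ID 0x0041) and QCA6174 hw2.1 (PCI ID 0x003e) report the same QCA6174_HW_2_1_VERSION. A reduced sketch of the two-key match; the struct and function names here are hypothetical:

    struct example_hw_key {
            u32 target_version;
            u16 dev_id;
    };

    static int example_find_hw_params(const struct example_hw_key *tbl, int n,
                                      u32 target_version, u16 dev_id)
    {
            int i;

            for (i = 0; i < n; i++)
                    if (tbl[i].target_version == target_version &&
                        tbl[i].dev_id == dev_id)
                            return i;   /* both keys must match */
            return -1;                  /* no supported entry */
    }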
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 018c64f..858d75f 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -636,6 +636,7 @@ struct ath10k {
struct ath10k_hw_params {
u32 id;
+ u16 dev_id;
const char *name;
u32 patch_load_addr;
int uart_pin;
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 39966a0..713c2bc 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -22,6 +22,12 @@
#define ATH10K_FW_DIR "ath10k"
+#define QCA988X_2_0_DEVICE_ID (0x003c)
+#define QCA6164_2_1_DEVICE_ID (0x0041)
+#define QCA6174_2_1_DEVICE_ID (0x003e)
+#define QCA99X0_2_0_DEVICE_ID (0x0040)
+#define QCA9377_1_0_DEVICE_ID (0x0042)
+
/* QCA988X 1.0 definitions (unsupported) */
#define QCA988X_HW_1_0_CHIP_ID_REV 0x0
@@ -42,6 +48,10 @@
#define QCA6174_HW_3_0_VERSION 0x05020000
#define QCA6174_HW_3_2_VERSION 0x05030000
+/* QCA9377 target BMI version signatures */
+#define QCA9377_HW_1_0_DEV_VERSION 0x05020000
+#define QCA9377_HW_1_1_DEV_VERSION 0x05020001
+
enum qca6174_pci_rev {
QCA6174_PCI_REV_1_1 = 0x11,
QCA6174_PCI_REV_1_3 = 0x13,
@@ -60,6 +70,11 @@ enum qca6174_chip_id_rev {
QCA6174_HW_3_2_CHIP_ID_REV = 10,
};
+enum qca9377_chip_id_rev {
+ QCA9377_HW_1_0_CHIP_ID_REV = 0x0,
+ QCA9377_HW_1_1_CHIP_ID_REV = 0x1,
+};
+
#define QCA6174_HW_2_1_FW_DIR "ath10k/QCA6174/hw2.1"
#define QCA6174_HW_2_1_FW_FILE "firmware.bin"
#define QCA6174_HW_2_1_OTP_FILE "otp.bin"
@@ -85,8 +100,6 @@ enum qca6174_chip_id_rev {
#define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234
/* QCA9377 1.0 definitions */
-#define QCA9377_HW_1_0_DEV_VERSION 0x05020001
-#define QCA9377_HW_1_0_CHIP_ID_REV 0x1
#define QCA9377_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9377/hw1.0"
#define QCA9377_HW_1_0_FW_FILE "firmware.bin"
#define QCA9377_HW_1_0_OTP_FILE "otp.bin"
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index a7411fe..95a55405 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -4225,7 +4225,7 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
static u32 get_nss_from_chainmask(u16 chain_mask)
{
- if ((chain_mask & 0x15) == 0x15)
+ if ((chain_mask & 0xf) == 0xf)
return 4;
else if ((chain_mask & 0x7) == 0x7)
return 3;
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 3fca200..930785a 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -57,12 +57,6 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
#define ATH10K_PCI_TARGET_WAIT 3000
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
-#define QCA988X_2_0_DEVICE_ID (0x003c)
-#define QCA6164_2_1_DEVICE_ID (0x0041)
-#define QCA6174_2_1_DEVICE_ID (0x003e)
-#define QCA99X0_2_0_DEVICE_ID (0x0040)
-#define QCA9377_1_0_DEVICE_ID (0x0042)
-
static const struct pci_device_id ath10k_pci_id_table[] = {
{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
{ PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
@@ -92,7 +86,9 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
{ QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },
+
{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },
+ { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV },
};
static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
@@ -111,8 +107,9 @@ static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
-static const struct ce_attr host_ce_config_wlan[] = {
+static struct ce_attr host_ce_config_wlan[] = {
/* CE0: host->target HTC control and raw streams */
{
.flags = CE_ATTR_FLAGS,
@@ -128,7 +125,7 @@ static const struct ce_attr host_ce_config_wlan[] = {
.src_nentries = 0,
.src_sz_max = 2048,
.dest_nentries = 512,
- .recv_cb = ath10k_pci_htc_rx_cb,
+ .recv_cb = ath10k_pci_htt_htc_rx_cb,
},
/* CE2: target->host WMI */
@@ -217,7 +214,7 @@ static const struct ce_attr host_ce_config_wlan[] = {
};
/* Target firmware's Copy Engine configuration. */
-static const struct ce_pipe_config target_ce_config_wlan[] = {
+static struct ce_pipe_config target_ce_config_wlan[] = {
/* CE0: host->target HTC control and raw streams */
{
.pipenum = __cpu_to_le32(0),
@@ -330,7 +327,7 @@ static const struct ce_pipe_config target_ce_config_wlan[] = {
* This table is derived from the CE_PCI TABLE, above.
* It is passed to the Target at startup for use by firmware.
*/
-static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
+static struct service_to_pipe target_service_to_ce_map_wlan[] = {
{
__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
@@ -1208,6 +1205,16 @@ static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}
+static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+ /* CE4 polling needs to be done whenever the CE pipe that
+ * transports HTT Rx (target->host) is processed.
+ */
+ ath10k_ce_per_engine_service(ce_state->ar, 4);
+
+ ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
+}
+
/* Called by lower (CE) layer when a send to HTT Target completes. */
static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
{
@@ -2027,6 +2034,29 @@ static int ath10k_pci_init_config(struct ath10k *ar)
return 0;
}
+static void ath10k_pci_override_ce_config(struct ath10k *ar)
+{
+ struct ce_attr *attr;
+ struct ce_pipe_config *config;
+
+ /* For QCA6174 we're overriding the Copy Engine 5 configuration,
+ * since it is currently used by another feature.
+ */
+
+ /* Override Host's Copy Engine 5 configuration */
+ attr = &host_ce_config_wlan[5];
+ attr->src_sz_max = 0;
+ attr->dest_nentries = 0;
+
+ /* Override Target firmware's Copy Engine configuration */
+ config = &target_ce_config_wlan[5];
+ config->pipedir = __cpu_to_le32(PIPEDIR_OUT);
+ config->nbytes_max = __cpu_to_le32(2048);
+
+ /* Map from service/endpoint to Copy Engine */
+ target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1);
+}
+
static int ath10k_pci_alloc_pipes(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -3020,6 +3050,9 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_core_destroy;
}
+ if (QCA_REV_6174(ar))
+ ath10k_pci_override_ce_config(ar);
+
ret = ath10k_pci_alloc_pipes(ar);
if (ret) {
ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 1a73c7a..bf88ec3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -69,7 +69,7 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX 17
+#define IWL7260_UCODE_API_MAX 19
/* Oldest version we won't warn about */
#define IWL7260_UCODE_API_OK 13
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
index 0116e5a..9bcc0bf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-8000.c
@@ -69,7 +69,7 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX 17
+#define IWL8000_UCODE_API_MAX 19
/* Oldest version we won't warn about */
#define IWL8000_UCODE_API_OK 13
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index 85ae902..29ae58e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -309,9 +309,9 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
* to transmit packets to the AP, i.e. the PTK.
*/
if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
- key->hw_key_idx = 0;
mvm->ptk_ivlen = key->iv_len;
mvm->ptk_icvlen = key->icv_len;
+ ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 0);
} else {
/*
* firmware only supports TSC/RSC for a single key,
@@ -319,12 +319,11 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
* with new ones -- this relies on mac80211 doing
* list_add_tail().
*/
- key->hw_key_idx = 1;
mvm->gtk_ivlen = key->iv_len;
mvm->gtk_icvlen = key->icv_len;
+ ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 1);
}
- ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, true);
data->error = ret != 0;
out_unlock:
mutex_unlock(&mvm->mutex);
@@ -772,9 +771,6 @@ static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm)
*/
set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
- /* We reprogram keys and shouldn't allocate new key indices */
- memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
-
mvm->ptk_ivlen = 0;
mvm->ptk_icvlen = 0;
mvm->ptk_ivlen = 0;
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 1fb6846..e88afac 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -2941,6 +2941,7 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
+ u8 key_offset;
if (iwlwifi_mod_params.sw_crypto) {
IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n");
@@ -3006,10 +3007,14 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
break;
}
+ /* in HW restart reuse the index, otherwise request a new one */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ key_offset = key->hw_key_idx;
+ else
+ key_offset = STA_KEY_IDX_INVALID;
+
IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
- ret = iwl_mvm_set_sta_key(mvm, vif, sta, key,
- test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
- &mvm->status));
+ ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);
if (ret) {
IWL_WARN(mvm, "set key failed\n");
/*
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 300a249..354acbd 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -1201,7 +1201,8 @@ static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
return max_offs;
}
-static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif,
+static u8 iwl_mvm_get_key_sta_id(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -1218,8 +1219,21 @@ static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif,
* station ID, then use AP's station ID.
*/
if (vif->type == NL80211_IFTYPE_STATION &&
- mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT)
- return mvmvif->ap_sta_id;
+ mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
+ u8 sta_id = mvmvif->ap_sta_id;
+
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+ /*
+ * It is possible that the 'sta' parameter is NULL,
+ * for example when a GTK is removed - the sta_id will then
+ * be the AP ID, and no station was passed by mac80211.
+ */
+ if (IS_ERR_OR_NULL(sta))
+ return IWL_MVM_STATION_COUNT;
+
+ return sta_id;
+ }
return IWL_MVM_STATION_COUNT;
}
@@ -1227,7 +1241,8 @@ static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif,
static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvm_sta,
struct ieee80211_key_conf *keyconf, bool mcast,
- u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags)
+ u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
+ u8 key_offset)
{
struct iwl_mvm_add_sta_key_cmd cmd = {};
__le16 key_flags;
@@ -1269,7 +1284,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
if (mcast)
key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
- cmd.key_offset = keyconf->hw_key_idx;
+ cmd.key_offset = key_offset;
cmd.key_flags = key_flags;
cmd.sta_id = sta_id;
@@ -1360,6 +1375,7 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *keyconf,
+ u8 key_offset,
bool mcast)
{
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
@@ -1375,17 +1391,17 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
ieee80211_get_key_rx_seq(keyconf, 0, &seq);
ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
- seq.tkip.iv32, p1k, 0);
+ seq.tkip.iv32, p1k, 0, key_offset);
break;
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
- 0, NULL, 0);
+ 0, NULL, 0, key_offset);
break;
default:
ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
- 0, NULL, 0);
+ 0, NULL, 0, key_offset);
}
return ret;
@@ -1433,7 +1449,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *keyconf,
- bool have_key_offset)
+ u8 key_offset)
{
bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
u8 sta_id;
@@ -1443,7 +1459,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
/* Get the station id from the mvm local station table */
- sta_id = iwl_mvm_get_key_sta_id(vif, sta);
+ sta_id = iwl_mvm_get_key_sta_id(mvm, vif, sta);
if (sta_id == IWL_MVM_STATION_COUNT) {
IWL_ERR(mvm, "Failed to find station id\n");
return -EINVAL;
@@ -1470,18 +1486,25 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
return -EINVAL;
- if (!have_key_offset) {
- /*
- * The D3 firmware hardcodes the PTK offset to 0, so we have to
- * configure it there. As a result, this workaround exists to
- * let the caller set the key offset (hw_key_idx), see d3.c.
- */
- keyconf->hw_key_idx = iwl_mvm_set_fw_key_idx(mvm);
- if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
+ /* If the key_offset is not pre-assigned, we need to find a
+ * new offset to use. In normal cases, the offset is not
+ * pre-assigned, but during HW_RESTART we want to reuse the
+ * same indices, so we pass them when this function is called.
+ *
+ * In D3 entry, we need to hardcode the indices (because the
+ * firmware hardcodes the PTK offset to 0). In this case, we
+ * need to make sure we don't overwrite the hw_key_idx in the
+ * keyconf structure, because otherwise we cannot configure
+ * the original ones back when resuming.
+ */
+ if (key_offset == STA_KEY_IDX_INVALID) {
+ key_offset = iwl_mvm_set_fw_key_idx(mvm);
+ if (key_offset == STA_KEY_IDX_INVALID)
return -ENOSPC;
+ keyconf->hw_key_idx = key_offset;
}
- ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, mcast);
+ ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
if (ret) {
__clear_bit(keyconf->hw_key_idx, mvm->fw_key_table);
goto end;
@@ -1495,7 +1518,8 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
*/
if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
- ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, !mcast);
+ ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
+ key_offset, !mcast);
if (ret) {
__clear_bit(keyconf->hw_key_idx, mvm->fw_key_table);
__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
@@ -1521,7 +1545,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
/* Get the station id from the mvm local station table */
- sta_id = iwl_mvm_get_key_sta_id(vif, sta);
+ sta_id = iwl_mvm_get_key_sta_id(mvm, vif, sta);
IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
keyconf->keyidx, sta_id);
@@ -1547,24 +1571,6 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
return 0;
}
- /*
- * It is possible that the 'sta' parameter is NULL, and thus
- * there is a need to retrieve the sta from the local station table,
- * for example when a GTK is removed (where the sta_id will then be
- * the AP ID, and no station was passed by mac80211.)
- */
- if (!sta) {
- sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
- lockdep_is_held(&mvm->mutex));
- if (!sta) {
- IWL_ERR(mvm, "Invalid station id\n");
- return -EINVAL;
- }
- }
-
- if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
- return -EINVAL;
-
ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
if (ret)
return ret;
@@ -1584,7 +1590,7 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
u16 *phase1key)
{
struct iwl_mvm_sta *mvm_sta;
- u8 sta_id = iwl_mvm_get_key_sta_id(vif, sta);
+ u8 sta_id = iwl_mvm_get_key_sta_id(mvm, vif, sta);
bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT))
@@ -1602,7 +1608,7 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
mvm_sta = iwl_mvm_sta_from_mac80211(sta);
iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
- iv32, phase1key, CMD_ASYNC);
+ iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);
rcu_read_unlock();
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
index eedb215..0631cc0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -365,8 +365,8 @@ int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key,
- bool have_key_offset);
+ struct ieee80211_key_conf *keyconf,
+ u8 key_offset);
int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
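Taken together, these iwlwifi hunks change the last argument of iwl_mvm_set_sta_key() from a bool into an explicit key offset: STA_KEY_IDX_INVALID asks the driver to allocate a fresh firmware key index, any other value is used as-is (HW restart passes the previous hw_key_idx, the D3/wowlan path passes the hardcoded 0 and 1). The caller-side convention, as it appears in the mac80211.c hunk above:

    u8 key_offset;

    /* in HW restart reuse the index, otherwise request a new one */
    if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
            key_offset = key->hw_key_idx;       /* reuse the old slot */
    else
            key_offset = STA_KEY_IDX_INVALID;   /* let the driver pick one */

    ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);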
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 644b58b..639761f 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -423,14 +423,21 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
/* 8000 Series */
{IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0130, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x1130, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0132, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x1132, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x01F0, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0012, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x1012, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x1150, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)},
@@ -438,18 +445,28 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x8110, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x9110, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x8130, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x9130, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x8132, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x9132, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x8150, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x9150, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0044, iwl8260_2n_cfg)},
{IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)},
#endif /* CONFIG_IWLMVM */
{0}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 6e9418e..bbb789f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -2272,7 +2272,7 @@ void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- if (!rtlpci->int_clear)
+ if (rtlpci->int_clear)
rtl8821ae_clear_interrupt(hw);/*clear it here first*/
rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
index 8ee141a..142bdff 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
@@ -448,7 +448,7 @@ MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n");
MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n");
-MODULE_PARM_DESC(int_clear, "Set to 1 to disable interrupt clear before set (default 0)\n");
+MODULE_PARM_DESC(int_clear, "Set to 0 to disable interrupt clear before set (default 1)\n");
static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 87e9d79..3a37bd3 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -421,7 +421,7 @@ static void lowcomms_write_space(struct sock *sk)
if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) {
con->sock->sk->sk_write_pending--;
- clear_bit(SOCK_ASYNC_NOSPACE, &con->sock->flags);
+ clear_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags);
}
if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags))
@@ -1448,7 +1448,7 @@ static void send_to_sock(struct connection *con)
msg_flags);
if (ret == -EAGAIN || ret == 0) {
if (ret == -EAGAIN &&
- test_bit(SOCK_ASYNC_NOSPACE, &con->sock->flags) &&
+ test_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags) &&
!test_and_set_bit(CF_APP_LIMITED, &con->flags)) {
/* Notify TCP that we're limited by the
* application window size.
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index de464e6..83d1926 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -40,6 +40,7 @@ struct bpf_map {
struct user_struct *user;
const struct bpf_map_ops *ops;
struct work_struct work;
+ atomic_t usercnt;
};
struct bpf_map_type_list {
@@ -167,8 +168,10 @@ struct bpf_prog *bpf_prog_get(u32 ufd);
void bpf_prog_put(struct bpf_prog *prog);
void bpf_prog_put_rcu(struct bpf_prog *prog);
-struct bpf_map *bpf_map_get(u32 ufd);
+struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
+void bpf_map_inc(struct bpf_map *map, bool uref);
+void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
extern int sysctl_unprivileged_bpf_disabled;
diff --git a/include/linux/dns_resolver.h b/include/linux/dns_resolver.h
index cc92268..6ac3cad 100644
--- a/include/linux/dns_resolver.h
+++ b/include/linux/dns_resolver.h
@@ -27,7 +27,7 @@
#ifdef __KERNEL__
extern int dns_query(const char *type, const char *name, size_t namelen,
- const char *options, char **_result, time_t *_expiry);
+ const char *options, char **_result, time64_t *_expiry);
#endif /* KERNEL */
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 0ef2a97..402753b 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -227,7 +227,7 @@ struct ipv6_pinfo {
struct ipv6_ac_socklist *ipv6_ac_list;
struct ipv6_fl_socklist __rcu *ipv6_fl_list;
- struct ipv6_txoptions *opt;
+ struct ipv6_txoptions __rcu *opt;
struct sk_buff *pktoptions;
struct sk_buff *rxpmtu;
struct inet6_cork cork;
diff --git a/include/linux/net.h b/include/linux/net.h
index 70ac5e2..0b4ac7d 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -34,8 +34,12 @@ struct inode;
struct file;
struct net;
-#define SOCK_ASYNC_NOSPACE 0
-#define SOCK_ASYNC_WAITDATA 1
+/* Historically, SOCKWQ_ASYNC_NOSPACE & SOCKWQ_ASYNC_WAITDATA were located
+ * in sock->flags, but moved into sk->sk_wq->flags to be RCU protected.
+ * Eventually all flags will be in sk->sk_wq_flags.
+ */
+#define SOCKWQ_ASYNC_NOSPACE 0
+#define SOCKWQ_ASYNC_WAITDATA 1
#define SOCK_NOSPACE 2
#define SOCK_PASSCRED 3
#define SOCK_PASSSEC 4
@@ -89,6 +93,7 @@ struct socket_wq {
/* Note: wait MUST be first field of socket_wq */
wait_queue_head_t wait;
struct fasync_struct *fasync_list;
+ unsigned long flags; /* %SOCKWQ_ASYNC_NOSPACE, etc */
struct rcu_head rcu;
} ____cacheline_aligned_in_smp;
@@ -96,7 +101,7 @@ struct socket_wq {
* struct socket - general BSD socket
* @state: socket state (%SS_CONNECTED, etc)
* @type: socket type (%SOCK_STREAM, etc)
- * @flags: socket flags (%SOCK_ASYNC_NOSPACE, etc)
+ * @flags: socket flags (%SOCK_NOSPACE, etc)
* @ops: protocol specific socket operations
* @file: File back pointer for gc
* @sk: internal networking protocol agnostic socket representation
@@ -202,7 +207,7 @@ enum {
SOCK_WAKE_URG,
};
-int sock_wake_async(struct socket *sk, int how, int band);
+int sock_wake_async(struct socket_wq *sk_wq, int how, int band);
int sock_register(const struct net_proto_family *fam);
void sock_unregister(int family);
int __sock_create(struct net *net, int family, int type, int proto,
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 67bfac1..3b5d134 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1398,7 +1398,8 @@ enum netdev_priv_flags {
* @dma: DMA channel
* @mtu: Interface MTU value
* @type: Interface hardware type
- * @hard_header_len: Hardware header length
+ * @hard_header_len: Hardware header length, which is also the
+ * minimum size of a packet.
*
* @needed_headroom: Extra headroom the hardware may need, but not in all
* cases can this be guaranteed
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index b36d837..2a91a05 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -62,6 +62,7 @@ struct unix_sock {
#define UNIX_GC_CANDIDATE 0
#define UNIX_GC_MAYBE_CYCLE 1
struct socket_wq peer_wq;
+ wait_queue_t peer_wake;
};
static inline struct unix_sock *unix_sk(const struct sock *sk)
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 2bfb2ad..877f682 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -133,27 +133,18 @@ void rt6_clean_tohost(struct net *net, struct in6_addr *gateway);
/*
* Store a destination cache entry in a socket
*/
-static inline void __ip6_dst_store(struct sock *sk, struct dst_entry *dst,
- const struct in6_addr *daddr,
- const struct in6_addr *saddr)
+static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
+ const struct in6_addr *daddr,
+ const struct in6_addr *saddr)
{
struct ipv6_pinfo *np = inet6_sk(sk);
- struct rt6_info *rt = (struct rt6_info *) dst;
+ np->dst_cookie = rt6_get_cookie((struct rt6_info *)dst);
sk_setup_caps(sk, dst);
np->daddr_cache = daddr;
#ifdef CONFIG_IPV6_SUBTREES
np->saddr_cache = saddr;
#endif
- np->dst_cookie = rt6_get_cookie(rt);
-}
-
-static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
- struct in6_addr *daddr, struct in6_addr *saddr)
-{
- spin_lock(&sk->sk_dst_lock);
- __ip6_dst_store(sk, dst, daddr, saddr);
- spin_unlock(&sk->sk_dst_lock);
}
static inline bool ipv6_unicast_destination(const struct sk_buff *skb)
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index e1a10b0..9a5c9f0 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -205,6 +205,7 @@ extern rwlock_t ip6_ra_lock;
*/
struct ipv6_txoptions {
+ atomic_t refcnt;
/* Length of this structure */
int tot_len;
@@ -217,7 +218,7 @@ struct ipv6_txoptions {
struct ipv6_opt_hdr *dst0opt;
struct ipv6_rt_hdr *srcrt; /* Routing Header */
struct ipv6_opt_hdr *dst1opt;
-
+ struct rcu_head rcu;
/* Option buffer, as read by IPV6_PKTOPTIONS, starts here. */
};
@@ -252,6 +253,24 @@ struct ipv6_fl_socklist {
struct rcu_head rcu;
};
+static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
+{
+ struct ipv6_txoptions *opt;
+
+ rcu_read_lock();
+ opt = rcu_dereference(np->opt);
+ if (opt && !atomic_inc_not_zero(&opt->refcnt))
+ opt = NULL;
+ rcu_read_unlock();
+ return opt;
+}
+
+static inline void txopt_put(struct ipv6_txoptions *opt)
+{
+ if (opt && atomic_dec_and_test(&opt->refcnt))
+ kfree_rcu(opt, rcu);
+}
+
struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label);
struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
struct ip6_flowlabel *fl,
@@ -490,6 +509,7 @@ struct ip6_create_arg {
u32 user;
const struct in6_addr *src;
const struct in6_addr *dst;
+ int iif;
u8 ecn;
};
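The txopt_get()/txopt_put() helpers added above are the access pattern the rest of this series uses wherever np->opt is read: take the RCU-protected pointer, pin it with a reference, and let kfree_rcu() free it once the last user is done. A minimal usage sketch; only the helper names and struct fields come from the patch, the surrounding function is hypothetical:

    #include <net/ipv6.h>

    static void example_use_txoptions(struct ipv6_pinfo *np)
    {
            struct ipv6_txoptions *opt;

            opt = txopt_get(np);    /* takes a reference, may return NULL */
            if (!opt)
                    return;
            /* ... read opt->dst0opt, opt->srcrt, opt->dst1opt, ... */
            txopt_put(opt);         /* drops the reference, frees via RCU */
    }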
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 82045fc..760bc4d 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -2003,8 +2003,10 @@ enum ieee80211_hw_flags {
* it shouldn't be set.
*
* @max_tx_aggregation_subframes: maximum number of subframes in an
- * aggregate an HT driver will transmit, used by the peer as a
- * hint to size its reorder buffer.
+ * aggregate an HT driver will transmit. Note that ADDBA will always
+ * advertise a constant window size of 64, since some older APs can
+ * crash if a smaller value is used (an example is the Linksys
+ * WRT120N with FW v1.0.07 build 002 Jun 18 2012).
*
* @offchannel_tx_hw_queue: HW queue ID to use for offchannel TX
* (if %IEEE80211_HW_QUEUE_CONTROL is set)
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index bf39374..2d8edaa 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -181,8 +181,7 @@ void ndisc_cleanup(void);
int ndisc_rcv(struct sk_buff *skb);
void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit,
- const struct in6_addr *daddr, const struct in6_addr *saddr,
- struct sk_buff *oskb);
+ const struct in6_addr *daddr, const struct in6_addr *saddr);
void ndisc_send_rs(struct net_device *dev,
const struct in6_addr *saddr, const struct in6_addr *daddr);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 4c79ce8..b2a8e63 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -61,6 +61,9 @@ struct Qdisc {
*/
#define TCQ_F_WARN_NONWC (1 << 16)
#define TCQ_F_CPUSTATS 0x20 /* run using percpu statistics */
+#define TCQ_F_NOPARENT 0x40 /* root of its hierarchy :
+ * qdisc_tree_decrease_qlen() should stop.
+ */
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table __rcu *stab;
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 495c87e..7bbb710 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -775,10 +775,10 @@ struct sctp_transport {
hb_sent:1,
/* Is the Path MTU update pending on this transport */
- pmtu_pending:1;
+ pmtu_pending:1,
- /* Has this transport moved the ctsn since we last sacked */
- __u32 sack_generation;
+ /* Has this transport moved the ctsn since we last sacked */
+ sack_generation:1;
u32 dst_cookie;
struct flowi fl;
@@ -1482,19 +1482,19 @@ struct sctp_association {
prsctp_capable:1, /* Can peer do PR-SCTP? */
auth_capable:1; /* Is peer doing SCTP-AUTH? */
- /* Ack State : This flag indicates if the next received
+ /* sack_needed : This flag indicates if the next received
* : packet is to be responded to with a
- * : SACK. This is initializedto 0. When a packet
- * : is received it is incremented. If this value
+ * : SACK. This is initialized to 0. When a packet
+ * : is received sack_cnt is incremented. If this value
* : reaches 2 or more, a SACK is sent and the
* : value is reset to 0. Note: This is used only
* : when no DATA chunks are received out of
* : order. When DATA chunks are out of order,
* : SACK's are not delayed (see Section 6).
*/
- __u8 sack_needed; /* Do we need to sack the peer? */
+ __u8 sack_needed:1, /* Do we need to sack the peer? */
+ sack_generation:1;
__u32 sack_cnt;
- __u32 sack_generation;
__u32 adaptation_ind; /* Adaptation Code point. */
diff --git a/include/net/sock.h b/include/net/sock.h
index 7f89e4b..52d27ee 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -254,7 +254,6 @@ struct cg_proto;
* @sk_wq: sock wait queue and async head
* @sk_rx_dst: receive input route used by early demux
* @sk_dst_cache: destination cache
- * @sk_dst_lock: destination cache lock
* @sk_policy: flow policy
* @sk_receive_queue: incoming packets
* @sk_wmem_alloc: transmit queue bytes committed
@@ -384,14 +383,16 @@ struct sock {
int sk_rcvbuf;
struct sk_filter __rcu *sk_filter;
- struct socket_wq __rcu *sk_wq;
-
+ union {
+ struct socket_wq __rcu *sk_wq;
+ struct socket_wq *sk_wq_raw;
+ };
#ifdef CONFIG_XFRM
struct xfrm_policy *sk_policy[2];
#endif
struct dst_entry *sk_rx_dst;
struct dst_entry __rcu *sk_dst_cache;
- spinlock_t sk_dst_lock;
+ /* Note: 32bit hole on 64bit arches */
atomic_t sk_wmem_alloc;
atomic_t sk_omem_alloc;
int sk_sndbuf;
@@ -2005,10 +2006,27 @@ static inline unsigned long sock_wspace(struct sock *sk)
return amt;
}
-static inline void sk_wake_async(struct sock *sk, int how, int band)
+/* Note:
+ * These helpers use sk->sk_wq_raw and may only be called from contexts
+ * where this pointer is known to be non-NULL and cannot disappear or change.
+ */
+static inline void sk_set_bit(int nr, struct sock *sk)
{
- if (sock_flag(sk, SOCK_FASYNC))
- sock_wake_async(sk->sk_socket, how, band);
+ set_bit(nr, &sk->sk_wq_raw->flags);
+}
+
+static inline void sk_clear_bit(int nr, struct sock *sk)
+{
+ clear_bit(nr, &sk->sk_wq_raw->flags);
+}
+
+static inline void sk_wake_async(const struct sock *sk, int how, int band)
+{
+ if (sock_flag(sk, SOCK_FASYNC)) {
+ rcu_read_lock();
+ sock_wake_async(rcu_dereference(sk->sk_wq), how, band);
+ rcu_read_unlock();
+ }
}
/* Since sk_{r,w}mem_alloc sums skb->truesize, even a small frame might
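Call sites that used to poke sk->sk_socket->flags directly are converted throughout this series to the new wrappers, which operate on sk->sk_wq_raw->flags (the flags word that now lives in struct socket_wq). The before/after shape of that conversion, as seen in the net/core and net/bluetooth hunks below:

    /* old */
    set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
    clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);

    /* new */
    sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
    sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);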
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 3f4c99e..b0799bc 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -28,11 +28,17 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
attr->value_size == 0)
return ERR_PTR(-EINVAL);
+ if (attr->value_size >= 1 << (KMALLOC_SHIFT_MAX - 1))
+ /* if value_size is bigger, the user space won't be able to
+ * access the elements.
+ */
+ return ERR_PTR(-E2BIG);
+
elem_size = round_up(attr->value_size, 8);
/* check round_up into zero and u32 overflow */
if (elem_size == 0 ||
- attr->max_entries > (U32_MAX - sizeof(*array)) / elem_size)
+ attr->max_entries > (U32_MAX - PAGE_SIZE - sizeof(*array)) / elem_size)
return ERR_PTR(-ENOMEM);
array_size = sizeof(*array) + attr->max_entries * elem_size;
@@ -105,7 +111,7 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
/* all elements already exist */
return -EEXIST;
- memcpy(array->value + array->elem_size * index, value, array->elem_size);
+ memcpy(array->value + array->elem_size * index, value, map->value_size);
return 0;
}
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 19909b2..34777b3 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -64,12 +64,35 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
*/
goto free_htab;
- err = -ENOMEM;
+ if (htab->map.value_size >= (1 << (KMALLOC_SHIFT_MAX - 1)) -
+ MAX_BPF_STACK - sizeof(struct htab_elem))
+ /* if value_size is bigger, the user space won't be able to
+ * access the elements via bpf syscall. This check also makes
+ * sure that the elem_size doesn't overflow and it's
+ * kmalloc-able later in htab_map_update_elem()
+ */
+ goto free_htab;
+
+ htab->elem_size = sizeof(struct htab_elem) +
+ round_up(htab->map.key_size, 8) +
+ htab->map.value_size;
+
/* prevent zero size kmalloc and check for u32 overflow */
if (htab->n_buckets == 0 ||
htab->n_buckets > U32_MAX / sizeof(struct hlist_head))
goto free_htab;
+ if ((u64) htab->n_buckets * sizeof(struct hlist_head) +
+ (u64) htab->elem_size * htab->map.max_entries >=
+ U32_MAX - PAGE_SIZE)
+ /* make sure page count doesn't overflow */
+ goto free_htab;
+
+ htab->map.pages = round_up(htab->n_buckets * sizeof(struct hlist_head) +
+ htab->elem_size * htab->map.max_entries,
+ PAGE_SIZE) >> PAGE_SHIFT;
+
+ err = -ENOMEM;
htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct hlist_head),
GFP_USER | __GFP_NOWARN);
@@ -85,13 +108,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
raw_spin_lock_init(&htab->lock);
htab->count = 0;
- htab->elem_size = sizeof(struct htab_elem) +
- round_up(htab->map.key_size, 8) +
- htab->map.value_size;
-
- htab->map.pages = round_up(htab->n_buckets * sizeof(struct hlist_head) +
- htab->elem_size * htab->map.max_entries,
- PAGE_SIZE) >> PAGE_SHIFT;
return &htab->map;
free_htab:
@@ -222,7 +238,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
WARN_ON_ONCE(!rcu_read_lock_held());
/* allocate new element outside of lock */
- l_new = kmalloc(htab->elem_size, GFP_ATOMIC);
+ l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN);
if (!l_new)
return -ENOMEM;
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index be6d726..5a8a797 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -34,7 +34,7 @@ static void *bpf_any_get(void *raw, enum bpf_type type)
atomic_inc(&((struct bpf_prog *)raw)->aux->refcnt);
break;
case BPF_TYPE_MAP:
- atomic_inc(&((struct bpf_map *)raw)->refcnt);
+ bpf_map_inc(raw, true);
break;
default:
WARN_ON_ONCE(1);
@@ -51,7 +51,7 @@ static void bpf_any_put(void *raw, enum bpf_type type)
bpf_prog_put(raw);
break;
case BPF_TYPE_MAP:
- bpf_map_put(raw);
+ bpf_map_put_with_uref(raw);
break;
default:
WARN_ON_ONCE(1);
@@ -64,7 +64,7 @@ static void *bpf_fd_probe_obj(u32 ufd, enum bpf_type *type)
void *raw;
*type = BPF_TYPE_MAP;
- raw = bpf_map_get(ufd);
+ raw = bpf_map_get_with_uref(ufd);
if (IS_ERR(raw)) {
*type = BPF_TYPE_PROG;
raw = bpf_prog_get(ufd);
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 0d3313d..3b39550 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -82,6 +82,14 @@ static void bpf_map_free_deferred(struct work_struct *work)
map->ops->map_free(map);
}
+static void bpf_map_put_uref(struct bpf_map *map)
+{
+ if (atomic_dec_and_test(&map->usercnt)) {
+ if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
+ bpf_fd_array_map_clear(map);
+ }
+}
+
/* decrement map refcnt and schedule it for freeing via workqueue
* (underlying map implementation ops->map_free() might sleep)
*/
@@ -93,17 +101,15 @@ void bpf_map_put(struct bpf_map *map)
}
}
-static int bpf_map_release(struct inode *inode, struct file *filp)
+void bpf_map_put_with_uref(struct bpf_map *map)
{
- struct bpf_map *map = filp->private_data;
-
- if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
- /* prog_array stores refcnt-ed bpf_prog pointers
- * release them all when user space closes prog_array_fd
- */
- bpf_fd_array_map_clear(map);
-
+ bpf_map_put_uref(map);
bpf_map_put(map);
+}
+
+static int bpf_map_release(struct inode *inode, struct file *filp)
+{
+ bpf_map_put_with_uref(filp->private_data);
return 0;
}
@@ -142,6 +148,7 @@ static int map_create(union bpf_attr *attr)
return PTR_ERR(map);
atomic_set(&map->refcnt, 1);
+ atomic_set(&map->usercnt, 1);
err = bpf_map_charge_memlock(map);
if (err)
@@ -174,7 +181,14 @@ struct bpf_map *__bpf_map_get(struct fd f)
return f.file->private_data;
}
-struct bpf_map *bpf_map_get(u32 ufd)
+void bpf_map_inc(struct bpf_map *map, bool uref)
+{
+ atomic_inc(&map->refcnt);
+ if (uref)
+ atomic_inc(&map->usercnt);
+}
+
+struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
struct fd f = fdget(ufd);
struct bpf_map *map;
@@ -183,7 +197,7 @@ struct bpf_map *bpf_map_get(u32 ufd)
if (IS_ERR(map))
return map;
- atomic_inc(&map->refcnt);
+ bpf_map_inc(map, true);
fdput(f);
return map;
@@ -226,7 +240,7 @@ static int map_lookup_elem(union bpf_attr *attr)
goto free_key;
err = -ENOMEM;
- value = kmalloc(map->value_size, GFP_USER);
+ value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
if (!value)
goto free_key;
@@ -285,7 +299,7 @@ static int map_update_elem(union bpf_attr *attr)
goto free_key;
err = -ENOMEM;
- value = kmalloc(map->value_size, GFP_USER);
+ value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
if (!value)
goto free_key;
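The new usercnt counter is what lets bpf_map_release() flush a persistent prog_array only when the last user-space reference disappears: file descriptors and bpffs pins take both counters, while a loaded program (see the verifier hunk below) holds only refcnt, so it keeps the map alive without keeping the prog_array contents pinned. In short:

    /* user-visible reference: map fd, bpf_map_get_with_uref(), bpffs pin */
    bpf_map_inc(map, true);     /* refcnt + usercnt */

    /* kernel-internal reference taken by the verifier for a loaded program */
    bpf_map_inc(map, false);    /* refcnt only */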
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index c607305..a7945d1 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2021,8 +2021,7 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
* will be used by the valid program until it's unloaded
* and all maps are released in free_bpf_prog_info()
*/
- atomic_inc(&map->refcnt);
-
+ bpf_map_inc(map, false);
fdput(f);
next_insn:
insn++;
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index a3bffd1..70306cc 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -271,11 +271,11 @@ static long bt_sock_data_wait(struct sock *sk, long timeo)
if (signal_pending(current) || !timeo)
break;
- set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
- clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
}
__set_current_state(TASK_RUNNING);
@@ -441,7 +441,7 @@ unsigned int bt_sock_poll(struct file *file, struct socket *sock,
if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk))
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
else
- set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
return mask;
}
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index c913538..ffed8a1 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -3027,8 +3027,13 @@ static void smp_ready_cb(struct l2cap_chan *chan)
BT_DBG("chan %p", chan);
+ /* No need to call l2cap_chan_hold() here since we already own
+ * the reference taken in smp_new_conn_cb(). This is just the
+ * first time that we tie it to a specific pointer. The code in
+ * l2cap_core.c ensures that there's no risk this function won't
+ * get called if smp_new_conn_cb() was previously called.
+ */
conn->smp = chan;
- l2cap_chan_hold(chan);
if (hcon->type == ACL_LINK && test_bit(HCI_CONN_ENCRYPT, &hcon->flags))
bredr_pairing(chan);
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index cc85891..aa209b1 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -323,7 +323,7 @@ static long caif_stream_data_wait(struct sock *sk, long timeo)
!timeo)
break;
- set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
@@ -331,7 +331,7 @@ static long caif_stream_data_wait(struct sock *sk, long timeo)
if (sock_flag(sk, SOCK_DEAD))
break;
- clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
}
finish_wait(sk_sleep(sk), &wait);
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 617088a..d62af69 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -785,7 +785,7 @@ unsigned int datagram_poll(struct file *file, struct socket *sock,
if (sock_writeable(sk))
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
else
- set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
return mask;
}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index e6af42d..f18ae91 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2215,7 +2215,7 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
ndm->ndm_pad2 = 0;
ndm->ndm_flags = pn->flags | NTF_PROXY;
ndm->ndm_type = RTN_UNICAST;
- ndm->ndm_ifindex = pn->dev->ifindex;
+ ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
ndm->ndm_state = NUD_NONE;
if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
@@ -2333,7 +2333,7 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
if (h > s_h)
s_idx = 0;
for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
- if (dev_net(n->dev) != net)
+ if (pneigh_net(n) != net)
continue;
if (idx < s_idx)
goto next;
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
index 6441f47..2e4df84 100644
--- a/net/core/netclassid_cgroup.c
+++ b/net/core/netclassid_cgroup.c
@@ -56,7 +56,7 @@ static void cgrp_css_free(struct cgroup_subsys_state *css)
kfree(css_cls_state(css));
}
-static int update_classid(const void *v, struct file *file, unsigned n)
+static int update_classid_sock(const void *v, struct file *file, unsigned n)
{
int err;
struct socket *sock = sock_from_file(file, &err);
@@ -67,18 +67,25 @@ static int update_classid(const void *v, struct file *file, unsigned n)
return 0;
}
-static void cgrp_attach(struct cgroup_subsys_state *css,
- struct cgroup_taskset *tset)
+static void update_classid(struct cgroup_subsys_state *css, void *v)
{
- struct cgroup_cls_state *cs = css_cls_state(css);
- void *v = (void *)(unsigned long)cs->classid;
+ struct css_task_iter it;
struct task_struct *p;
- cgroup_taskset_for_each(p, tset) {
+ css_task_iter_start(css, &it);
+ while ((p = css_task_iter_next(&it))) {
task_lock(p);
- iterate_fd(p->files, 0, update_classid, v);
+ iterate_fd(p->files, 0, update_classid_sock, v);
task_unlock(p);
}
+ css_task_iter_end(&it);
+}
+
+static void cgrp_attach(struct cgroup_subsys_state *css,
+ struct cgroup_taskset *tset)
+{
+ update_classid(css,
+ (void *)(unsigned long)css_cls_state(css)->classid);
}
static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft)
@@ -89,8 +96,11 @@ static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft)
static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
u64 value)
{
- css_cls_state(css)->classid = (u32) value;
+ struct cgroup_cls_state *cs = css_cls_state(css);
+
+ cs->classid = (u32)value;
+ update_classid(css, (void *)(unsigned long)cs->classid);
return 0;
}
diff --git a/net/core/scm.c b/net/core/scm.c
index 3b6899b..8a1741b 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -305,6 +305,8 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
err = put_user(cmlen, &cm->cmsg_len);
if (!err) {
cmlen = CMSG_SPACE(i*sizeof(int));
+ if (msg->msg_controllen < cmlen)
+ cmlen = msg->msg_controllen;
msg->msg_control += cmlen;
msg->msg_controllen -= cmlen;
}
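
Context for the scm_detach_fds() hunk above: msg_controllen is unsigned, so when CMSG_SPACE(i*sizeof(int)) rounds cmlen up past the space actually left in the control buffer, the unchecked subtraction would wrap msg_controllen around to a huge value. A minimal stand-alone sketch of the same clamp-before-advance idea (plain C, illustrative names only, not kernel code):

	#include <stddef.h>

	/* Clamp an unsigned cursor advance so it can never overshoot the
	 * remaining buffer; without the clamp, remaining -= step would
	 * underflow whenever step > remaining.
	 */
	static void advance(char **cursor, size_t *remaining, size_t step)
	{
		if (*remaining < step)
			step = *remaining;
		*cursor += step;
		*remaining -= step;
	}
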
diff --git a/net/core/sock.c b/net/core/sock.c
index 1e4dd54..e31dfce 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1530,7 +1530,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
skb_queue_head_init(&newsk->sk_receive_queue);
skb_queue_head_init(&newsk->sk_write_queue);
- spin_lock_init(&newsk->sk_dst_lock);
rwlock_init(&newsk->sk_callback_lock);
lockdep_set_class_and_name(&newsk->sk_callback_lock,
af_callback_keys + newsk->sk_family,
@@ -1607,7 +1606,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
u32 max_segs = 1;
- __sk_dst_set(sk, dst);
+ sk_dst_set(sk, dst);
sk->sk_route_caps = dst->dev->features;
if (sk->sk_route_caps & NETIF_F_GSO)
sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
@@ -1815,7 +1814,7 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
DEFINE_WAIT(wait);
- clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
for (;;) {
if (!timeo)
break;
@@ -1861,7 +1860,7 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
break;
- set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
err = -EAGAIN;
if (!timeo)
@@ -2048,9 +2047,9 @@ int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
DEFINE_WAIT(wait);
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
- set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb);
- clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
finish_wait(sk_sleep(sk), &wait);
return rc;
}
@@ -2388,7 +2387,6 @@ void sock_init_data(struct socket *sock, struct sock *sk)
} else
sk->sk_wq = NULL;
- spin_lock_init(&sk->sk_dst_lock);
rwlock_init(&sk->sk_callback_lock);
lockdep_set_class_and_name(&sk->sk_callback_lock,
af_callback_keys + sk->sk_family,
diff --git a/net/core/stream.c b/net/core/stream.c
index d70f77a..b96f7a7 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -39,7 +39,7 @@ void sk_stream_write_space(struct sock *sk)
wake_up_interruptible_poll(&wq->wait, POLLOUT |
POLLWRNORM | POLLWRBAND);
if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
- sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT);
+ sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
rcu_read_unlock();
}
}
@@ -126,7 +126,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
current_timeo = vm_wait = (prandom_u32() % (HZ / 5)) + 2;
while (1) {
- set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
@@ -139,7 +139,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
}
if (signal_pending(current))
goto do_interrupted;
- clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
if (sk_stream_memory_free(sk) && !vm_wait)
break;
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index db5fc24..9c6d050 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -202,7 +202,9 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req
security_req_classify_flow(req, flowi6_to_flowi(&fl6));
- final_p = fl6_update_dst(&fl6, np->opt, &final);
+ rcu_read_lock();
+ final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
+ rcu_read_unlock();
dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
if (IS_ERR(dst)) {
@@ -219,7 +221,10 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req
&ireq->ir_v6_loc_addr,
&ireq->ir_v6_rmt_addr);
fl6.daddr = ireq->ir_v6_rmt_addr;
- err = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
+ rcu_read_lock();
+ err = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
+ np->tclass);
+ rcu_read_unlock();
err = net_xmit_eval(err);
}
@@ -387,6 +392,7 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
struct inet_request_sock *ireq = inet_rsk(req);
struct ipv6_pinfo *newnp;
const struct ipv6_pinfo *np = inet6_sk(sk);
+ struct ipv6_txoptions *opt;
struct inet_sock *newinet;
struct dccp6_sock *newdp6;
struct sock *newsk;
@@ -453,7 +459,7 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
* comment in that function for the gory details. -acme
*/
- __ip6_dst_store(newsk, dst, NULL, NULL);
+ ip6_dst_store(newsk, dst, NULL, NULL);
newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
NETIF_F_TSO);
newdp6 = (struct dccp6_sock *)newsk;
@@ -488,13 +494,15 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
* Yes, keeping reference count would be much more clever, but we make
* one more one thing there: reattach optmem to newsk.
*/
- if (np->opt != NULL)
- newnp->opt = ipv6_dup_options(newsk, np->opt);
-
+ opt = rcu_dereference(np->opt);
+ if (opt) {
+ opt = ipv6_dup_options(newsk, opt);
+ RCU_INIT_POINTER(newnp->opt, opt);
+ }
inet_csk(newsk)->icsk_ext_hdr_len = 0;
- if (newnp->opt != NULL)
- inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
- newnp->opt->opt_flen);
+ if (opt)
+ inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
+ opt->opt_flen;
dccp_sync_mss(newsk, dst_mtu(dst));
@@ -757,6 +765,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
struct ipv6_pinfo *np = inet6_sk(sk);
struct dccp_sock *dp = dccp_sk(sk);
struct in6_addr *saddr = NULL, *final_p, final;
+ struct ipv6_txoptions *opt;
struct flowi6 fl6;
struct dst_entry *dst;
int addr_type;
@@ -856,7 +865,8 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
fl6.fl6_sport = inet->inet_sport;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
- final_p = fl6_update_dst(&fl6, np->opt, &final);
+ opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
+ final_p = fl6_update_dst(&fl6, opt, &final);
dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
if (IS_ERR(dst)) {
@@ -873,12 +883,11 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
np->saddr = *saddr;
inet->inet_rcv_saddr = LOOPBACK4_IPV6;
- __ip6_dst_store(sk, dst, NULL, NULL);
+ ip6_dst_store(sk, dst, NULL, NULL);
icsk->icsk_ext_hdr_len = 0;
- if (np->opt != NULL)
- icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
- np->opt->opt_nflen);
+ if (opt)
+ icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;
inet->inet_dport = usin->sin6_port;
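
The np->opt changes in this file follow the pattern used throughout the series: packet-path readers take rcu_read_lock() and rcu_dereference() the options only for the duration of the call, while paths that already hold the socket lock (connect, setsockopt) use rcu_dereference_protected(np->opt, sock_owned_by_user(sk)). A fragment of the reader side, using only identifiers that appear in the hunks above (kernel context assumed, not a stand-alone program):

	struct in6_addr final, *final_p;
	struct ipv6_txoptions *opt;

	rcu_read_lock();
	opt = rcu_dereference(np->opt);		/* may be NULL */
	final_p = fl6_update_dst(&fl6, opt, &final);
	rcu_read_unlock();			/* 'opt' is not used past this point */

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);

fl6_update_dst() copies the final destination into 'final', so final_p stays valid after the read side is unlocked.
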
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index b5cf13a..41e6580 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -339,8 +339,7 @@ unsigned int dccp_poll(struct file *file, struct socket *sock,
if (sk_stream_is_writeable(sk)) {
mask |= POLLOUT | POLLWRNORM;
} else { /* send SIGIO later */
- set_bit(SOCK_ASYNC_NOSPACE,
- &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
/* Race breaker. If space is freed after
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 675cf94..eebf5ac 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1747,9 +1747,9 @@ static int dn_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
}
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
- set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target));
- clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
finish_wait(sk_sleep(sk), &wait);
}
@@ -2004,10 +2004,10 @@ static int dn_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
}
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
- set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
sk_wait_event(sk, &timeo,
!dn_queue_too_long(scp, queue, flags));
- clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
finish_wait(sk_sleep(sk), &wait);
continue;
}
diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c
index 4677b6f..ecc28cf 100644
--- a/net/dns_resolver/dns_query.c
+++ b/net/dns_resolver/dns_query.c
@@ -67,7 +67,7 @@
* Returns the size of the result on success, -ve error code otherwise.
*/
int dns_query(const char *type, const char *name, size_t namelen,
- const char *options, char **_result, time_t *_expiry)
+ const char *options, char **_result, time64_t *_expiry)
{
struct key *rkey;
const struct user_key_payload *upayload;
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index 35a9788..c7d1adc 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -312,7 +312,7 @@ static void send_hsr_supervision_frame(struct hsr_port *master, u8 type)
return;
out:
- WARN_ON_ONCE("HSR: Could not send supervision frame\n");
+ WARN_ONCE(1, "HSR: Could not send supervision frame\n");
kfree_skb(skb);
}
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 6baf36e..05e4cba 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -2126,7 +2126,7 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
ASSERT_RTNL();
in_dev = ip_mc_find_dev(net, imr);
- if (!in_dev) {
+ if (!imr->imr_ifindex && !imr->imr_address.s_addr && !in_dev) {
ret = -ENODEV;
goto out;
}
@@ -2147,7 +2147,8 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
*imlp = iml->next_rcu;
- ip_mc_dec_group(in_dev, group);
+ if (in_dev)
+ ip_mc_dec_group(in_dev, group);
/* decrease mem now to avoid the memleak warning */
atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 92dd4b7..c3a3835 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -134,7 +134,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
struct mfc_cache *c, struct rtmsg *rtm);
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
int cmd);
-static void mroute_clean_tables(struct mr_table *mrt);
+static void mroute_clean_tables(struct mr_table *mrt, bool all);
static void ipmr_expire_process(unsigned long arg);
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
@@ -350,7 +350,7 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
static void ipmr_free_table(struct mr_table *mrt)
{
del_timer_sync(&mrt->ipmr_expire_timer);
- mroute_clean_tables(mrt);
+ mroute_clean_tables(mrt, true);
kfree(mrt);
}
@@ -441,10 +441,6 @@ struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
return dev;
failure:
- /* allow the register to be completed before unregistering. */
- rtnl_unlock();
- rtnl_lock();
-
unregister_netdevice(dev);
return NULL;
}
@@ -540,10 +536,6 @@ static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
return dev;
failure:
- /* allow the register to be completed before unregistering. */
- rtnl_unlock();
- rtnl_lock();
-
unregister_netdevice(dev);
return NULL;
}
@@ -1208,7 +1200,7 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
* Close the multicast socket, and clear the vif tables etc
*/
-static void mroute_clean_tables(struct mr_table *mrt)
+static void mroute_clean_tables(struct mr_table *mrt, bool all)
{
int i;
LIST_HEAD(list);
@@ -1217,8 +1209,9 @@ static void mroute_clean_tables(struct mr_table *mrt)
/* Shut down all active vif entries */
for (i = 0; i < mrt->maxvif; i++) {
- if (!(mrt->vif_table[i].flags & VIFF_STATIC))
- vif_delete(mrt, i, 0, &list);
+ if (!all && (mrt->vif_table[i].flags & VIFF_STATIC))
+ continue;
+ vif_delete(mrt, i, 0, &list);
}
unregister_netdevice_many(&list);
@@ -1226,7 +1219,7 @@ static void mroute_clean_tables(struct mr_table *mrt)
for (i = 0; i < MFC_LINES; i++) {
list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
- if (c->mfc_flags & MFC_STATIC)
+ if (!all && (c->mfc_flags & MFC_STATIC))
continue;
list_del_rcu(&c->list);
mroute_netlink_event(mrt, c, RTM_DELROUTE);
@@ -1261,7 +1254,7 @@ static void mrtsock_destruct(struct sock *sk)
NETCONFA_IFINDEX_ALL,
net->ipv4.devconf_all);
RCU_INIT_POINTER(mrt->mroute_sk, NULL);
- mroute_clean_tables(mrt);
+ mroute_clean_tables(mrt, false);
}
}
rtnl_unlock();
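
The new 'all' argument separates the two callers: mrtsock_destruct() (mroute socket close) passes false and preserves VIFF_STATIC/MFC_STATIC entries, while ipmr_free_table() (table teardown) passes true so static entries, and the device references they hold, are released as well. The same skip-unless-forced idiom in isolation, as a stand-alone illustration with made-up names (not kernel identifiers):

	#include <stdbool.h>

	struct entry { unsigned int flags; };
	#define ENTRY_STATIC 0x1

	static void clean_table(struct entry *tab, int n, bool all)
	{
		for (int i = 0; i < n; i++) {
			if (!all && (tab[i].flags & ENTRY_STATIC))
				continue;	/* keep static entries on socket close */
			/* remove tab[i] here; on full teardown nothing survives */
		}
	}
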
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index c172877..c82cca1 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -517,8 +517,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
if (sk_stream_is_writeable(sk)) {
mask |= POLLOUT | POLLWRNORM;
} else { /* send SIGIO later */
- set_bit(SOCK_ASYNC_NOSPACE,
- &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
/* Race breaker. If space is freed after
@@ -906,7 +905,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
goto out_err;
}
- clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
mss_now = tcp_send_mss(sk, &size_goal, flags);
copied = 0;
@@ -1134,7 +1133,7 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
}
/* This should be in poll */
- clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
mss_now = tcp_send_mss(sk, &size_goal, flags);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index fdd88c3..2d656ee 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4481,19 +4481,34 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
{
struct sk_buff *skb;
+ int err = -ENOMEM;
+ int data_len = 0;
bool fragstolen;
if (size == 0)
return 0;
- skb = alloc_skb(size, sk->sk_allocation);
+ if (size > PAGE_SIZE) {
+ int npages = min_t(size_t, size >> PAGE_SHIFT, MAX_SKB_FRAGS);
+
+ data_len = npages << PAGE_SHIFT;
+ size = data_len + (size & ~PAGE_MASK);
+ }
+ skb = alloc_skb_with_frags(size - data_len, data_len,
+ PAGE_ALLOC_COSTLY_ORDER,
+ &err, sk->sk_allocation);
if (!skb)
goto err;
+ skb_put(skb, size - data_len);
+ skb->data_len = data_len;
+ skb->len = size;
+
if (tcp_try_rmem_schedule(sk, skb, skb->truesize))
goto err_free;
- if (memcpy_from_msg(skb_put(skb, size), msg, size))
+ err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
+ if (err)
goto err_free;
TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt;
@@ -4509,7 +4524,8 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
err_free:
kfree_skb(skb);
err:
- return -ENOMEM;
+ return err;
+
}
static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
@@ -5667,6 +5683,7 @@ discard:
}
tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
+ tp->copied_seq = tp->rcv_nxt;
tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
/* RFC1323: The window in SYN & SYN/ACK segments is
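
For the tcp_send_rcvq() rework above: TCP repair used to allocate the whole user buffer as one linear skb, which triggers high-order allocation warnings for large writes; alloc_skb_with_frags() keeps only the sub-page remainder linear and places whole pages into up to MAX_SKB_FRAGS page fragments. A stand-alone sketch of the size split, assuming 4 KiB pages and MAX_SKB_FRAGS = 17 (both assumptions, not taken from the patch):

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PAGE_MASK	(~(PAGE_SIZE - 1))
	#define MAX_SKB_FRAGS	17	/* assumed value for 4 KiB pages */

	int main(void)
	{
		unsigned long size = 100000, data_len = 0, npages;

		if (size > PAGE_SIZE) {
			npages = size >> PAGE_SHIFT;
			if (npages > MAX_SKB_FRAGS)
				npages = MAX_SKB_FRAGS;
			data_len = npages << PAGE_SHIFT;
			size = data_len + (size & ~PAGE_MASK);
		}
		/* prints: linear 1696, paged 69632, total 71328 */
		printf("linear %lu, paged %lu, total %lu\n",
		       size - data_len, data_len, size);
		return 0;
	}
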
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index ba09016..db00343 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -921,7 +921,8 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
}
md5sig = rcu_dereference_protected(tp->md5sig_info,
- sock_owned_by_user(sk));
+ sock_owned_by_user(sk) ||
+ lockdep_is_held(&sk->sk_lock.slock));
if (!md5sig) {
md5sig = kmalloc(sizeof(*md5sig), gfp);
if (!md5sig)
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index c9c716a..193ba1f 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -168,7 +168,7 @@ static int tcp_write_timeout(struct sock *sk)
dst_negative_advice(sk);
if (tp->syn_fastopen || tp->syn_data)
tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
- if (tp->syn_data)
+ if (tp->syn_data && icsk->icsk_retransmits == 1)
NET_INC_STATS_BH(sock_net(sk),
LINUX_MIB_TCPFASTOPENACTIVEFAIL);
}
@@ -176,6 +176,18 @@ static int tcp_write_timeout(struct sock *sk)
syn_set = true;
} else {
if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
+ /* Some middle-boxes may black-hole Fast Open _after_
+ * the handshake. Therefore we conservatively disable
+ * Fast Open on this path on recurring timeouts with
+ * few or zero bytes acked after Fast Open.
+ */
+ if (tp->syn_data_acked &&
+ tp->bytes_acked <= tp->rx_opt.mss_clamp) {
+ tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
+ if (icsk->icsk_retransmits == sysctl_tcp_retries1)
+ NET_INC_STATS_BH(sock_net(sk),
+ LINUX_MIB_TCPFASTOPENACTIVEFAIL);
+ }
/* Black hole detection */
tcp_mtu_probing(icsk, sk);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 24ec14f9..0c7b0e6 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -100,7 +100,6 @@
#include <linux/slab.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
-#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index d84742f..61f2685 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3642,7 +3642,7 @@ static void addrconf_dad_work(struct work_struct *w)
/* send a neighbour solicitation for our addr */
addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
- ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any, NULL);
+ ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any);
out:
in6_ifa_put(ifp);
rtnl_unlock();
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 44bb66b..8ec0df7 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -428,9 +428,11 @@ void inet6_destroy_sock(struct sock *sk)
/* Free tx options */
- opt = xchg(&np->opt, NULL);
- if (opt)
- sock_kfree_s(sk, opt, opt->tot_len);
+ opt = xchg((__force struct ipv6_txoptions **)&np->opt, NULL);
+ if (opt) {
+ atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
+ txopt_put(opt);
+ }
}
EXPORT_SYMBOL_GPL(inet6_destroy_sock);
@@ -659,7 +661,10 @@ int inet6_sk_rebuild_header(struct sock *sk)
fl6.fl6_sport = inet->inet_sport;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
- final_p = fl6_update_dst(&fl6, np->opt, &final);
+ rcu_read_lock();
+ final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt),
+ &final);
+ rcu_read_unlock();
dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
if (IS_ERR(dst)) {
@@ -668,7 +673,7 @@ int inet6_sk_rebuild_header(struct sock *sk)
return PTR_ERR(dst);
}
- __ip6_dst_store(sk, dst, NULL, NULL);
+ ip6_dst_store(sk, dst, NULL, NULL);
}
return 0;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index d70b023..517c55b 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -167,8 +167,10 @@ ipv4_connected:
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
- opt = flowlabel ? flowlabel->opt : np->opt;
+ rcu_read_lock();
+ opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
final_p = fl6_update_dst(&fl6, opt, &final);
+ rcu_read_unlock();
dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
err = 0;
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index ce203b0..ea7c4d6 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -727,6 +727,7 @@ ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
*((char **)&opt2->dst1opt) += dif;
if (opt2->srcrt)
*((char **)&opt2->srcrt) += dif;
+ atomic_set(&opt2->refcnt, 1);
}
return opt2;
}
@@ -790,7 +791,7 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
return ERR_PTR(-ENOBUFS);
memset(opt2, 0, tot_len);
-
+ atomic_set(&opt2->refcnt, 1);
opt2->tot_len = tot_len;
p = (char *)(opt2 + 1);
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 36c5a98..0a37ddc 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -834,11 +834,6 @@ void icmpv6_flow_init(struct sock *sk, struct flowi6 *fl6,
security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
}
-/*
- * Special lock-class for __icmpv6_sk:
- */
-static struct lock_class_key icmpv6_socket_sk_dst_lock_key;
-
static int __net_init icmpv6_sk_init(struct net *net)
{
struct sock *sk;
@@ -860,15 +855,6 @@ static int __net_init icmpv6_sk_init(struct net *net)
net->ipv6.icmp_sk[i] = sk;
- /*
- * Split off their lock-class, because sk->sk_dst_lock
- * gets used from softirqs, which is safe for
- * __icmpv6_sk (because those never get directly used
- * via userspace syscalls), but unsafe for normal sockets.
- */
- lockdep_set_class(&sk->sk_dst_lock,
- &icmpv6_socket_sk_dst_lock_key);
-
/* Enough space for 2 64K ICMP packets, including
* sk_buff struct overhead.
*/
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 5d1c7ce..a7ca2cd 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -78,7 +78,9 @@ struct dst_entry *inet6_csk_route_req(const struct sock *sk,
memset(fl6, 0, sizeof(*fl6));
fl6->flowi6_proto = proto;
fl6->daddr = ireq->ir_v6_rmt_addr;
- final_p = fl6_update_dst(fl6, np->opt, &final);
+ rcu_read_lock();
+ final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
+ rcu_read_unlock();
fl6->saddr = ireq->ir_v6_loc_addr;
fl6->flowi6_oif = ireq->ir_iif;
fl6->flowi6_mark = ireq->ir_mark;
@@ -109,14 +111,6 @@ void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr);
static inline
-void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
- const struct in6_addr *daddr,
- const struct in6_addr *saddr)
-{
- __ip6_dst_store(sk, dst, daddr, saddr);
-}
-
-static inline
struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
{
return __sk_dst_check(sk, cookie);
@@ -142,14 +136,16 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
fl6->fl6_dport = inet->inet_dport;
security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
- final_p = fl6_update_dst(fl6, np->opt, &final);
+ rcu_read_lock();
+ final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
+ rcu_read_unlock();
dst = __inet6_csk_dst_check(sk, np->dst_cookie);
if (!dst) {
dst = ip6_dst_lookup_flow(sk, fl6, final_p);
if (!IS_ERR(dst))
- __inet6_csk_dst_store(sk, dst, NULL, NULL);
+ ip6_dst_store(sk, dst, NULL, NULL);
}
return dst;
}
@@ -175,7 +171,8 @@ int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused
/* Restore final destination back after routing done */
fl6.daddr = sk->sk_v6_daddr;
- res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
+ res = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
+ np->tclass);
rcu_read_unlock();
return res;
}
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index eabffbb..137fca4 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -177,7 +177,7 @@ void ip6_tnl_dst_reset(struct ip6_tnl *t)
int i;
for_each_possible_cpu(i)
- ip6_tnl_per_cpu_dst_set(raw_cpu_ptr(t->dst_cache), NULL);
+ ip6_tnl_per_cpu_dst_set(per_cpu_ptr(t->dst_cache, i), NULL);
}
EXPORT_SYMBOL_GPL(ip6_tnl_dst_reset);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index ad19136..a10e771 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -118,7 +118,7 @@ static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
int cmd);
static int ip6mr_rtm_dumproute(struct sk_buff *skb,
struct netlink_callback *cb);
-static void mroute_clean_tables(struct mr6_table *mrt);
+static void mroute_clean_tables(struct mr6_table *mrt, bool all);
static void ipmr_expire_process(unsigned long arg);
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
@@ -334,7 +334,7 @@ static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
static void ip6mr_free_table(struct mr6_table *mrt)
{
del_timer_sync(&mrt->ipmr_expire_timer);
- mroute_clean_tables(mrt);
+ mroute_clean_tables(mrt, true);
kfree(mrt);
}
@@ -765,10 +765,6 @@ static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
return dev;
failure:
- /* allow the register to be completed before unregistering. */
- rtnl_unlock();
- rtnl_lock();
-
unregister_netdevice(dev);
return NULL;
}
@@ -1542,7 +1538,7 @@ static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
* Close the multicast socket, and clear the vif tables etc
*/
-static void mroute_clean_tables(struct mr6_table *mrt)
+static void mroute_clean_tables(struct mr6_table *mrt, bool all)
{
int i;
LIST_HEAD(list);
@@ -1552,8 +1548,9 @@ static void mroute_clean_tables(struct mr6_table *mrt)
* Shut down all active vif entries
*/
for (i = 0; i < mrt->maxvif; i++) {
- if (!(mrt->vif6_table[i].flags & VIFF_STATIC))
- mif6_delete(mrt, i, &list);
+ if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
+ continue;
+ mif6_delete(mrt, i, &list);
}
unregister_netdevice_many(&list);
@@ -1562,7 +1559,7 @@ static void mroute_clean_tables(struct mr6_table *mrt)
*/
for (i = 0; i < MFC6_LINES; i++) {
list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
- if (c->mfc_flags & MFC_STATIC)
+ if (!all && (c->mfc_flags & MFC_STATIC))
continue;
write_lock_bh(&mrt_lock);
list_del(&c->list);
@@ -1625,7 +1622,7 @@ int ip6mr_sk_done(struct sock *sk)
net->ipv6.devconf_all);
write_unlock_bh(&mrt_lock);
- mroute_clean_tables(mrt);
+ mroute_clean_tables(mrt, false);
err = 0;
break;
}
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 63e6956..4449ad1 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -111,7 +111,8 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
}
}
- opt = xchg(&inet6_sk(sk)->opt, opt);
+ opt = xchg((__force struct ipv6_txoptions **)&inet6_sk(sk)->opt,
+ opt);
sk_dst_reset(sk);
return opt;
@@ -231,9 +232,12 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
sk->sk_socket->ops = &inet_dgram_ops;
sk->sk_family = PF_INET;
}
- opt = xchg(&np->opt, NULL);
- if (opt)
- sock_kfree_s(sk, opt, opt->tot_len);
+ opt = xchg((__force struct ipv6_txoptions **)&np->opt,
+ NULL);
+ if (opt) {
+ atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
+ txopt_put(opt);
+ }
pktopt = xchg(&np->pktoptions, NULL);
kfree_skb(pktopt);
@@ -403,7 +407,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
break;
- opt = ipv6_renew_options(sk, np->opt, optname,
+ opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
+ opt = ipv6_renew_options(sk, opt, optname,
(struct ipv6_opt_hdr __user *)optval,
optlen);
if (IS_ERR(opt)) {
@@ -432,8 +437,10 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
retv = 0;
opt = ipv6_update_options(sk, opt);
sticky_done:
- if (opt)
- sock_kfree_s(sk, opt, opt->tot_len);
+ if (opt) {
+ atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
+ txopt_put(opt);
+ }
break;
}
@@ -486,6 +493,7 @@ sticky_done:
break;
memset(opt, 0, sizeof(*opt));
+ atomic_set(&opt->refcnt, 1);
opt->tot_len = sizeof(*opt) + optlen;
retv = -EFAULT;
if (copy_from_user(opt+1, optval, optlen))
@@ -502,8 +510,10 @@ update:
retv = 0;
opt = ipv6_update_options(sk, opt);
done:
- if (opt)
- sock_kfree_s(sk, opt, opt->tot_len);
+ if (opt) {
+ atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
+ txopt_put(opt);
+ }
break;
}
case IPV6_UNICAST_HOPS:
@@ -1110,10 +1120,11 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
case IPV6_RTHDR:
case IPV6_DSTOPTS:
{
+ struct ipv6_txoptions *opt;
lock_sock(sk);
- len = ipv6_getsockopt_sticky(sk, np->opt,
- optname, optval, len);
+ opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
+ len = ipv6_getsockopt_sticky(sk, opt, optname, optval, len);
release_sock(sk);
/* check if ipv6_getsockopt_sticky() returns err code */
if (len < 0)
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 3e0f855..d6161e1 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -556,8 +556,7 @@ static void ndisc_send_unsol_na(struct net_device *dev)
}
void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit,
- const struct in6_addr *daddr, const struct in6_addr *saddr,
- struct sk_buff *oskb)
+ const struct in6_addr *daddr, const struct in6_addr *saddr)
{
struct sk_buff *skb;
struct in6_addr addr_buf;
@@ -593,9 +592,6 @@ void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit,
ndisc_fill_addr_option(skb, ND_OPT_SOURCE_LL_ADDR,
dev->dev_addr);
- if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE) && oskb)
- skb_dst_copy(skb, oskb);
-
ndisc_send_skb(skb, daddr, saddr);
}
@@ -682,12 +678,12 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb)
"%s: trying to ucast probe in NUD_INVALID: %pI6\n",
__func__, target);
}
- ndisc_send_ns(dev, target, target, saddr, skb);
+ ndisc_send_ns(dev, target, target, saddr);
} else if ((probes -= NEIGH_VAR(neigh->parms, APP_PROBES)) < 0) {
neigh_app_ns(neigh);
} else {
addrconf_addr_solict_mult(target, &mcaddr);
- ndisc_send_ns(dev, target, &mcaddr, saddr, skb);
+ ndisc_send_ns(dev, target, &mcaddr, saddr);
}
}
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index d5efeb8..bab4441 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -190,7 +190,7 @@ static void nf_ct_frag6_expire(unsigned long data)
/* Creation primitives. */
static inline struct frag_queue *fq_find(struct net *net, __be32 id,
u32 user, struct in6_addr *src,
- struct in6_addr *dst, u8 ecn)
+ struct in6_addr *dst, int iif, u8 ecn)
{
struct inet_frag_queue *q;
struct ip6_create_arg arg;
@@ -200,6 +200,7 @@ static inline struct frag_queue *fq_find(struct net *net, __be32 id,
arg.user = user;
arg.src = src;
arg.dst = dst;
+ arg.iif = iif;
arg.ecn = ecn;
local_bh_disable();
@@ -601,7 +602,7 @@ struct sk_buff *nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 use
fhdr = (struct frag_hdr *)skb_transport_header(clone);
fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
- ip6_frag_ecn(hdr));
+ skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
if (fq == NULL) {
pr_debug("Can't find and can't create new queue\n");
goto ret_orig;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index dc65ec1..9914098 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -733,6 +733,7 @@ static int raw6_getfrag(void *from, char *to, int offset, int len, int odd,
static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
+ struct ipv6_txoptions *opt_to_free = NULL;
struct ipv6_txoptions opt_space;
DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
struct in6_addr *daddr, *final_p, final;
@@ -839,8 +840,10 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (!(opt->opt_nflen|opt->opt_flen))
opt = NULL;
}
- if (!opt)
- opt = np->opt;
+ if (!opt) {
+ opt = txopt_get(np);
+ opt_to_free = opt;
+ }
if (flowlabel)
opt = fl6_merge_options(&opt_space, flowlabel, opt);
opt = ipv6_fixup_options(&opt_space, opt);
@@ -906,6 +909,7 @@ done:
dst_release(dst);
out:
fl6_sock_release(flowlabel);
+ txopt_put(opt_to_free);
return err < 0 ? err : len;
do_confirm:
dst_confirm(dst);
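
With np->opt refcounted, rawv6_sendmsg() can no longer simply borrow the pointer: when the caller supplied no options of its own, it takes a reference with txopt_get() and records it in opt_to_free so that exactly one txopt_put() runs on every exit path, while flowlabel- or cmsg-supplied options are never dropped by mistake. A fragment of the pattern, using only identifiers from the hunks above (kernel context assumed):

	struct ipv6_txoptions *opt = NULL, *opt_to_free = NULL;

	/* ... cmsg parsing may have filled 'opt' from userspace ... */
	if (!opt) {
		opt = txopt_get(np);	/* grabs a reference, may return NULL */
		opt_to_free = opt;	/* only this reference is ours to drop */
	}
	/* build and send the packet using 'opt' ... */

	txopt_put(opt_to_free);		/* harmless when NULL or caller-owned */

The same pattern is applied to udpv6_sendmsg() and l2tp_ip6_sendmsg() further down.
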
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 44e21a0..45f5ae5 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -108,7 +108,10 @@ bool ip6_frag_match(const struct inet_frag_queue *q, const void *a)
return fq->id == arg->id &&
fq->user == arg->user &&
ipv6_addr_equal(&fq->saddr, arg->src) &&
- ipv6_addr_equal(&fq->daddr, arg->dst);
+ ipv6_addr_equal(&fq->daddr, arg->dst) &&
+ (arg->iif == fq->iif ||
+ !(ipv6_addr_type(arg->dst) & (IPV6_ADDR_MULTICAST |
+ IPV6_ADDR_LINKLOCAL)));
}
EXPORT_SYMBOL(ip6_frag_match);
@@ -180,7 +183,7 @@ static void ip6_frag_expire(unsigned long data)
static struct frag_queue *
fq_find(struct net *net, __be32 id, const struct in6_addr *src,
- const struct in6_addr *dst, u8 ecn)
+ const struct in6_addr *dst, int iif, u8 ecn)
{
struct inet_frag_queue *q;
struct ip6_create_arg arg;
@@ -190,6 +193,7 @@ fq_find(struct net *net, __be32 id, const struct in6_addr *src,
arg.user = IP6_DEFRAG_LOCAL_DELIVER;
arg.src = src;
arg.dst = dst;
+ arg.iif = iif;
arg.ecn = ecn;
hash = inet6_hash_frag(id, src, dst);
@@ -551,7 +555,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
}
fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
- ip6_frag_ecn(hdr));
+ skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
if (fq) {
int ret;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 6f01fe1..826e6aa 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -523,7 +523,7 @@ static void rt6_probe_deferred(struct work_struct *w)
container_of(w, struct __rt6_probe_work, work);
addrconf_addr_solict_mult(&work->target, &mcaddr);
- ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, NULL);
+ ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL);
dev_put(work->dev);
kfree(work);
}
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index bb8f2fa..eaf7ac4 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -222,7 +222,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_TCP;
fl6.daddr = ireq->ir_v6_rmt_addr;
- final_p = fl6_update_dst(&fl6, np->opt, &final);
+ final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
fl6.saddr = ireq->ir_v6_loc_addr;
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.flowi6_mark = ireq->ir_mark;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index c5429a6..e7aab56 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -120,6 +120,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
struct ipv6_pinfo *np = inet6_sk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct in6_addr *saddr = NULL, *final_p, final;
+ struct ipv6_txoptions *opt;
struct flowi6 fl6;
struct dst_entry *dst;
int addr_type;
@@ -235,7 +236,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
fl6.fl6_dport = usin->sin6_port;
fl6.fl6_sport = inet->inet_sport;
- final_p = fl6_update_dst(&fl6, np->opt, &final);
+ opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
+ final_p = fl6_update_dst(&fl6, opt, &final);
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
@@ -255,7 +257,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
inet->inet_rcv_saddr = LOOPBACK4_IPV6;
sk->sk_gso_type = SKB_GSO_TCPV6;
- __ip6_dst_store(sk, dst, NULL, NULL);
+ ip6_dst_store(sk, dst, NULL, NULL);
if (tcp_death_row.sysctl_tw_recycle &&
!tp->rx_opt.ts_recent_stamp &&
@@ -263,9 +265,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
tcp_fetch_timewait_stamp(sk, dst);
icsk->icsk_ext_hdr_len = 0;
- if (np->opt)
- icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
- np->opt->opt_nflen);
+ if (opt)
+ icsk->icsk_ext_hdr_len = opt->opt_flen +
+ opt->opt_nflen;
tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
@@ -461,7 +463,8 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
if (np->repflow && ireq->pktopts)
fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
- err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
+ err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
+ np->tclass);
err = net_xmit_eval(err);
}
@@ -972,6 +975,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
struct inet_request_sock *ireq;
struct ipv6_pinfo *newnp;
const struct ipv6_pinfo *np = inet6_sk(sk);
+ struct ipv6_txoptions *opt;
struct tcp6_sock *newtcp6sk;
struct inet_sock *newinet;
struct tcp_sock *newtp;
@@ -1056,7 +1060,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
*/
newsk->sk_gso_type = SKB_GSO_TCPV6;
- __ip6_dst_store(newsk, dst, NULL, NULL);
+ ip6_dst_store(newsk, dst, NULL, NULL);
inet6_sk_rx_dst_set(newsk, skb);
newtcp6sk = (struct tcp6_sock *)newsk;
@@ -1098,13 +1102,15 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
but we make one more one thing there: reattach optmem
to newsk.
*/
- if (np->opt)
- newnp->opt = ipv6_dup_options(newsk, np->opt);
-
+ opt = rcu_dereference(np->opt);
+ if (opt) {
+ opt = ipv6_dup_options(newsk, opt);
+ RCU_INIT_POINTER(newnp->opt, opt);
+ }
inet_csk(newsk)->icsk_ext_hdr_len = 0;
- if (newnp->opt)
- inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
- newnp->opt->opt_flen);
+ if (opt)
+ inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
+ opt->opt_flen;
tcp_ca_openreq_child(newsk, dst);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 01bcb49..9da3287 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1110,6 +1110,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
struct in6_addr *daddr, *final_p, final;
struct ipv6_txoptions *opt = NULL;
+ struct ipv6_txoptions *opt_to_free = NULL;
struct ip6_flowlabel *flowlabel = NULL;
struct flowi6 fl6;
struct dst_entry *dst;
@@ -1263,8 +1264,10 @@ do_udp_sendmsg:
opt = NULL;
connected = 0;
}
- if (!opt)
- opt = np->opt;
+ if (!opt) {
+ opt = txopt_get(np);
+ opt_to_free = opt;
+ }
if (flowlabel)
opt = fl6_merge_options(&opt_space, flowlabel, opt);
opt = ipv6_fixup_options(&opt_space, opt);
@@ -1373,6 +1376,7 @@ release_dst:
out:
dst_release(dst);
fl6_sock_release(flowlabel);
+ txopt_put(opt_to_free);
if (!err)
return len;
/*
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index fcb2752..435608c 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1483,7 +1483,7 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
if (sock_writeable(sk) && iucv_below_msglim(sk))
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
else
- set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
return mask;
}
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index aca38d8..a2c8747 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -486,6 +486,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
DECLARE_SOCKADDR(struct sockaddr_l2tpip6 *, lsa, msg->msg_name);
struct in6_addr *daddr, *final_p, final;
struct ipv6_pinfo *np = inet6_sk(sk);
+ struct ipv6_txoptions *opt_to_free = NULL;
struct ipv6_txoptions *opt = NULL;
struct ip6_flowlabel *flowlabel = NULL;
struct dst_entry *dst = NULL;
@@ -575,8 +576,10 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
opt = NULL;
}
- if (opt == NULL)
- opt = np->opt;
+ if (!opt) {
+ opt = txopt_get(np);
+ opt_to_free = opt;
+ }
if (flowlabel)
opt = fl6_merge_options(&opt_space, flowlabel, opt);
opt = ipv6_fixup_options(&opt_space, opt);
@@ -631,6 +634,7 @@ done:
dst_release(dst);
out:
fl6_sock_release(flowlabel);
+ txopt_put(opt_to_free);
return err < 0 ? err : len;
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index a758eb84..ff75718 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -500,7 +500,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
/* send AddBA request */
ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
tid_tx->dialog_token, start_seq_num,
- local->hw.max_tx_aggregation_subframes,
+ IEEE80211_MAX_AMPDU_BUF,
tid_tx->timeout);
}
@@ -926,6 +926,7 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
amsdu = capab & IEEE80211_ADDBA_PARAM_AMSDU_MASK;
tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
+ buf_size = min(buf_size, local->hw.max_tx_aggregation_subframes);
mutex_lock(&sta->ampdu_mlme.mtx);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index c2bd1b6..da471ee 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -3454,8 +3454,12 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
goto out_unlock;
}
} else {
- /* for cookie below */
- ack_skb = skb;
+ /* Assign a dummy non-zero cookie, it's not sent to
+ * userspace in this case but we rely on its value
+ * internally in the need_offchan case to distinguish
+ * mgmt-tx from remain-on-channel.
+ */
+ *cookie = 0xffffffff;
}
if (!need_offchan) {
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index d0dc1bf..c9e325d 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -76,7 +76,8 @@ bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata)
void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata,
bool update_bss)
{
- if (__ieee80211_recalc_txpower(sdata) || update_bss)
+ if (__ieee80211_recalc_txpower(sdata) ||
+ (update_bss && ieee80211_sdata_running(sdata)))
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER);
}
@@ -1861,6 +1862,7 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
unregister_netdevice(sdata->dev);
} else {
cfg80211_unregister_wdev(&sdata->wdev);
+ ieee80211_teardown_sdata(sdata);
kfree(sdata);
}
}
@@ -1870,7 +1872,6 @@ void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata)
if (WARN_ON_ONCE(!test_bit(SDATA_STATE_RUNNING, &sdata->state)))
return;
ieee80211_do_stop(sdata, true);
- ieee80211_teardown_sdata(sdata);
}
void ieee80211_remove_interfaces(struct ieee80211_local *local)
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 858f6b1..175ffcf 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -541,8 +541,7 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
NL80211_FEATURE_HT_IBSS |
NL80211_FEATURE_VIF_TXPOWER |
NL80211_FEATURE_MAC_ON_CREATE |
- NL80211_FEATURE_USERSPACE_MPM |
- NL80211_FEATURE_FULL_AP_CLIENT_STATE;
+ NL80211_FEATURE_USERSPACE_MPM;
if (!ops->hw_scan)
wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN |
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index b890e22..b3b44a5 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -779,10 +779,8 @@ void mesh_plink_broken(struct sta_info *sta)
static void mesh_path_node_reclaim(struct rcu_head *rp)
{
struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
- struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
del_timer_sync(&node->mpath->timer);
- atomic_dec(&sdata->u.mesh.mpaths);
kfree(node->mpath);
kfree(node);
}
@@ -790,8 +788,9 @@ static void mesh_path_node_reclaim(struct rcu_head *rp)
/* needs to be called with the corresponding hashwlock taken */
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
- struct mesh_path *mpath;
- mpath = node->mpath;
+ struct mesh_path *mpath = node->mpath;
+ struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
+
spin_lock(&mpath->state_lock);
mpath->flags |= MESH_PATH_RESOLVING;
if (mpath->is_gate)
@@ -799,6 +798,7 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
hlist_del_rcu(&node->list);
call_rcu(&node->rcu, mesh_path_node_reclaim);
spin_unlock(&mpath->state_lock);
+ atomic_dec(&sdata->u.mesh.mpaths);
atomic_dec(&tbl->entries);
}
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 4aeca4b..a413e52 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -597,8 +597,8 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
/* We need to ensure power level is at max for scanning. */
ieee80211_hw_config(local, 0);
- if ((req->channels[0]->flags &
- IEEE80211_CHAN_NO_IR) ||
+ if ((req->channels[0]->flags & (IEEE80211_CHAN_NO_IR |
+ IEEE80211_CHAN_RADAR)) ||
!req->n_ssids) {
next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
} else {
@@ -645,7 +645,7 @@ ieee80211_scan_get_channel_time(struct ieee80211_channel *chan)
* TODO: channel switching also consumes quite some time,
* add that delay as well to get a better estimation
*/
- if (chan->flags & IEEE80211_CHAN_NO_IR)
+ if (chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR))
return IEEE80211_PASSIVE_CHANNEL_TIME;
return IEEE80211_PROBE_DELAY + IEEE80211_CHANNEL_TIME;
}
@@ -777,7 +777,8 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
*
* In any case, it is not necessary for a passive scan.
*/
- if (chan->flags & IEEE80211_CHAN_NO_IR || !scan_req->n_ssids) {
+ if ((chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR)) ||
+ !scan_req->n_ssids) {
*next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
local->next_scan_state = SCAN_DECISION;
return;
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index b7de0da..ecf0a01 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -572,7 +572,7 @@ static unsigned int llcp_sock_poll(struct file *file, struct socket *sock,
if (sock_writeable(sk) && sk->sk_state == LLCP_CONNECTED)
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
else
- set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
pr_debug("mask 0x%x\n", mask);
diff --git a/net/openvswitch/dp_notify.c b/net/openvswitch/dp_notify.c
index a7a80a6..653d073 100644
--- a/net/openvswitch/dp_notify.c
+++ b/net/openvswitch/dp_notify.c
@@ -58,7 +58,7 @@ void ovs_dp_notify_wq(struct work_struct *work)
struct hlist_node *n;
hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node) {
- if (vport->ops->type != OVS_VPORT_TYPE_NETDEV)
+ if (vport->ops->type == OVS_VPORT_TYPE_INTERNAL)
continue;
if (!(vport->dev->priv_flags & IFF_OVS_DATAPATH))
diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c
index efb736b..e41cd12 100644
--- a/net/openvswitch/vport-geneve.c
+++ b/net/openvswitch/vport-geneve.c
@@ -117,7 +117,6 @@ static struct vport_ops ovs_geneve_vport_ops = {
.destroy = ovs_netdev_tunnel_destroy,
.get_options = geneve_get_options,
.send = dev_queue_xmit,
- .owner = THIS_MODULE,
};
static int __init ovs_geneve_tnl_init(void)
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index c3257d7..7f8897f 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -89,7 +89,6 @@ static struct vport_ops ovs_gre_vport_ops = {
.create = gre_create,
.send = dev_queue_xmit,
.destroy = ovs_netdev_tunnel_destroy,
- .owner = THIS_MODULE,
};
static int __init ovs_gre_tnl_init(void)
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index b327368..6b0190b 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -180,9 +180,13 @@ void ovs_netdev_tunnel_destroy(struct vport *vport)
if (vport->dev->priv_flags & IFF_OVS_DATAPATH)
ovs_netdev_detach_dev(vport);
- /* Early release so we can unregister the device */
+ /* We can be invoked by both explicit vport deletion and
+ * underlying netdev deregistration; delete the link only
+ * if it's not already shutting down.
+ */
+ if (vport->dev->reg_state == NETREG_REGISTERED)
+ rtnl_delete_link(vport->dev);
dev_put(vport->dev);
- rtnl_delete_link(vport->dev);
vport->dev = NULL;
rtnl_unlock();
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 0ac0fd0..31cbc8c 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -71,7 +71,7 @@ static struct hlist_head *hash_bucket(const struct net *net, const char *name)
return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
}
-int ovs_vport_ops_register(struct vport_ops *ops)
+int __ovs_vport_ops_register(struct vport_ops *ops)
{
int err = -EEXIST;
struct vport_ops *o;
@@ -87,7 +87,7 @@ errout:
ovs_unlock();
return err;
}
-EXPORT_SYMBOL_GPL(ovs_vport_ops_register);
+EXPORT_SYMBOL_GPL(__ovs_vport_ops_register);
void ovs_vport_ops_unregister(struct vport_ops *ops)
{
@@ -256,8 +256,8 @@ int ovs_vport_set_options(struct vport *vport, struct nlattr *options)
*
* @vport: vport to delete.
*
- * Detaches @vport from its datapath and destroys it. It is possible to fail
- * for reasons such as lack of memory. ovs_mutex must be held.
+ * Detaches @vport from its datapath and destroys it. ovs_mutex must
+ * be held.
*/
void ovs_vport_del(struct vport *vport)
{
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index bdfd82a..8ea3a96 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -196,7 +196,13 @@ static inline const char *ovs_vport_name(struct vport *vport)
return vport->dev->name;
}
-int ovs_vport_ops_register(struct vport_ops *ops);
+int __ovs_vport_ops_register(struct vport_ops *ops);
+#define ovs_vport_ops_register(ops) \
+ ({ \
+ (ops)->owner = THIS_MODULE; \
+ __ovs_vport_ops_register(ops); \
+ })
+
void ovs_vport_ops_unregister(struct vport_ops *ops);
static inline struct rtable *ovs_tunnel_route_lookup(struct net *net,
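
Turning ovs_vport_ops_register() into a macro moves the THIS_MODULE assignment out of the per-driver initializers (note the .owner lines deleted from the geneve and gre ops above) and into the call site, so the owning module is always recorded and cannot be unloaded while its vports are in use. Callers are unchanged; a tunnel module presumably still registers like this (sketch, the init function body is not part of the diff):

	static int __init ovs_gre_tnl_init(void)
	{
		/* The wrapper stamps ops->owner = THIS_MODULE before
		 * calling __ovs_vport_ops_register(). */
		return ovs_vport_ops_register(&ovs_gre_vport_ops);
	}
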
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 1cf928f..992396a 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2329,8 +2329,8 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
static bool ll_header_truncated(const struct net_device *dev, int len)
{
/* net device doesn't like empty head */
- if (unlikely(len <= dev->hard_header_len)) {
- net_warn_ratelimited("%s: packet size is too short (%d <= %d)\n",
+ if (unlikely(len < dev->hard_header_len)) {
+ net_warn_ratelimited("%s: packet size is too short (%d < %d)\n",
current->comm, len, dev->hard_header_len);
return true;
}
diff --git a/net/rds/connection.c b/net/rds/connection.c
index d456403..e3b118c 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -186,12 +186,6 @@ static struct rds_connection *__rds_conn_create(struct net *net,
}
}
- if (trans == NULL) {
- kmem_cache_free(rds_conn_slab, conn);
- conn = ERR_PTR(-ENODEV);
- goto out;
- }
-
conn->c_trans = trans;
ret = trans->conn_alloc(conn, gfp);
diff --git a/net/rds/send.c b/net/rds/send.c
index 827155c..c9cdb35 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -1013,11 +1013,13 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
release_sock(sk);
}
- /* racing with another thread binding seems ok here */
+ lock_sock(sk);
if (daddr == 0 || rs->rs_bound_addr == 0) {
+ release_sock(sk);
ret = -ENOTCONN; /* XXX not a great errno */
goto out;
}
+ release_sock(sk);
if (payload_len > rds_sk_sndbuf(rs)) {
ret = -EMSGSIZE;
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
index e0547f5..adc555e 100644
--- a/net/rxrpc/ar-ack.c
+++ b/net/rxrpc/ar-ack.c
@@ -723,8 +723,10 @@ process_further:
if ((call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY ||
call->state == RXRPC_CALL_SERVER_AWAIT_ACK) &&
- hard > tx)
+ hard > tx) {
+ call->acks_hard = tx;
goto all_acked;
+ }
smp_rmb();
rxrpc_rotate_tx_window(call, hard - 1);
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
index a40d3af..14c4e12 100644
--- a/net/rxrpc/ar-output.c
+++ b/net/rxrpc/ar-output.c
@@ -531,7 +531,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
/* this should be in poll */
- clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
return -EPIPE;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index f43c8f3..7ec667d 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -253,7 +253,8 @@ int qdisc_set_default(const char *name)
}
/* We know handle. Find qdisc among all qdisc's attached to device
- (root qdisc, all its children, children of children etc.)
+ * (root qdisc, all its children, children of children etc.)
+ * Note: caller either uses rtnl or rcu_read_lock()
*/
static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
@@ -264,7 +265,7 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
root->handle == handle)
return root;
- list_for_each_entry(q, &root->list, list) {
+ list_for_each_entry_rcu(q, &root->list, list) {
if (q->handle == handle)
return q;
}
@@ -277,15 +278,18 @@ void qdisc_list_add(struct Qdisc *q)
struct Qdisc *root = qdisc_dev(q)->qdisc;
WARN_ON_ONCE(root == &noop_qdisc);
- list_add_tail(&q->list, &root->list);
+ ASSERT_RTNL();
+ list_add_tail_rcu(&q->list, &root->list);
}
}
EXPORT_SYMBOL(qdisc_list_add);
void qdisc_list_del(struct Qdisc *q)
{
- if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
- list_del(&q->list);
+ if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
+ ASSERT_RTNL();
+ list_del_rcu(&q->list);
+ }
}
EXPORT_SYMBOL(qdisc_list_del);
@@ -750,14 +754,18 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
if (n == 0)
return;
drops = max_t(int, n, 0);
+ rcu_read_lock();
while ((parentid = sch->parent)) {
if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
- return;
+ break;
+ if (sch->flags & TCQ_F_NOPARENT)
+ break;
+ /* TODO: perform the search on a per txq basis */
sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
if (sch == NULL) {
- WARN_ON(parentid != TC_H_ROOT);
- return;
+ WARN_ON_ONCE(parentid != TC_H_ROOT);
+ break;
}
cops = sch->ops->cl_ops;
if (cops->qlen_notify) {
@@ -768,6 +776,7 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
sch->q.qlen -= n;
__qdisc_qstats_drop(sch, drops);
}
+ rcu_read_unlock();
}
EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
@@ -941,7 +950,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
}
lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
if (!netif_is_multiqueue(dev))
- sch->flags |= TCQ_F_ONETXQUEUE;
+ sch->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
}
sch->handle = handle;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index cb5d4ad..e82a1ad 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -737,7 +737,7 @@ static void attach_one_default_qdisc(struct net_device *dev,
return;
}
if (!netif_is_multiqueue(dev))
- qdisc->flags |= TCQ_F_ONETXQUEUE;
+ qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
dev_queue->qdisc_sleeping = qdisc;
}
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index f3cbaec..3e82f04 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -63,7 +63,7 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
if (qdisc == NULL)
goto err;
priv->qdiscs[ntx] = qdisc;
- qdisc->flags |= TCQ_F_ONETXQUEUE;
+ qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
}
sch->flags |= TCQ_F_MQROOT;
@@ -156,7 +156,7 @@ static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
*old = dev_graft_qdisc(dev_queue, new);
if (new)
- new->flags |= TCQ_F_ONETXQUEUE;
+ new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
if (dev->flags & IFF_UP)
dev_activate(dev);
return 0;
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index 3811a74..ad70ecf 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -132,7 +132,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
goto err;
}
priv->qdiscs[i] = qdisc;
- qdisc->flags |= TCQ_F_ONETXQUEUE;
+ qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
}
/* If the mqprio options indicate that hardware should own
@@ -209,7 +209,7 @@ static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
*old = dev_graft_qdisc(dev_queue, new);
if (new)
- new->flags |= TCQ_F_ONETXQUEUE;
+ new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
if (dev->flags & IFF_UP)
dev_activate(dev);
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index e917d27..acb45b8 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -209,6 +209,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
struct sock *sk = skb->sk;
struct ipv6_pinfo *np = inet6_sk(sk);
struct flowi6 *fl6 = &transport->fl.u.ip6;
+ int res;
pr_debug("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n", __func__, skb,
skb->len, &fl6->saddr, &fl6->daddr);
@@ -220,7 +221,10 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
- return ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
+ rcu_read_lock();
+ res = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt), np->tclass);
+ rcu_read_unlock();
+ return res;
}
/* Returns the dst cache entry for the given source and destination ip
@@ -262,7 +266,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
pr_debug("src=%pI6 - ", &fl6->saddr);
}
- final_p = fl6_update_dst(fl6, np->opt, &final);
+ rcu_read_lock();
+ final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
+ rcu_read_unlock();
+
dst = ip6_dst_lookup_flow(sk, fl6, final_p);
if (!asoc || saddr)
goto out;
@@ -321,7 +328,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
if (baddr) {
fl6->saddr = baddr->v6.sin6_addr;
fl6->fl6_sport = baddr->v6.sin6_port;
- final_p = fl6_update_dst(fl6, np->opt, &final);
+ final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
dst = ip6_dst_lookup_flow(sk, fl6, final_p);
}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 897c01c..03c8256 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -972,7 +972,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
return -EFAULT;
/* Alloc space for the address array in kernel memory. */
- kaddrs = kmalloc(addrs_size, GFP_KERNEL);
+ kaddrs = kmalloc(addrs_size, GFP_USER | __GFP_NOWARN);
if (unlikely(!kaddrs))
return -ENOMEM;
@@ -4928,7 +4928,7 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
to = optval + offsetof(struct sctp_getaddrs, addrs);
space_left = len - offsetof(struct sctp_getaddrs, addrs);
- addrs = kmalloc(space_left, GFP_KERNEL);
+ addrs = kmalloc(space_left, GFP_USER | __GFP_NOWARN);
if (!addrs)
return -ENOMEM;
@@ -6458,7 +6458,7 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
if (sctp_writeable(sk)) {
mask |= POLLOUT | POLLWRNORM;
} else {
- set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
/*
* Since the socket is not locked, the buffer
* might be made available after the writeable check and
@@ -6801,26 +6801,30 @@ no_packet:
static void __sctp_write_space(struct sctp_association *asoc)
{
struct sock *sk = asoc->base.sk;
- struct socket *sock = sk->sk_socket;
- if ((sctp_wspace(asoc) > 0) && sock) {
- if (waitqueue_active(&asoc->wait))
- wake_up_interruptible(&asoc->wait);
+ if (sctp_wspace(asoc) <= 0)
+ return;
+
+ if (waitqueue_active(&asoc->wait))
+ wake_up_interruptible(&asoc->wait);
- if (sctp_writeable(sk)) {
- wait_queue_head_t *wq = sk_sleep(sk);
+ if (sctp_writeable(sk)) {
+ struct socket_wq *wq;
- if (wq && waitqueue_active(wq))
- wake_up_interruptible(wq);
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq) {
+ if (waitqueue_active(&wq->wait))
+ wake_up_interruptible(&wq->wait);
/* Note that we try to include the Async I/O support
* here by modeling from the current TCP/UDP code.
* We have not tested with it yet.
*/
if (!(sk->sk_shutdown & SEND_SHUTDOWN))
- sock_wake_async(sock,
- SOCK_WAKE_SPACE, POLL_OUT);
+ sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
}
+ rcu_read_unlock();
}
}
@@ -7375,6 +7379,13 @@ struct proto sctp_prot = {
#if IS_ENABLED(CONFIG_IPV6)
+#include <net/transp_v6.h>
+static void sctp_v6_destroy_sock(struct sock *sk)
+{
+ sctp_destroy_sock(sk);
+ inet6_destroy_sock(sk);
+}
+
struct proto sctpv6_prot = {
.name = "SCTPv6",
.owner = THIS_MODULE,
@@ -7384,7 +7395,7 @@ struct proto sctpv6_prot = {
.accept = sctp_accept,
.ioctl = sctp_ioctl,
.init = sctp_init_sock,
- .destroy = sctp_destroy_sock,
+ .destroy = sctp_v6_destroy_sock,
.shutdown = sctp_shutdown,
.setsockopt = sctp_setsockopt,
.getsockopt = sctp_getsockopt,
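
The new sctp_v6_destroy_sock() makes the IPv6 flavour of SCTP run inet6_destroy_sock() on close, so IPv6-level per-socket state is released instead of leaked. A hypothetical user-space sketch of the lifetime in question follows; it needs a kernel with SCTP support, and the multicast join is just one convenient piece of IPv6-level state that only the inet6 destroy path frees.

#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET6, SOCK_STREAM, IPPROTO_SCTP);

	if (fd < 0) {
		perror("socket(AF_INET6, SOCK_STREAM, IPPROTO_SCTP)");
		return 1;
	}

	/* Give the socket some IPv6-level state to tear down. */
	struct ipv6_mreq mreq;
	memset(&mreq, 0, sizeof(mreq));
	inet_pton(AF_INET6, "ff02::1234", &mreq.ipv6mr_multiaddr);
	mreq.ipv6mr_interface = if_nametoindex("lo");
	if (setsockopt(fd, IPPROTO_IPV6, IPV6_JOIN_GROUP, &mreq, sizeof(mreq)) < 0)
		perror("setsockopt(IPV6_JOIN_GROUP)");

	/* close() now reaches sctp_v6_destroy_sock(), i.e. both
	 * sctp_destroy_sock() and inet6_destroy_sock() are called.
	 */
	close(fd);
	return 0;
}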
diff --git a/net/socket.c b/net/socket.c
index dd2c247..456fadb 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1056,27 +1056,20 @@ static int sock_fasync(int fd, struct file *filp, int on)
return 0;
}
-/* This function may be called only under socket lock or callback_lock or rcu_lock */
+/* This function may be called only under rcu_lock */
-int sock_wake_async(struct socket *sock, int how, int band)
+int sock_wake_async(struct socket_wq *wq, int how, int band)
{
- struct socket_wq *wq;
-
- if (!sock)
- return -1;
- rcu_read_lock();
- wq = rcu_dereference(sock->wq);
- if (!wq || !wq->fasync_list) {
- rcu_read_unlock();
+ if (!wq || !wq->fasync_list)
return -1;
- }
+
switch (how) {
case SOCK_WAKE_WAITD:
- if (test_bit(SOCK_ASYNC_WAITDATA, &sock->flags))
+ if (test_bit(SOCKWQ_ASYNC_WAITDATA, &wq->flags))
break;
goto call_kill;
case SOCK_WAKE_SPACE:
- if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags))
+ if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags))
break;
/* fall through */
case SOCK_WAKE_IO:
@@ -1086,7 +1079,7 @@ call_kill:
case SOCK_WAKE_URG:
kill_fasync(&wq->fasync_list, SIGURG, band);
}
- rcu_read_unlock();
+
return 0;
}
EXPORT_SYMBOL(sock_wake_async);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 1d1a704..2ffaf6a 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -398,7 +398,7 @@ static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen,
if (unlikely(!sock))
return -ENOTSOCK;
- clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
+ clear_bit(SOCKWQ_ASYNC_NOSPACE, &sock->flags);
if (base != 0) {
addr = NULL;
addrlen = 0;
@@ -442,7 +442,7 @@ static void xs_nospace_callback(struct rpc_task *task)
struct sock_xprt *transport = container_of(task->tk_rqstp->rq_xprt, struct sock_xprt, xprt);
transport->inet->sk_write_pending--;
- clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
+ clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
}
/**
@@ -467,7 +467,7 @@ static int xs_nospace(struct rpc_task *task)
/* Don't race with disconnect */
if (xprt_connected(xprt)) {
- if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) {
+ if (test_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags)) {
/*
* Notify TCP that we're limited by the application
* window size
@@ -478,7 +478,7 @@ static int xs_nospace(struct rpc_task *task)
xprt_wait_for_buffer_space(task, xs_nospace_callback);
}
} else {
- clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
+ clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
ret = -ENOTCONN;
}
@@ -626,7 +626,7 @@ process_status:
case -EPERM:
/* When the server has died, an ICMP port unreachable message
* prompts ECONNREFUSED. */
- clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
+ clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
}
return status;
@@ -715,7 +715,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
case -EADDRINUSE:
case -ENOBUFS:
case -EPIPE:
- clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
+ clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
}
return status;
@@ -1618,7 +1618,7 @@ static void xs_write_space(struct sock *sk)
if (unlikely(!(xprt = xprt_from_sock(sk))))
return;
- if (test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags) == 0)
+ if (test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sock->flags) == 0)
return;
xprt_write_space(xprt);
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 9efbdbd..91aea07 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -191,6 +191,7 @@ void tipc_link_add_bc_peer(struct tipc_link *snd_l,
snd_l->ackers++;
rcv_l->acked = snd_l->snd_nxt - 1;
+ snd_l->state = LINK_ESTABLISHED;
tipc_link_build_bc_init_msg(uc_l, xmitq);
}
@@ -206,6 +207,7 @@ void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
rcv_l->state = LINK_RESET;
if (!snd_l->ackers) {
tipc_link_reset(snd_l);
+ snd_l->state = LINK_RESET;
__skb_queue_purge(xmitq);
}
}
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 552dbab..b53246f 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -105,6 +105,7 @@ struct tipc_sock {
static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
static void tipc_data_ready(struct sock *sk);
static void tipc_write_space(struct sock *sk);
+static void tipc_sock_destruct(struct sock *sk);
static int tipc_release(struct socket *sock);
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p);
@@ -381,6 +382,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
sk->sk_rcvbuf = sysctl_tipc_rmem[1];
sk->sk_data_ready = tipc_data_ready;
sk->sk_write_space = tipc_write_space;
+ sk->sk_destruct = tipc_sock_destruct;
tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
tsk->sent_unacked = 0;
atomic_set(&tsk->dupl_rcvcnt, 0);
@@ -470,9 +472,6 @@ static int tipc_release(struct socket *sock)
tipc_node_remove_conn(net, dnode, tsk->portid);
}
- /* Discard any remaining (connection-based) messages in receive queue */
- __skb_queue_purge(&sk->sk_receive_queue);
-
/* Reject any messages that accumulated in backlog queue */
sock->state = SS_DISCONNECTING;
release_sock(sk);
@@ -1515,6 +1514,11 @@ static void tipc_data_ready(struct sock *sk)
rcu_read_unlock();
}
+static void tipc_sock_destruct(struct sock *sk)
+{
+ __skb_queue_purge(&sk->sk_receive_queue);
+}
+
/**
* filter_connect - Handle all incoming messages for a connection-based socket
* @tsk: TIPC socket
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index ad2719a..70c0327 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -158,8 +158,11 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
struct udp_media_addr *src = (struct udp_media_addr *)&b->addr.value;
struct rtable *rt;
- if (skb_headroom(skb) < UDP_MIN_HEADROOM)
- pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC);
+ if (skb_headroom(skb) < UDP_MIN_HEADROOM) {
+ err = pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC);
+ if (err)
+ goto tx_error;
+ }
skb_set_inner_protocol(skb, htons(ETH_P_TIPC));
ub = rcu_dereference_rtnl(b->media_ptr);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 955ec15..45aebd9 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -326,6 +326,118 @@ found:
return s;
}
+/* Support code for asymmetrically connected dgram sockets
+ *
+ * If a datagram socket is connected to a socket not itself connected
+ * to the first socket (e.g., /dev/log), clients may only enqueue more
+ * messages if the present receive queue of the server socket is not
+ * "too large". This means there's a second writeability condition
+ * poll and sendmsg need to test. The dgram recv code will do a wake
+ * up on the peer_wait wait queue of a socket upon reception of a
+ * datagram; this wake up needs to be propagated to sleeping would-be
+ * writers since these might not have sent anything so far. This can't
+ * be accomplished via poll_wait because the lifetime of the server
+ * socket might be less than that of its clients if these break their
+ * association with it or if the server socket is closed while clients
+ * are still connected to it, and there's no way to inform "a polling
+ * implementation" that it should let go of a certain wait queue.
+ *
+ * In order to propagate a wake up, a wait_queue_t of the client
+ * socket is enqueued on the peer_wait queue of the server socket
+ * whose wake function does a wake_up on the ordinary client socket
+ * wait queue. This connection is established whenever a write (or
+ * poll for write) hits the flow control condition and is broken when
+ * the association to the server socket is dissolved or after a wake
+ * up has been relayed.
+ */
+
+static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags,
+ void *key)
+{
+ struct unix_sock *u;
+ wait_queue_head_t *u_sleep;
+
+ u = container_of(q, struct unix_sock, peer_wake);
+
+ __remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
+ q);
+ u->peer_wake.private = NULL;
+
+ /* relaying can only happen while the wq still exists */
+ u_sleep = sk_sleep(&u->sk);
+ if (u_sleep)
+ wake_up_interruptible_poll(u_sleep, key);
+
+ return 0;
+}
+
+static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
+{
+ struct unix_sock *u, *u_other;
+ int rc;
+
+ u = unix_sk(sk);
+ u_other = unix_sk(other);
+ rc = 0;
+ spin_lock(&u_other->peer_wait.lock);
+
+ if (!u->peer_wake.private) {
+ u->peer_wake.private = other;
+ __add_wait_queue(&u_other->peer_wait, &u->peer_wake);
+
+ rc = 1;
+ }
+
+ spin_unlock(&u_other->peer_wait.lock);
+ return rc;
+}
+
+static void unix_dgram_peer_wake_disconnect(struct sock *sk,
+ struct sock *other)
+{
+ struct unix_sock *u, *u_other;
+
+ u = unix_sk(sk);
+ u_other = unix_sk(other);
+ spin_lock(&u_other->peer_wait.lock);
+
+ if (u->peer_wake.private == other) {
+ __remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
+ u->peer_wake.private = NULL;
+ }
+
+ spin_unlock(&u_other->peer_wait.lock);
+}
+
+static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
+ struct sock *other)
+{
+ unix_dgram_peer_wake_disconnect(sk, other);
+ wake_up_interruptible_poll(sk_sleep(sk),
+ POLLOUT |
+ POLLWRNORM |
+ POLLWRBAND);
+}
+
+/* preconditions:
+ * - unix_peer(sk) == other
+ * - association is stable
+ */
+static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
+{
+ int connected;
+
+ connected = unix_dgram_peer_wake_connect(sk, other);
+
+ if (unix_recvq_full(other))
+ return 1;
+
+ if (connected)
+ unix_dgram_peer_wake_disconnect(sk, other);
+
+ return 0;
+}
+
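
The comment above describes a condition that is easy to reproduce from user space, and the peer_wake machinery added here is what wakes a sleeping or polling writer once the server drains its queue. The sketch below is a hypothetical stand-alone demo (socket path and message size are made up): a nonblocking client connected to an unconnected datagram server stops being writable once the server's receive queue fills, and a later poll() reports POLLOUT again after the server reads one datagram.

#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

int main(void)
{
	int srv = socket(AF_UNIX, SOCK_DGRAM, 0);
	int cli = socket(AF_UNIX, SOCK_DGRAM, 0);
	struct sockaddr_un addr;
	char buf[64] = "x";

	memset(&addr, 0, sizeof(addr));
	addr.sun_family = AF_UNIX;
	strncpy(addr.sun_path, "/tmp/dgram-demo.sock", sizeof(addr.sun_path) - 1);
	unlink(addr.sun_path);

	if (bind(srv, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    connect(cli, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("bind/connect");
		return 1;
	}
	fcntl(cli, F_SETFL, O_NONBLOCK);

	/* Fill the server's receive queue until the client hits flow control. */
	while (send(cli, buf, sizeof(buf), 0) >= 0)
		;
	printf("send stopped: %s\n", strerror(errno));		/* EAGAIN expected */

	struct pollfd pfd = { .fd = cli, .events = POLLOUT };
	printf("POLLOUT before drain: %d\n", poll(&pfd, 1, 0));	/* 0: not writable */

	recv(srv, buf, sizeof(buf), 0);				/* server consumes one datagram */
	printf("POLLOUT after drain:  %d\n", poll(&pfd, 1, 100));	/* 1: writer woken up */

	close(cli);
	close(srv);
	unlink(addr.sun_path);
	return 0;
}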
static int unix_writable(const struct sock *sk)
{
return sk->sk_state != TCP_LISTEN &&
@@ -431,6 +543,8 @@ static void unix_release_sock(struct sock *sk, int embrion)
skpair->sk_state_change(skpair);
sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
}
+
+ unix_dgram_peer_wake_disconnect(sk, skpair);
sock_put(skpair); /* It may now die */
unix_peer(sk) = NULL;
}
@@ -666,6 +780,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
INIT_LIST_HEAD(&u->link);
mutex_init(&u->readlock); /* single task reading lock */
init_waitqueue_head(&u->peer_wait);
+ init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
unix_insert_socket(unix_sockets_unbound(sk), sk);
out:
if (sk == NULL)
@@ -1033,6 +1148,8 @@ restart:
if (unix_peer(sk)) {
struct sock *old_peer = unix_peer(sk);
unix_peer(sk) = other;
+ unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
+
unix_state_double_unlock(sk, other);
if (other != old_peer)
@@ -1434,6 +1551,14 @@ static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool sen
return err;
}
+static bool unix_passcred_enabled(const struct socket *sock,
+ const struct sock *other)
+{
+ return test_bit(SOCK_PASSCRED, &sock->flags) ||
+ !other->sk_socket ||
+ test_bit(SOCK_PASSCRED, &other->sk_socket->flags);
+}
+
/*
* Some apps rely on write() giving SCM_CREDENTIALS
* We include credentials if source or destination socket
@@ -1444,14 +1569,41 @@ static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
{
if (UNIXCB(skb).pid)
return;
- if (test_bit(SOCK_PASSCRED, &sock->flags) ||
- !other->sk_socket ||
- test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
+ if (unix_passcred_enabled(sock, other)) {
UNIXCB(skb).pid = get_pid(task_tgid(current));
current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
}
}
+static int maybe_init_creds(struct scm_cookie *scm,
+ struct socket *socket,
+ const struct sock *other)
+{
+ int err;
+ struct msghdr msg = { .msg_controllen = 0 };
+
+ err = scm_send(socket, &msg, scm, false);
+ if (err)
+ return err;
+
+ if (unix_passcred_enabled(socket, other)) {
+ scm->pid = get_pid(task_tgid(current));
+ current_uid_gid(&scm->creds.uid, &scm->creds.gid);
+ }
+ return err;
+}
+
+static bool unix_skb_scm_eq(struct sk_buff *skb,
+ struct scm_cookie *scm)
+{
+ const struct unix_skb_parms *u = &UNIXCB(skb);
+
+ return u->pid == scm->pid &&
+ uid_eq(u->uid, scm->creds.uid) &&
+ gid_eq(u->gid, scm->creds.gid) &&
+ unix_secdata_eq(scm, skb);
+}
+
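
maybe_init_creds() and unix_skb_scm_eq() keep the behaviour referred to in the comment above ("Some apps rely on write() giving SCM_CREDENTIALS") intact for the sendpage path: credentials implied by a plain write() must neither be dropped nor glued onto data from a different writer. A minimal, hypothetical user-space demo of that behaviour, where SO_PASSCRED on the receiver turns an ordinary write() into a message carrying SCM_CREDENTIALS:

#define _GNU_SOURCE		/* for struct ucred and SCM_CREDENTIALS */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	int sv[2], on = 1;
	char data[16], cbuf[CMSG_SPACE(sizeof(struct ucred))];

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0)
		return 1;
	/* Receiver asks for credentials; the sender keeps using plain write(). */
	setsockopt(sv[1], SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));

	write(sv[0], "hello", 5);

	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	if (recvmsg(sv[1], &msg, 0) < 0)
		return 1;

	for (struct cmsghdr *c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c)) {
		if (c->cmsg_level == SOL_SOCKET && c->cmsg_type == SCM_CREDENTIALS) {
			struct ucred uc;

			memcpy(&uc, CMSG_DATA(c), sizeof(uc));
			printf("pid=%d uid=%u gid=%u\n",
			       (int)uc.pid, (unsigned)uc.uid, (unsigned)uc.gid);
		}
	}
	close(sv[0]);
	close(sv[1]);
	return 0;
}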
/*
* Send AF_UNIX data.
*/
@@ -1472,6 +1624,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
struct scm_cookie scm;
int max_level;
int data_len = 0;
+ int sk_locked;
wait_for_unix_gc();
err = scm_send(sock, msg, &scm, false);
@@ -1550,12 +1703,14 @@ restart:
goto out_free;
}
+ sk_locked = 0;
unix_state_lock(other);
+restart_locked:
err = -EPERM;
if (!unix_may_send(sk, other))
goto out_unlock;
- if (sock_flag(other, SOCK_DEAD)) {
+ if (unlikely(sock_flag(other, SOCK_DEAD))) {
/*
* Check with 1003.1g - what should
* datagram error
@@ -1563,10 +1718,14 @@ restart:
unix_state_unlock(other);
sock_put(other);
+ if (!sk_locked)
+ unix_state_lock(sk);
+
err = 0;
- unix_state_lock(sk);
if (unix_peer(sk) == other) {
unix_peer(sk) = NULL;
+ unix_dgram_peer_wake_disconnect_wakeup(sk, other);
+
unix_state_unlock(sk);
unix_dgram_disconnected(sk, other);
@@ -1592,21 +1751,38 @@ restart:
goto out_unlock;
}
- if (unix_peer(other) != sk && unix_recvq_full(other)) {
- if (!timeo) {
- err = -EAGAIN;
- goto out_unlock;
+ if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
+ if (timeo) {
+ timeo = unix_wait_for_peer(other, timeo);
+
+ err = sock_intr_errno(timeo);
+ if (signal_pending(current))
+ goto out_free;
+
+ goto restart;
}
- timeo = unix_wait_for_peer(other, timeo);
+ if (!sk_locked) {
+ unix_state_unlock(other);
+ unix_state_double_lock(sk, other);
+ }
- err = sock_intr_errno(timeo);
- if (signal_pending(current))
- goto out_free;
+ if (unix_peer(sk) != other ||
+ unix_dgram_peer_wake_me(sk, other)) {
+ err = -EAGAIN;
+ sk_locked = 1;
+ goto out_unlock;
+ }
- goto restart;
+ if (!sk_locked) {
+ sk_locked = 1;
+ goto restart_locked;
+ }
}
+ if (unlikely(sk_locked))
+ unix_state_unlock(sk);
+
if (sock_flag(other, SOCK_RCVTSTAMP))
__net_timestamp(skb);
maybe_add_creds(skb, sock, other);
@@ -1620,6 +1796,8 @@ restart:
return len;
out_unlock:
+ if (sk_locked)
+ unix_state_unlock(sk);
unix_state_unlock(other);
out_free:
kfree_skb(skb);
@@ -1741,8 +1919,10 @@ out_err:
static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
int offset, size_t size, int flags)
{
- int err = 0;
- bool send_sigpipe = true;
+ int err;
+ bool send_sigpipe = false;
+ bool init_scm = true;
+ struct scm_cookie scm;
struct sock *other, *sk = socket->sk;
struct sk_buff *skb, *newskb = NULL, *tail = NULL;
@@ -1760,7 +1940,7 @@ alloc_skb:
newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
&err, 0);
if (!newskb)
- return err;
+ goto err;
}
/* we must acquire readlock as we modify already present
@@ -1769,12 +1949,12 @@ alloc_skb:
err = mutex_lock_interruptible(&unix_sk(other)->readlock);
if (err) {
err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
- send_sigpipe = false;
goto err;
}
if (sk->sk_shutdown & SEND_SHUTDOWN) {
err = -EPIPE;
+ send_sigpipe = true;
goto err_unlock;
}
@@ -1783,17 +1963,27 @@ alloc_skb:
if (sock_flag(other, SOCK_DEAD) ||
other->sk_shutdown & RCV_SHUTDOWN) {
err = -EPIPE;
+ send_sigpipe = true;
goto err_state_unlock;
}
+ if (init_scm) {
+ err = maybe_init_creds(&scm, socket, other);
+ if (err)
+ goto err_state_unlock;
+ init_scm = false;
+ }
+
skb = skb_peek_tail(&other->sk_receive_queue);
if (tail && tail == skb) {
skb = newskb;
- } else if (!skb) {
- if (newskb)
+ } else if (!skb || !unix_skb_scm_eq(skb, &scm)) {
+ if (newskb) {
skb = newskb;
- else
+ } else {
+ tail = skb;
goto alloc_skb;
+ }
} else if (newskb) {
/* this is fast path, we don't necessarily need to
* call to kfree_skb even though with newskb == NULL
@@ -1814,6 +2004,9 @@ alloc_skb:
atomic_add(size, &sk->sk_wmem_alloc);
if (newskb) {
+ err = unix_scm_to_skb(&scm, skb, false);
+ if (err)
+ goto err_state_unlock;
spin_lock(&other->sk_receive_queue.lock);
__skb_queue_tail(&other->sk_receive_queue, newskb);
spin_unlock(&other->sk_receive_queue.lock);
@@ -1823,7 +2016,7 @@ alloc_skb:
mutex_unlock(&unix_sk(other)->readlock);
other->sk_data_ready(other);
-
+ scm_destroy(&scm);
return size;
err_state_unlock:
@@ -1834,6 +2027,8 @@ err:
kfree_skb(newskb);
if (send_sigpipe && !(flags & MSG_NOSIGNAL))
send_sig(SIGPIPE, current, 0);
+ if (!init_scm)
+ scm_destroy(&scm);
return err;
}
@@ -1996,7 +2191,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
!timeo)
break;
- set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
unix_state_unlock(sk);
timeo = freezable_schedule_timeout(timeo);
unix_state_lock(sk);
@@ -2004,7 +2199,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
if (sock_flag(sk, SOCK_DEAD))
break;
- clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
}
finish_wait(sk_sleep(sk), &wait);
@@ -2137,10 +2332,7 @@ unlock:
if (check_creds) {
/* Never glue messages from different writers */
- if ((UNIXCB(skb).pid != scm.pid) ||
- !uid_eq(UNIXCB(skb).uid, scm.creds.uid) ||
- !gid_eq(UNIXCB(skb).gid, scm.creds.gid) ||
- !unix_secdata_eq(&scm, skb))
+ if (!unix_skb_scm_eq(skb, &scm))
break;
} else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
/* Copy credentials */
@@ -2476,20 +2668,22 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
return mask;
writable = unix_writable(sk);
- other = unix_peer_get(sk);
- if (other) {
- if (unix_peer(other) != sk) {
- sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
- if (unix_recvq_full(other))
- writable = 0;
- }
- sock_put(other);
+ if (writable) {
+ unix_state_lock(sk);
+
+ other = unix_peer(sk);
+ if (other && unix_peer(other) != sk &&
+ unix_recvq_full(other) &&
+ unix_dgram_peer_wake_me(sk, other))
+ writable = 0;
+
+ unix_state_unlock(sk);
}
if (writable)
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
else
- set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
return mask;
}